Scrape multiple urls using QWebPage

Posted on

Question :

Scrape multiple urls using QWebPage

I’m using Qt’s QWebPage to render a page that uses javascript to update its content dynamically – so a library that just downloads a static version of the page (such as urllib2) won’t work.

My problem is, when I render a second page, about 99% of the time the program just crashes. At other times, it will work three times before crashing. I’ve also gotten a few segfaults, but it is all very random.

My guess is the object I’m using to render isn’t getting deleted properly, so trying to reuse it is possibly causing some problems for myself. I’ve looked all over and no one really seems to be having this same issue.

Here’s the code I’m using. The program downloads web pages from steam’s community market so I can create a database of all the items. I need to call the getItemsFromPage function multiple times to get all of the items, as they are broken up into pages (showing results 1-10 out of X amount).

import csv
import re
import sys
from string import replace
from bs4 import BeautifulSoup
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *

class Item:
    """One market listing scraped from the Steam community market."""

    # name:  item display name
    # count: number of active listings (quantity column)
    # price: lowest listed price in dollars
    # game:  appid of the game the item belongs to
    __slots__ = ("name", "count", "price", "game")

    def __repr__(self):
        # BUG FIX (scrape damage): the original `return + "(" ...` had lost
        # the `self.name` operand, making this a SyntaxError.
        return self.name + "(" + str(self.count) + ")"

    def __str__(self):
        # Same scrape damage as __repr__: restore the missing name operand.
        return self.name + ", " + str(self.count) + ", $" + str(self.price)

class Render(QWebPage):
    """Render a javascript-driven page with QtWebKit.

    Spins a local Qt event loop until the page finishes loading, then keeps
    the rendered main frame in ``self.frame`` for the caller to scrape.

    NOTE(review): the scraped text destroyed ``__init__``; reconstructed
    from the standard QWebPage one-shot rendering recipe — confirm against
    the original post.
    """

    def __init__(self, url):
        # Creating a QApplication per Render is what crashes on the second
        # call (see the accepted answer) — kept here only to preserve the
        # question's original behavior.
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._loadFinished)
        self.mainFrame().load(QUrl(url))
        # Block until _loadFinished quits the loop.
        self.app.exec_()

    def _loadFinished(self, result):
        # Capture the rendered frame, then stop the local event loop so
        # __init__ can return.
        self.frame = self.mainFrame()
        self.app.quit()

def getItemsFromPage(appid, page=1):
    """Render one page of the Steam community market for *appid* and
    return a list of Item objects found on it (empty list when the page
    has no listings).
    """
    # NOTE(review): the market URL was stripped by the scrape; reconstructed
    # from the Steam community market search endpoint — verify.
    r = Render("http://steamcommunity.com/market/search?appid="
               + str(appid) + "#p" + str(page))

    soup = BeautifulSoup(str(r.frame.toHtml().toUtf8()))

    itemLst = soup.find_all("div", "market_listing_row market_recent_listing_row")

    items = []

    for k in itemLst:
        # BUG FIX (scrape damage): object creation and the .name assignment
        # had been fused onto one line.
        i = Item()
        i.name = k.find("span", "market_listing_item_name").string
        # Quantity comes back like "1,234" - drop the thousands separator.
        i.count = int(replace(k.find("span", "market_listing_num_listings_qty").string, ",", ""))
        # BUG FIX (scrape damage): the re.search call and the game
        # assignment had been fused; price is the first "$x.yy" in the row.
        i.price = float(re.search(r'\$([0-9]+\.[0-9]+)', str(k)).group(1))
        i.game = appid
        # BUG FIX: the original loop never collected the items it built.
        items.append(i)

    return items

if __name__ == "__main__":

    print "Updating market items to dota2.csv ..."

    i = 1

    with open("dota2.csv", "w") as f:
        writer = csv.writer(f)

        r = None

        while True:
            print "Page " + str(i)

            items = getItemsFromPage(570)

            if len(items) == 0:
                print "No items found, stopping..."

            for k in items:
                writer.writerow((, k.count, k.price,

            i += 1

    print "Done."

Calling getItemsFromPage once works fine. Subsequent calls give me my problem. The output of the program is typically

Updating market items to dota2.csv ...
Page 1
Page 2

and then it crashes. It should go on for over 700 pages.

Asked By: Jesse


Answer #1:

The problem with your program is that you are attempting to create a new QApplication with every url you fetch.

Instead, only one QApplication and one WebPage should be created. The WebPage can use its loadFinished signal to create an internal loop by fetching a new url after each one has been processed. Custom html processing can be added by connecting a user-defined slot to a signal which emits the html text and the url when they become available. The scripts below (for PyQt5 and PyQt4) show how to implement this.

Here are some examples which show how to use the WebPage class:


def my_html_processor(html, url):
    """Example slot for ``htmlReady``: report each fetched url and how
    much html it produced."""
    message = 'loaded: [%d chars] %s' % (len(html), url)
    print(message)

import sys
app = QApplication(sys.argv)
webpage = WebPage(verbose=False)
# BUG FIX (scrape damage): the signal connection, the call that starts
# processing, and the event-loop start were all lost from the scraped
# text; reconstructed from the answer's own description.
webpage.htmlReady.connect(my_html_processor)

# example 1: process list of urls
# NOTE(review): the sample url was stripped by the scrape; placeholder used.
urls = ['http://pyqt.sourceforge.net/Docs/PyQt5/'] * 3
print('Processing list of urls...')
webpage.process(urls)

# example 2: process one url continuously
# import signal, itertools
# signal.signal(signal.SIGINT, signal.SIG_DFL)
# print('Processing url continuously...')
# print('Press Ctrl+C to quit')
# url = 'http://pyqt.sourceforge.net/Docs/PyQt5/'
# webpage.process(itertools.repeat(url))

sys.exit(app.exec_())


PyQt5 WebPage:

from PyQt5.QtCore import pyqtSignal, QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage

class WebPage(QWebEnginePage):
    """Fetch a sequence of urls one after another with a single page object.

    The ``loadFinished`` signal drives an internal loop: each time a page
    finishes, its html is emitted via ``htmlReady(html, url)`` and the next
    url is loaded. The application quits when the url iterator is exhausted.

    NOTE(review): the scrape dropped the base-class init, the ``try:``
    keyword, the load/quit calls and the ``handleLoadFinished`` body;
    reconstructed from the answer's description — verify against the
    original post.
    """
    htmlReady = pyqtSignal(str, str)

    def __init__(self, verbose=False):
        super().__init__()
        self._verbose = verbose
        self.loadFinished.connect(self.handleLoadFinished)

    def process(self, urls):
        # Start the fetch loop over an arbitrary (possibly infinite) iterable.
        self._urls = iter(urls)
        self.fetchNext()

    def fetchNext(self):
        """Load the next url; return False when the iterator is exhausted."""
        try:
            url = next(self._urls)
        except StopIteration:
            return False
        else:
            self.load(QUrl(url))
        return True

    def processCurrentPage(self, html):
        # Called asynchronously by toHtml() with the rendered html.
        self.htmlReady.emit(html, self.url().toString())
        if not self.fetchNext():
            QApplication.instance().quit()

    def handleLoadFinished(self):
        # QWebEnginePage.toHtml is asynchronous: pass a callback.
        self.toHtml(self.processCurrentPage)

    def javaScriptConsoleMessage(self, *args, **kwargs):
        # Suppress js console noise unless verbose mode was requested.
        if self._verbose:
            super().javaScriptConsoleMessage(*args, **kwargs)

PyQt4 WebPage:

from PyQt4.QtCore import pyqtSignal, QUrl
from PyQt4.QtGui import QApplication
from PyQt4.QtWebKit import QWebPage

class WebPage(QWebPage):
    """PyQt4/QtWebKit version of the single-page url fetcher.

    Same design as the PyQt5 class: one page object, ``loadFinished``
    drives the loop, ``htmlReady(html, url)`` delivers each result.

    NOTE(review): the scrape dropped the signal connection, the ``try:``
    keyword, the load call, the emit statement and the quit call;
    reconstructed from the answer's description — verify against the
    original post.
    """
    htmlReady = pyqtSignal(str, str)

    def __init__(self, verbose=False):
        super(WebPage, self).__init__()
        self._verbose = verbose
        self.mainFrame().loadFinished.connect(self.handleLoadFinished)

    def start(self, urls):
        # Begin fetching; urls may be any (possibly infinite) iterable.
        self._urls = iter(urls)
        self.fetchNext()

    def fetchNext(self):
        """Load the next url; return False when the iterator is exhausted."""
        try:
            url = next(self._urls)
        except StopIteration:
            return False
        else:
            self.mainFrame().load(QUrl(url))
        return True

    def processCurrentPage(self):
        # QtWebKit's toHtml() is synchronous, so the html can be emitted
        # directly from here.
        url = self.mainFrame().url().toString()
        self.htmlReady.emit(self.mainFrame().toHtml(), url)
        print('loaded: [%d bytes] %s' % (self.bytesReceived(), url))

    def handleLoadFinished(self):
        self.processCurrentPage()
        if not self.fetchNext():
            QApplication.instance().quit()

    def javaScriptConsoleMessage(self, *args, **kwargs):
        # Suppress js console noise unless verbose mode was requested.
        if self._verbose:
            super(WebPage, self).javaScriptConsoleMessage(*args, **kwargs)
Answered By: ekhumoro

Leave a Reply

Your email address will not be published. Required fields are marked *