Python 使用 QTimer 在无限循环中运行函数

Python 使用QTimer在无限循环中运行函数,python,loops,pyqt5,qtimer。我是Python新手,仍在努力学习,但我无法处理这个问题。我想在无限循环中运行一些函数(在类中)。因为这是一个QApplication,所以我知道我应该用QTimer来完成它。然而,在探索如何做到这一点时,我找不到一个可行的替代方案。一个常见的解决方案是: timer = QTimer() timer.timeout.connect(function) timer.start(60000) 但是,当我将这些插入到代码中时,没有什么区别。我试图在函数、类等下插入它,但没有得到结果。我要循环的函数如下

我是Python新手,仍在努力学习,但我无法处理这个问题。我想在无限循环中运行一些函数(在类中)。因为这是一个QApplication,所以我知道我应该用QTimer来完成它。然而,在探索如何做到这一点时,我找不到一个可行的替代方案。一个常见的解决方案是:

timer = QTimer()
timer.timeout.connect(function)
timer.start(60000)
但是,当我将这些插入到代码中时,没有什么区别。我试图在函数、类等下插入它,但没有得到结果。我要循环的函数如下:

__author__ = 'pc'
import requests
from bs4 import BeautifulSoup
from PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets
import sqlite3
import sys, getopt, time
from PyQt5.QtCore import QTimer

records = []  # accumulated scrape results; consumed later by process_records()

def scrape_page(url, html):
    """Parse one page's HTML and append its non-empty board lines to records.

    Parameters
    ----------
    url : str
        The page's URL (currently unused inside this function).
    html : str
        Full HTML text of the rendered page.
    """
    soup = BeautifulSoup(html, 'html.parser')
    data = soup.find('div', class_='tablo_dual_board')
    # find() returns None when the board div is absent; skip such pages
    # explicitly instead of masking every possible error with a bare except.
    if data is None:
        return
    lines = data.text.splitlines()
    records.append(list(filter(None, lines)))

def process_records():
    """Report how many records were collected; DB insertion is still a TODO."""
    # add record to database ...
    record_count = len(records)
    print('process records:', record_count)

def generate_urls():
    """Return the absolute URL of every live-football match on the index page.

    Fetches the live-football listing and collects the href of each event
    link (<a class="c-events__name">), prefixing the site root.
    """
    onexurl = "https://1xbahis19.com/en/live/Football/"
    reply = requests.get(onexurl)
    soup = BeautifulSoup(reply.content, "html.parser")
    # NOTE: the original also queried ul#games_content but never used the
    # result; that dead lookup has been removed.
    links = soup.find_all("a", {"class": "c-events__name"})
    return ["https://1xbahis19.com/en/" + link.get("href") for link in links]

class WebPage(QtWebEngineWidgets.QWebEnginePage):
    """Headless page that loads a queue of URLs one at a time, scraping each."""

    def __init__(self):
        super(WebPage, self).__init__()
        self.loadFinished.connect(self.handleLoadFinished)

    def start(self, urls):
        """Store the URL queue and kick off loading of the first page."""
        self._urls = iter(urls)
        self.fetchNext()

    def fetchNext(self):
        """Load the next queued URL; return False once the queue is empty."""
        try:
            next_url = next(self._urls)
        except StopIteration:
            return False
        self.load(QtCore.QUrl(next_url))
        return True

    def processCurrentPage(self, html):
        """Scrape the page just rendered; after the last URL, finish and quit."""
        scrape_page(self.url().toString(), html)
        if self.fetchNext():
            return
        process_records()
        print(records)
        QtWidgets.qApp.quit()

    def handleLoadFinished(self):
        # toHtml() is asynchronous: it invokes the callback with the HTML text.
        self.toHtml(self.processCurrentPage)

# Application entry point: run one scrape pass, then re-trigger it each minute.
app = QtWidgets.QApplication(sys.argv)
webpage = WebPage()
webpage.start(generate_urls())
# BUG FIX: the original connected the timeout to the WebPage *class*
# (timer.timeout.connect(WebPage)), which merely constructed a throwaway
# page object every tick and never re-ran the scrape.  The timer must
# restart the crawl on the existing page instead.
timer = QTimer()
timer.timeout.connect(lambda: webpage.start(generate_urls()))
timer.start(60000)  # milliseconds: fire once every 60 seconds
app.exec_()

有人能帮忙吗?

我想你应该定期运行爬虫。下面的脚本将每60秒抓取一次所有URL。脚本中的信号(signal)部分提供了一种终止无限循环的方法——只需按 Ctrl+C(即 KeyboardInterrupt,键盘中断),它就会立即停止。

import requests
from bs4 import BeautifulSoup
from PyQt5 import QtCore, QtWidgets, QtWebEngineWidgets
import sqlite3
import sys, getopt, time
from PyQt5.QtCore import QTimer

import signal
# Restore the default SIGINT handler so pressing Ctrl+C (KeyboardInterrupt)
# terminates the script immediately, even while the Qt event loop is running.
signal.signal(signal.SIGINT, signal.SIG_DFL)

records = []  # pages scraped during the current run; cleared by process_records()

def scrape_page(url, html):
    """Parse one rendered page and append its non-empty board lines to records.

    Parameters
    ----------
    url : str
        Page URL, echoed for progress logging.
    html : str
        Full HTML of the page as rendered by QtWebEngine.
    """
    print('scraping page:', url)
    soup = BeautifulSoup(html, 'html.parser')
    data = soup.find('div', class_='tablo_dual_board')
    # find() yields None when the board div is absent; skip that page
    # explicitly rather than hiding every error behind a bare except.
    if data is None:
        return
    non_empty = list(filter(None, data.text.splitlines()))
    records.append(non_empty)

def process_records():
    """Handle the finished batch, reset state, and schedule the next pass."""
    # add record to database ...
    print('processed records:', len(records))
    # clear the current records
    records.clear()
    # re-run after a timeout
    QTimer.singleShot(60000, run)

def run():
    """Start one full scraping pass over a freshly generated URL list."""
    print('running scraper...')
    urls = generate_urls()
    webpage.start(urls)

def generate_urls():
    """Fetch the live-football index page and return each match's absolute URL."""
    print('generating urls...')
    onexurl = "https://1xbahis19.com/en/live/Football/"
    reply = requests.get(onexurl)
    soup = BeautifulSoup(reply.content, "html.parser")
    # NOTE: the original also looked up ul#games_content but never used the
    # result; that dead lookup has been removed.
    links = soup.find_all("a", {"class": "c-events__name"})
    return ["https://1xbahis19.com/en/" + link.get("href") for link in links]

class WebPage(QtWebEngineWidgets.QWebEnginePage):
    """Headless QtWebEngine page that walks a URL queue, scraping each page."""

    def __init__(self):
        super(WebPage, self).__init__()
        self.loadFinished.connect(self.handleLoadFinished)

    def start(self, urls):
        """Store the queue of URLs and load the first one."""
        self._urls = iter(urls)
        self.fetchNext()

    def fetchNext(self):
        """Advance to the next URL; return False once the queue is exhausted."""
        try:
            next_url = next(self._urls)
        except StopIteration:
            return False
        self.load(QtCore.QUrl(next_url))
        return True

    def processCurrentPage(self, html):
        """Scrape the rendered page; hand off to process_records at the end."""
        scrape_page(self.url().toString(), html)
        if not self.fetchNext():
            # Queue drained: process the batch (which schedules the next run).
            process_records()

    def handleLoadFinished(self):
        # toHtml() is asynchronous; it calls back with the page's HTML string.
        self.toHtml(self.processCurrentPage)

# Application entry point: build the Qt app, create the shared WebPage
# instance used by run(), and start the first scraping pass before
# entering the event loop (process_records re-schedules run() each cycle).
app = QtWidgets.QApplication(sys.argv)
webpage = WebPage()
run()
app.exec_()

你是从哪里学来这种写法的,是谁让你用定时器的?@eyllanesc 我是从其他人那里学来的。再次感谢你,ekhumoro,但我恐怕没能让它工作:它可以完美地运行一次,但不会循环。顺便问一下,timer.start(60000) 指的是60秒而不是60分钟,对吗?另外,我已经直接复制并添加了您的代码,所以我认为它正在自行调用 run()。@AhmetUluer 请参阅我更新后的答案。我认为您的脚本的主要问题是:您需要删除 processCurrentPage 中在处理完当前一批URL后退出应用程序的那一行。非常感谢您,您为我节省了很多时间。它工作得很好。