Python: How can I improve the speed of an aiohttp crawler?


I have 20,000 URLs to request, but it takes a very long time (more than 4 or 5 hours). With requests + multiprocessing (a pool of 4) it only takes about 3 hours.


I tried aiohttp + multiprocessing, but it doesn't seem to work. Could the code be made as fast as possible, either by optimizing this code or by using any other available technique? Thanks.

I don't know whether the approach below is fast.

import asyncio
import time

import aiohttp
from bs4 import BeautifulSoup
from xlrd import open_workbook
from xlwt import Workbook

url_list = ["https://www.facebook.com", "https://www.baidu.com", "https://www.yahoo.com", ...]
# There are more than 20000 different websites in the list
# Some websites may not be accessible
keywords = ['xxx', 'xxx', ...]
start = time.time()
localtime = time.asctime(time.localtime(time.time()))
print("start time :", localtime)
choose_url=[]
url_title=[]
async def get(url, session):
    try:
        # timeout=0 leaves the request without a finite timeout
        async with session.get(url=url, timeout=0) as response:
            resp = await response.text()
            soup = BeautifulSoup(resp, "lxml")
            title = soup.find("title").text.strip()
            for keyword in keywords:
                if keyword in title:
                    choose_url.append(url)
                    url_title.append(title)
                    print("Successfully got url {} with title {}.".format(url, title))
                    break
    except Exception:
        # unreachable sites and pages without a <title> are silently skipped
        pass

async def main(urls):
    # limit=0 / limit_per_host=0 remove aiohttp's connection-pool caps entirely
    connector = aiohttp.TCPConnector(ssl=False, limit=0, limit_per_host=0)
    async with aiohttp.ClientSession(connector=connector) as session:
        await asyncio.gather(*[get(url, session) for url in urls])
    print("Finalized all requests.")
def write_excel(choose_url, url_title):
    # write choose_url / url_title to an Excel file
    pass
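    # A possible body for this stub, using the xlwt Workbook imported above
    # (the sheet name and output file name are assumptions):
    #     book = Workbook()
    #     sheet = book.add_sheet('result')
    #     for row, (u, t) in enumerate(zip(choose_url, url_title)):
    #         sheet.write(row, 0, u)
    #         sheet.write(row, 1, t)
    #     book.save('result.xls')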

asyncio.run(main(url_list))
write_excel(choose_url, url_title)
localtime = time.asctime(time.localtime(time.time()))
print("now time is  :", localtime)
end = time.time()
print('time used:', end - start)
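One change that often helps with this many URLs (it is not in the code above, so treat it as an assumption to test) is to cap the number of in-flight requests with an asyncio.Semaphore and give every request a finite timeout, rather than removing all limits with limit=0 and timeout=0. A minimal sketch along those lines, reusing url_list from above; the concurrency of 100 and the 30-second timeout are arbitrary values to tune:

import asyncio

import aiohttp
from bs4 import BeautifulSoup

CONCURRENCY = 100    # assumed value; tune it for your network and the target sites

async def fetch_title(url, session, semaphore):
    async with semaphore:    # at most CONCURRENCY requests are in flight at once
        try:
            timeout = aiohttp.ClientTimeout(total=30)    # assumed 30 s cap per request
            async with session.get(url, timeout=timeout) as response:
                html = await response.text()
        except Exception:
            return None      # unreachable or slow site: skip it
    tag = BeautifulSoup(html, "lxml").find("title")
    return (url, tag.text.strip()) if tag else None

async def crawl(urls):
    semaphore = asyncio.Semaphore(CONCURRENCY)
    connector = aiohttp.TCPConnector(ssl=False, limit=CONCURRENCY)
    async with aiohttp.ClientSession(connector=connector) as session:
        return await asyncio.gather(*[fetch_title(u, session, semaphore) for u in urls])

results = [r for r in asyncio.run(crawl(url_list)) if r is not None]

The (url, title) pairs in results can then be filtered against keywords and written out with write_excel exactly as before. The snippet below takes a different route altogether and drives the crawl with the simplified_scrapy thread pool instead of asyncio.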
import time
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'demo_spider'
    start_urls = ["https://www.facebook.com","https://www.baidu.com","https://www.yahoo.com"]  # Entry page
    keywords = ['xxx','xxx']
    choose_url=[]
    url_title=[]
    concurrencyPer1s = 10
    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        title = doc.title
        if title.containsOr(self.keywords):
            self.choose_url.append(url.url)
            self.url_title.append(title.text)
            print("Successfully got url {} with resp's name {}.".format(url, title.text))
    def urlCount(self):
        count = Spider.urlCount(self)
        if count==0:
            SimplifiedMain.setRunFlag(False)
        return count

start = time.time()
localtime = time.asctime(time.localtime(time.time()))
print("start time :", localtime)
SimplifiedMain.startThread(MySpider(), {"concurrency": 600, "concurrencyPer1S": 100, "intervalTime": 0.001, "max_workers": 10})  # Start download
localtime = time.asctime(time.localtime(time.time()))
print("now time is  :", localtime)
end = time.time()
print('time used:', end - start)