如何使用webdriver并行化Python循环?

如何使用webdriver并行化Python循环?（标签：python, loops, selenium-webdriver, parallel-processing）在我下面的代码中，我尝试从不同的URL页面总共导入25页。我目前使用的是一个简单的循环，但它花费了太多的时间。如何并行化此代码以减少执行时间？

在我下面的代码中,我尝试从不同的URL页面总共导入25页。我目前使用的是一个简单的循环,但它花费了太多的时间。如何并行化此代码以减少执行时间

代码如下:

#!/usr/bin/python3
# -*- coding: utf-8 -*-
# NOTE: the original coding cookie contained soft-hyphen characters
# (U+00AD) inside "utf-8", which makes CPython abort at startup with
# "SyntaxError: unknown encoding"; fixed here.

from selenium import webdriver
import statistics as stat
import numpy as np

# Open the odds listing page in a real Firefox instance.
driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

# Store the URL associated with each soccer game shown on the listing.
url_links = [
    a.get_attribute('href')
    for a in driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]')
]

driver.close()
print(len(url_links), '\n')

# Visit each game page sequentially and extract the bookmaker odds.
for link in url_links:
    driver = webdriver.Firefox()
    try:
        driver.get(link)

        # Odds button texts and table header texts.
        odds = [b.text for b in driver.find_elements_by_xpath(
            '//button[contains(@class, "btn btn-default btn-xs btncote")]')]
        header = [h.text for h in driver.find_elements_by_xpath(
            '//th[contains(@width, "20%")]')]

        # One row per outcome triple (assumes the page always yields a
        # multiple of 3 odds cells — TODO confirm against the site).
        odds = np.array([float(o) for o in odds]).reshape(-1, 3)
        header = np.array(header)

        print(odds, '\n')
    finally:
        # BUG FIX: the original line was `driver;close()` — a no-op
        # expression followed by a NameError — so a Firefox instance
        # leaked on every iteration. Always close the browser.
        driver.close()

如果您使用 Python 3.2 及以上版本（`concurrent.futures` 自 3.2 起进入标准库），`ThreadPoolExecutor` 将完美地完成这项工作：

from selenium import webdriver
import statistics as stat
import numpy as np
from concurrent.futures import ThreadPoolExecutor

# Launch a browser on the odds listing page and harvest the link of
# every soccer game shown there.
driver = webdriver.Firefox()
url = 'https://www.coteur.com/cotes-foot.php'
driver.get(url)

# Collect the href attribute of each game anchor into a list.
anchors = driver.find_elements_by_xpath('//a[contains(@href, "match/cotes-")]')
url_links = [anchor.get_attribute('href') for anchor in anchors]

driver.close()
print(len(url_links), '\n')

def sraper(url_link):
    """Scrape the bookmaker odds table of a single game page.

    Opens its own Firefox instance — one per worker, since a WebDriver
    session must not be shared across threads — reads the odds buttons
    and the table header, prints the odds as an (n, 3) array, and always
    closes the browser, even when scraping fails.

    Parameters
    ----------
    url_link : str
        URL of one game page collected from the listing page.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(url_link)

        # Odds button texts and table header texts.
        odds = [b.text for b in driver.find_elements_by_xpath(
            '//button[contains(@class, "btn btn-default btn-xs btncote")]')]
        header = [h.text for h in driver.find_elements_by_xpath(
            '//th[contains(@width, "20%")]')]

        # One row per outcome triple (assumes a multiple of 3 odds cells
        # per page — TODO confirm against the site).
        odds = np.array([float(o) for o in odds]).reshape(-1, 3)
        header = np.array(header)

        print(odds, '\n')
    finally:
        # BUG FIX: the original line was `driver;close()` — a no-op
        # expression followed by a NameError — so every worker thread
        # leaked a Firefox instance. Always close the browser.
        driver.close()
    
# Fan the page scrapes out over a pool of eight worker threads. Tasks
# are submitted eagerly and their results deliberately not collected
# (matching the original lazy, unconsumed `executor.map`): exceptions
# raised inside a worker stay in its Future. The `with` block waits for
# every submitted task before shutting the pool down.
with ThreadPoolExecutor(max_workers=8) as executor:
    for link in url_links:
        executor.submit(sraper, link)