Python selenium.common.exceptions.InvalidSessionIdException: Message: invalid session id

Python selenium.common.exceptions.InvalidSessionIdException: Message: invalid session id — tags: python, selenium, selenium-webdriver. Question code (originally machine-translated): `from selenium.webdriver import Chrome`, `import csv`, `import prodscraper`, `import linkScraper`, `def main(): ...`, `if __name__ == "__main__": main()`. Reviewer comment: "Hello, please fix your formatting so the code is easier to read." The code begins: header = ["ID", "Name", "Rating", "Reviews", "Desc", "Spec", "Overview",

from selenium.webdriver import Chrome; import csv; import prodscraper; import linkScraper

def main():
    ...

if __name__ == "__main__":
    main()

Hello, please fix your formatting so that the code is easier to read.
header = ["ID", "Name", "Rating", "Reviews", "Desc", "Spec", "Overview",
          "Price", "Brand", "Breadcrumb", "URL", "Images"]

moviedata_csv_file = open('productData.csv', 'a+',
                          newline='', encoding="utf-8")
writer = csv.writer(moviedata_csv_file)
writer.writerow(header)
print("To scrape the entire website, type \'all\' below.")
url = input("Enter a valid url: ")

if url != 'all':
    start = int(input("Start index: "))
    end = int(input("Ending index: "))
    urls = linkScraper.GetProductLinks(url, start, end)
    driver = Chrome("chromedriver")
    for x in urls:
        print("Processing:",x)
        writer.writerow(prodscraper.Scrap(x, driver))
else:
    parentUrl = linkScraper.GetAllValidProductUrl()
    for u in parentUrl:
        urls = linkScraper.GetProductLinks(u, 1, 5)
        driver = Chrome("chromedriver")
        for x in urls:
            print("Processing:", x)
            writer.writerow(prodscraper.Scrape(x, driver))

#driver.close()
driver.quit()



import csv

from selenium.webdriver import Chrome

import linkScraper
import prodscraper

def main():
    """Scrape product data into productData.csv.

    Prompts for a URL (or 'all' to scrape every known category), collects
    product links via linkScraper, scrapes each product page with
    prodscraper, and appends one CSV row per product.
    """
    header = ["ID", "Name", "Rating", "Reviews", "Desc", "Spec", "Overview",
              "Price", "Brand", "Breadcrumb", "URL", "Images"]

    # Context manager guarantees the CSV file is closed even if scraping
    # fails (the original opened it and never closed it).
    with open('productData.csv', 'a+', newline='', encoding="utf-8") as product_csv_file:
        writer = csv.writer(product_csv_file)
        writer.writerow(header)
        print("To scrape the entire website, type 'all' below.")
        url = input("Enter a valid url: ")

        # Create ONE driver and reuse it for every link. The original built a
        # fresh Chrome per loop iteration and never quit the old ones — those
        # leaked sessions are a classic cause of InvalidSessionIdException.
        driver = Chrome("chromedriver")
        try:
            if url != 'all':
                start = int(input("Start index: "))
                end = int(input("Ending index: "))
                for link in linkScraper.GetProductLinks(url, start, end):
                    print("Processing:", link)
                    # NOTE(review): original mixed prodscraper.Scrap and
                    # prodscraper.Scrape; standardized on Scrape — confirm
                    # the actual name exported by prodscraper.
                    writer.writerow(prodscraper.Scrape(link, driver))
            else:
                # 'all' mode: walk every parent category, pages 1-5 each.
                for parent_url in linkScraper.GetAllValidProductUrl():
                    for link in linkScraper.GetProductLinks(parent_url, 1, 5):
                        print("Processing:", link)
                        writer.writerow(prodscraper.Scrape(link, driver))
        finally:
            # Always release the browser session, even on error.
            driver.quit()


if __name__ == "__main__":
    main()