...and it will eventually break. Or maybe it won't. Opening multiple tabs doesn't help either. Would it be possible to restart the driver every x searches? (A sketch of this appears after the script below.) Bypassing the CAPTCHA feels a bit hostile to me... be a good internet citizen™... Keep in mind that SO aims to be one giant, long-tail FAQ. Something that works this month and breaks next month isn't really a suitable FAQ entry. In fact, one could argue the question is inherently "too broad" to answer, since its scope would keep growing to cover each new countermeasure and counter-countermeasure.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import datetime

final_results = []
positions = []

option = webdriver.ChromeOptions()
option.add_argument("--incognito")
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)

try:
    # Database connection string
    DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
    # DWH table to which data is ported
    TABLE_NAME = 'staging.search_url'
    # Connecting to the DB
    conn = psycopg2.connect(DSN)
    print("Database connected...")
    cur = conn.cursor()
    cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
    print('Database connection failed:', error)
    quit()

def get_products(url):
    # Loads a search results page in the shared browser and returns
    # the non-empty product names found on it.
    browser.get(url)
    names = browser.find_elements_by_xpath("//span[@class='pymv4e']")
    product_name = [x.text for x in names]
    # Keep only names with more than two non-whitespace characters
    return [x for x in product_name if len(x.strip()) > 2]

##################################
search_url_fetch="""select url_to_be_searched from staging.search_url where id in(65,66,67,68)"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
serach_url_list = psql_cursor.fetchall()
print('Fetched DB values')
##################################

for row in search_url_list:
    # fetchall() returns one-element tuples; take the URL directly
    # instead of stringifying the tuple and stripping punctuation
    new_url = row[0]
    print('Passed URL: ' + new_url)
    print("\n")

    filtered = get_products(new_url)
    if not filtered:
        # No results: retry with 'kaufen' appended to the query and
        # keep the returned list (the original discarded it)
        new_url = new_url + '+kaufen'
        filtered = get_products(new_url)
        print('Modified URL: ' + new_url)

    if filtered:
        print(filtered)
        # One position string per product, starting at 1
        positions = [str(x) for x in range(1, len(filtered) + 1)]
        global_position = len(positions)
        print('Global position first: ' + str(global_position))
        print("\n")

        company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
        # List comprehension pulls the text out of the selenium objects
        company = [x.text for x in company_name_list]
        print('Company Name:')
        print(company, '\n')


        price_list = browser.find_elements_by_xpath("//div[@class='e10twf T4OwTb']")
        price = [x.text for x in price_list]
        print('Price:')
        print(price)
        print("\n")

        find_href = browser.find_elements_by_xpath(
            "//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
        urls = [my_href.get_attribute("href") for my_href in find_href]
        print('URLS:')
        print(urls)
        print("\n")

        print('Final Result: ')
        # zip() pairs position, name, URL, company and price per product;
        # materialise it as a list of row tuples for the INSERT below
        final_results = list(zip(positions, filtered, urls, company, price))
        print(final_results)
        print("\n")

        print('Global position end: ' + str(global_position))
        # Insert one row per product; the original indexed a nested tuple
        # with a while loop and ran past the end of the data
        for d in final_results:
            print(d)
            cur.execute("""INSERT INTO staging.pla_crawler_results(position, product_name, url, company, price)
                           VALUES (%s, %s, %s, %s, %s)""", d)
            print('Inserted successfully')
            conn.commit()
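
As for restarting the driver every x searches: one approach is to quit and recreate the Chrome instance every N iterations of the URL loop, with a randomized pause between requests to keep the request rate polite. A minimal sketch, reusing the driver path and options from the script above; RESTART_EVERY and make_browser are illustrative names, not part of the original script:

import random
import time

RESTART_EVERY = 25  # hypothetical batch size; tune as needed

def make_browser():
    # Fresh incognito Chrome, same setup as the script above
    opt = webdriver.ChromeOptions()
    opt.add_argument("--incognito")
    return webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver',
                            chrome_options=opt)

browser = make_browser()
for i, row in enumerate(search_url_list):
    if i > 0 and i % RESTART_EVERY == 0:
        browser.quit()            # drop cookies, cache and the old session
        # get_products() reads the module-level 'browser', so rebinding
        # the global name here is enough for the next batch
        browser = make_browser()
    products = get_products(row[0])
    time.sleep(random.uniform(2, 6))  # polite, randomized throttle between searches

A fresh profile discards cookies and session state, which can postpone rate limiting, but it is no guarantee against a CAPTCHA; slowing the request rate down is the more reliable lever.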