Python - Downloading images from Google Image Search?


I want to download all of the images from a Google image search using Python. The code I am using sometimes seems to have a problem. My code is:

import os
import sys
import time
from urllib import FancyURLopener
import urllib2
import simplejson

# Define search term
searchTerm = "parrot"

# Replace spaces ' ' in search term for '%20' in order to comply with request
searchTerm = searchTerm.replace(' ','%20')


# Start FancyURLopener with defined version 
class MyOpener(FancyURLopener):
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'

myopener = MyOpener()

# Set count to 0
count = 0

for i in range(0, 10):
    # Notice that the start changes for each iteration in order to request a new set of images for each loop
    url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+searchTerm+'&start='+str(i*10)+'&userip=MyIP')
    print url
    request = urllib2.Request(url, None, {'Referer': 'testing'})
    response = urllib2.urlopen(request)

    # Get results using JSON
    results = simplejson.load(response)
    data = results['responseData']
    dataInfo = data['results']

    # Iterate for each result and get unescaped url
    for myUrl in dataInfo:
        count = count + 1
        my_url = myUrl['unescapedUrl']
        myopener.retrieve(myUrl['unescapedUrl'], str(count)+'.jpg')
After downloading a few pages, I get an error like the following:

Traceback (most recent call last):
  File "C:\Python27\img_google3.py", line 37, in <module>
    dataInfo = data['results']
TypeError: 'NoneType' object has no attribute '__getitem__'
What should I do?

The Google Image Search API is deprecated; you need to use the Google Custom Search API to achieve what you want. To fetch the images you need to do this:

import urllib2
import simplejson

fetcher = urllib2.build_opener()
searchTerm = 'parrot'
startIndex = 0
searchUrl = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + searchTerm + "&start=" + str(startIndex)
f = fetcher.open(searchUrl)
deserialized_output = simplejson.load(f)
This will give you 4 results as JSON; you need to iteratively get the results by incrementing the startIndex in the API request.
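For example, a minimal pagination sketch, assuming the (now deprecated) AJAX endpoint still responds; it advances startIndex by 4 per request and stops when responseData comes back as None, which is the same condition behind the TypeError above:

import urllib2
import simplejson

fetcher = urllib2.build_opener()
searchTerm = 'parrot'
imageUrls = []
for startIndex in range(0, 20, 4):  # the API returns 4 results per page by default
    searchUrl = ("http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q="
                 + searchTerm + "&start=" + str(startIndex))
    results = simplejson.load(fetcher.open(searchUrl))
    if results['responseData'] is None:  # throttled, or the endpoint is gone
        break
    for item in results['responseData']['results']:
        imageUrls.append(item['unescapedUrl'])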

To fetch the actual image files, you need to use a library like PIL.

For example, to access the first image, you would need to do the following:

import urllib
import urllib2
import simplejson
import cStringIO
from PIL import Image

fetcher = urllib2.build_opener()
searchTerm = 'parrot'
startIndex = 0
searchUrl = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + searchTerm + "&start=" + str(startIndex)
f = fetcher.open(searchUrl)
deserialized_output = simplejson.load(f)
imageUrl = deserialized_output['responseData']['results'][0]['unescapedUrl']
image_data = cStringIO.StringIO(urllib.urlopen(imageUrl).read())
img = Image.open(image_data)

I have modified my code. Now it can download 100 images for a given query, and the images are downloaded at full resolution, that is, the original images.

I am using urllib2 and Beautiful Soup to download the images:

from bs4 import BeautifulSoup
import urllib2
import os
import json

def get_soup(url,header):
    return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),'html.parser')


query = raw_input("query image")  # you can change the query for the image here
image_type = "ActiOn"
query = query.split()
query = '+'.join(query)
url="https://www.google.co.in/search?q="+query+"&source=lnms&tbm=isch"
print url
#add the directory for your image here
DIR="Pictures"
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
soup = get_soup(url,header)


ActualImages = []  # contains the link for the large original image, and the type of image
for a in soup.find_all("div", {"class": "rg_meta"}):
    meta = json.loads(a.text)
    link, Type = meta["ou"], meta["ity"]
    ActualImages.append((link, Type))

print  "there are total" , len(ActualImages),"images"

if not os.path.exists(DIR):
    os.mkdir(DIR)
DIR = os.path.join(DIR, query.split()[0])

if not os.path.exists(DIR):
    os.mkdir(DIR)
###print images
for i, (img, Type) in enumerate(ActualImages):
    try:
        req = urllib2.Request(img, headers=header)
        raw_img = urllib2.urlopen(req).read()

        cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
        print cntr
        if len(Type) == 0:
            f = open(os.path.join(DIR, image_type + "_" + str(cntr) + ".jpg"), 'wb')
        else:
            f = open(os.path.join(DIR, image_type + "_" + str(cntr) + "." + Type), 'wb')


        f.write(raw_img)
        f.close()
    except Exception as e:
        print "could not load : "+img
        print e

I hope this helps you.

Google deprecated their API, and scraping Google is complicated, so I would suggest using the Bing API instead:


Google is not so good, and Microsoft is not so evil.
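For example, a minimal sketch against the Bing Image Search v7 REST endpoint; the endpoint URL, the Ocp-Apim-Subscription-Key header, and the value/contentUrl response fields follow Microsoft's documented conventions, but the key here is a placeholder you must replace with your own:

import requests

subscription_key = "YOUR_BING_API_KEY"  # placeholder: use your own Azure key
search_url = "https://api.bing.microsoft.com/v7.0/images/search"
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
params = {"q": "parrot", "count": 35}

response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
results = response.json()

# each result carries the full-size image URL in "contentUrl"
image_urls = [img["contentUrl"] for img in results["value"]]
print(image_urls[:5])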

I haven't looked into your code, but here is an example solution made with Selenium that tries to get 400 pictures from the search term:

# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib2

searchterm = 'vannmelon' # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
browser = webdriver.Firefox()
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0

if not os.path.exists(searchterm):
    os.mkdir(searchterm)

for _ in range(500):
    browser.execute_script("window.scrollBy(0,10000)")

for x in browser.find_elements_by_xpath("//div[@class='rg_meta']"):
    counter = counter + 1
    print "Total Count:", counter
    print "Succsessful Count:", succounter
    print "URL:",json.loads(x.get_attribute('innerHTML'))["ou"]

    img = json.loads(x.get_attribute('innerHTML'))["ou"]
    imgtype = json.loads(x.get_attribute('innerHTML'))["ity"]
    try:
        req = urllib2.Request(img, headers=header)
        raw_img = urllib2.urlopen(req).read()
        File = open(os.path.join(searchterm, searchterm + "_" + str(counter) + "." + imgtype), "wb")
        File.write(raw_img)
        File.close()
        succounter = succounter + 1
    except:
        print "can't get img"

print succounter, "pictures successfully downloaded"
browser.close()

You can also use Selenium with Python. Here is how:

from selenium import webdriver
import urllib
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome('C:/Python27/Scripts/chromedriver.exe')
word="apple"
url="http://images.google.com/search?q="+word+"&tbm=isch&sout=1"
driver.get(url)
imageXpathSelector='//*[@id="ires"]/table/tbody/tr[1]/td[1]/a/img'
img=driver.find_element_by_xpath(imageXpathSelector)
src=(img.get_attribute('src'))
urllib.urlretrieve(src, word+".jpg")
driver.close()
(This code works on Python 2.7.) Note that you should install the Selenium package with 'pip install selenium', and you need to download chromedriver.exe.

Unlike other web-scraping techniques, Selenium opens the browser and downloads the items, because Selenium's job is testing rather than scraping.

Adding to this, to download any number of images from the search results, we need to simulate a click on the "Show more results" button after the first 400 results are loaded:

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import json
import urllib2
import sys
import time

# adding path to geckodriver to the OS environment variable
# assuming that it is stored at the same path as this script
os.environ["PATH"] += os.pathsep + os.getcwd()
download_path = "dataset/"

def main():
    searchtext = sys.argv[1] # the search query
    num_requested = int(sys.argv[2]) # number of images to download
    number_of_scrolls = num_requested / 400 + 1 
    # number_of_scrolls * 400 images will be opened in the browser

    if not os.path.exists(download_path + searchtext.replace(" ", "_")):
        os.makedirs(download_path + searchtext.replace(" ", "_"))

    url = "https://www.google.co.in/search?q="+searchtext+"&source=lnms&tbm=isch"
    driver = webdriver.Firefox()
    driver.get(url)

    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
    extensions = {"jpg", "jpeg", "png", "gif"}
    img_count = 0
    downloaded_img_count = 0

    for _ in xrange(number_of_scrolls):
        for __ in xrange(10):
            # multiple scrolls needed to show all 400 images
            driver.execute_script("window.scrollBy(0, 1000000)")
            time.sleep(0.2)
        # to load next 400 images
        time.sleep(0.5)
        try:
            driver.find_element_by_xpath("//input[@value='Show more results']").click()
        except Exception as e:
            print "Less images found:", e
            break

    # imgs = driver.find_elements_by_xpath('//div[@class="rg_meta"]') # not working anymore
    imgs = driver.find_elements_by_xpath('//div[contains(@class,"rg_meta")]')
    print "Total images:", len(imgs), "\n"
    for img in imgs:
        img_count += 1
        img_url = json.loads(img.get_attribute('innerHTML'))["ou"]
        img_type = json.loads(img.get_attribute('innerHTML'))["ity"]
        print "Downloading image", img_count, ": ", img_url
        try:
            if img_type not in extensions:
                img_type = "jpg"
            req = urllib2.Request(img_url, headers=headers)
            raw_img = urllib2.urlopen(req).read()
            f = open(download_path+searchtext.replace(" ", "_")+"/"+str(downloaded_img_count)+"."+img_type, "wb")
            f.write(raw_img)
            f.close()
            downloaded_img_count += 1
        except Exception as e:
            print "Download failed:", e
        finally:
            print
        if downloaded_img_count >= num_requested:
            break

    print "Total downloaded: ", downloaded_img_count, "/", img_count
    driver.quit()

if __name__ == "__main__":
    main()

The complete code is here.

I know this question is old, but I ran across it recently and none of the previous answers work anymore. So I wrote this script to gather images from Google. As of right now, it can download as many images as are available.

Here is a github link to it as well.

Disclaimer: the gathered images should only be used for research and educational purposes, due to copyright issues.

from bs4 import BeautifulSoup as Soup
import urllib2
import json
import urllib

#programtically go through google image ajax json return and save links to list#
#num_images is more of a suggestion                                            #  
#it will get the ceiling of the nearest 100 if available                       #
def get_links(query_string, num_images):
    #initialize place for links
    links = []
    #step by 100 because each return gives up to 100 links
    for i in range(0,num_images,100):
        url = ('https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q='+query_string+
               '&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start='+str(i)+
               '&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')

        #set user agent to avoid 403 error
        request = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0'}) 

        #returns json formatted string of the html
        json_string = urllib2.urlopen(request).read() 

        #parse as json
        page = json.loads(json_string) 

        #html found here
        html = page[1][1] 

        #use BeautifulSoup to parse as html
        new_soup = Soup(html,'lxml')

        #all img tags, only returns results of search
        imgs = new_soup.find_all('img')

        #loop through images and put src in links list
        for j in range(len(imgs)):
            links.append(imgs[j]["src"])

    return links

#download images                              #
#takes list of links, directory to save to    # 
#and prefix for file names                    #
#saves images in directory as a one up number #
#with prefix added                            #
#all images will be .jpg                      #
def get_images(links,directory,pre):
    for i in range(len(links)):
        urllib.urlretrieve(links[i], "./"+directory+"/"+str(pre)+str(i)+".jpg")

#main function to search images                 #
#takes two lists, base term and secondary terms #
#also takes number of images to download per    #
#combination                                    #
#it runs every combination of search terms      #
#with base term first then secondary            #
def search_images(base,terms,num_images):
    for y in range(len(base)):
        for x in range(len(terms)):
            all_links = get_links(base[y]+'+'+terms[x],num_images)
            get_images(all_links,"images",x)

if __name__ == '__main__':
    terms = ["cars","numbers","scenery","people","dogs","cats","animals"]
    base = ["animated"]
    search_images(base,terms,1000)

Here is my latest Google image snarfer, written in Python, using Selenium and headless Chrome.

It requires python-selenium, the chromium driver, and a module from pip called retry; a rough install sketch is below.
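Assuming pip and a Debian-style package name for the driver (adjust for your platform):

pip install selenium retry
# the driver ships as a system package on many distros, e.g. Debian/Ubuntu:
sudo apt-get install chromium-driver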

Link:

Example usage:

google-images.py tiger 10 --opts isz:lt,islt:svga,itp:photo > urls.txt
parallel=5
user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
(i=0; while read url; do wget -e robots=off -T10 --tries 10 -U"$user_agent" "$url" -O`printf %04d $i`.jpg & i=$(($i+1)) ; [ $(($i % $parallel)) = 0 ] && wait; done < urls.txt; wait)
Help output:

$ google-images.py --help
usage: google-images.py [-h] [--safe SAFE] [--opts OPTS] query n

Fetch image URLs from Google Image Search.

positional arguments:
  query        image search query
  n            number of images (approx)

optional arguments:
  -h, --help   show this help message and exit
  --safe SAFE  safe search [off|active|images]
  --opts OPTS  search options, e.g. isz:lt,islt:svga,itp:photo,ic:color,ift:jpg

Code:

#!/usr/bin/env python3
# requires: selenium, chromium-driver, retry
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import selenium.common.exceptions as sel_ex
import sys
import time
import urllib.parse
from retry import retry
import argparse
import logging

logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger()
retry_logger = None

css_thumbnail = "img.Q4LuWd"
css_large = "img.n3VNCb"
css_load_more = ".mye4qd"
selenium_exceptions = (sel_ex.ElementClickInterceptedException, sel_ex.ElementNotInteractableException, sel_ex.StaleElementReferenceException)

def scroll_to_end(wd):
    wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")

@retry(exceptions=KeyError, tries=6, delay=0.1, backoff=2, logger=retry_logger)
def get_thumbnails(wd, want_more_than=0):
    wd.execute_script("document.querySelector('{}').click();".format(css_load_more))
    thumbnails = wd.find_elements_by_css_selector(css_thumbnail)
    n_results = len(thumbnails)
    if n_results <= want_more_than:
        raise KeyError("no new thumbnails")
    return thumbnails

def get_images(wd, n=20, out=None):
    # scroll and click "load more" until there are enough thumbnails,
    # then click each thumbnail and collect the full-size image URLs
    thumbnails = []
    while len(thumbnails) < n:
        scroll_to_end(wd)
        try:
            thumbnails = get_thumbnails(wd, want_more_than=len(thumbnails))
        except KeyError:
            logger.warning("cannot load enough thumbnails")
            break
    sources = []
    for tn in thumbnails:
        try:
            tn.click()
            time.sleep(0.1)
        except selenium_exceptions:
            continue
        for img in wd.find_elements_by_css_selector(css_large):
            src = img.get_attribute("src")
            if src and src.startswith("http") and src not in sources:
                sources.append(src)
                if out:
                    print(src, file=out)
        if len(sources) >= n:
            break
    return sources

def google_image_search(wd, query, safe="off", n=20, opts='', out=None):
    search_url_t = "https://www.google.com/search?safe={safe}&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img&tbs={opts}"
    search_url = search_url_t.format(q=urllib.parse.quote(query), opts=urllib.parse.quote(opts), safe=safe)
    wd.get(search_url)
    sources = get_images(wd, n=n, out=out)
    return sources

def main():
    parser = argparse.ArgumentParser(description='Fetch image URLs from Google Image Search.')
    parser.add_argument('--safe', type=str, default="off", help='safe search [off|active|images]')
    parser.add_argument('--opts', type=str, default="", help='search options, e.g. isz:lt,islt:svga,itp:photo,ic:color,ift:jpg')
    parser.add_argument('query', type=str, help='image search query')
    parser.add_argument('n', type=int, default=20, help='number of images (approx)')
    args = parser.parse_args()
    opts = Options()
    opts.add_argument("--headless")
    # opts.add_argument("--blink-settings=imagesEnabled=false")
    with webdriver.Chrome(options=opts) as wd:
        sources = google_image_search(wd, args.query, safe=args.safe, n=args.n, opts=args.opts, out=sys.stdout)

main()

I'm adding this because the other code snippets are outdated and no longer worked for me. It downloads 100 images:
from bs4 import BeautifulSoup
import urllib2
import os


class GoogleImageDownloader(object):
    _URL = "https://www.google.co.in/search?q={}&source=lnms&tbm=isch"
    _BASE_DIR = 'GoogleImages'
    _HEADERS = {
        'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }

    def __init__(self):
        query = raw_input("Enter keyword to search images\n")
        self.dir_name = os.path.join(self._BASE_DIR, query.split()[0])
        self.url = self._URL.format(urllib2.quote(query)) 
        self.make_dir_for_downloads()
        self.initiate_downloads()

    def make_dir_for_downloads(self):
        print "Creating necessary directories"
        if not os.path.exists(self._BASE_DIR):
            os.mkdir(self._BASE_DIR)

        if not os.path.exists(self.dir_name):
            os.mkdir(self.dir_name)

    def initiate_downloads(self):
        src_list = []
        soup = BeautifulSoup(urllib2.urlopen(urllib2.Request(self.url,headers=self._HEADERS)),'html.parser')
        for img in soup.find_all('img'):
            if img.has_attr("data-src"):
                src_list.append(img['data-src'])
        print "{} of images collected for downloads".format(len(src_list))
        self.save_images(src_list)

    def save_images(self, src_list):
        print "Saving Images..."
        for i , src in enumerate(src_list):
            try:
                req = urllib2.Request(src, headers=self._HEADERS)
                raw_img = urllib2.urlopen(req).read()
                with open(os.path.join(self.dir_name , str(i)+".jpg"), 'wb') as f:
                    f.write(raw_img)
            except Exception as e:
                print ("could not save image")
                raise e


if __name__ == "__main__":
    GoogleImageDownloader()

Here is a simple example that extracts the image URLs from a search results page (Ecosia, in this example) and downloads them:

from bs4 import BeautifulSoup
import urllib.request

user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent':user_agent} 
urls = ["https://www.ecosia.org/images?q=india%20pan%20card%20example"]
#The url's from which the image is to be extracted.
index = 0

for url in urls:
    request = urllib.request.Request(url,None,headers) #The assembled request
    response = urllib.request.urlopen(request)
    data = response.read() # Read the html result page

    soup = BeautifulSoup(data, 'html.parser')
    
    for link in soup.find_all('img'):   
        #The images are enclosed in 'img' tag and the 'src' contains the url of the image.
        img_url = link.get('src')
        dest = str(index) + ".jpg"  #Destination to store the image.
        try:
            urllib.request.urlretrieve(img_url, dest)
            index += 1
        except:
            continue

Alternatively, you can fetch the result list through SerpApi's Google Images engine and download the files asynchronously with httpx:

import os, time, httpx, asyncio
from urllib.parse import urlparse
from serpapi import GoogleSearch

# https://stackoverflow.com/a/39217788/1291371
async def download_file(url):
    print(f'Downloading {url}')

    # https://stackoverflow.com/a/18727481/1291371
    parsed_url = urlparse(url)
    local_filename = os.path.basename(parsed_url.path)

    os.makedirs('images', exist_ok=True)

    async with httpx.AsyncClient() as client:
        async with client.stream('GET', url) as response:
            # open() is synchronous; stream the body in async chunks instead
            with open(f'images/{local_filename}', 'wb') as f:
                async for chunk in response.aiter_bytes():
                    f.write(chunk)

    return local_filename

async def main():
    start = time.perf_counter()

    params = {
        "engine": "google",
        "ijn": "0",
        "q": "lasagna",
        "tbm": "isch",
        "api_key": os.getenv("API_KEY"),
    }

    search = GoogleSearch(params)
    results = search.get_dict()

    download_files_tasks = [
        download_file(image['original']) for image in results['images_results']
    ]

    await asyncio.gather(*download_files_tasks, return_exceptions=True)

    print(
        f"Downloaded {len(download_files_tasks)} images in {time.perf_counter() - start:0.4f} seconds")

asyncio.run(main())
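
The script reads the SerpApi key from the environment, so assuming you saved it as serpapi_images.py (a hypothetical name), you would run it as:

API_KEY=your_serpapi_key python serpapi_images.py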

And here is a variation for Google Photos shared-album links: it loads the page with Selenium and collects the "video-downloads" URLs from the page source, which point to a ZIP of all the pictures.

from selenium import webdriver
import re

url = "https://photos.app.goo.gl/xxxxxxx"
driver = webdriver.Firefox()
driver.get(url)
html = driver.page_source

urls=re.findall("(?P<url>https?://[^\s\"$]+)", html)

fin = []
for url in urls:
    if "video-downloads" in url:
        fin.append(url)

print("The Following ZIP contains all your pictures")
for url in fin:
    print("-------------------")
    print(url)