Python: I can't add the ImagesPipeline to my Scrapy script


I'm working on a script to download images from any website, and I'm running into some trouble. I know the code is badly written, but... I need it for a course that uses Scrapy, so here it goes:

When I run the script, the terminal shows the list of URLs parsed into 'image_urls',

but the images never get downloaded. I went through all of the output and found that, apparently, the ItemPipeline is not being loaded:

[scrapy] INFO: Enabled item pipelines:[]
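
From what I understand of the docs, Scrapy only enables a pipeline when ITEM_PIPELINES is present in the settings the crawler actually runs with, and since this is a standalone script I assume those settings have to go into the dict passed to CrawlerProcess. This is a sketch of the kind of variant I tried, without success (the paths are from my machine):

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess({
    'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
    # ITEM_PIPELINES maps each pipeline's import path to a priority
    'ITEM_PIPELINES': {'scrapy.pipelines.images.ImagesPipeline': 1},
    # ImagesPipeline disables itself when IMAGES_STORE is not set
    # (it also needs Pillow installed to process the images)
    'IMAGES_STORE': '/home/bodhidharma/Escritorio/Practicas/Images/',
})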
I'll paste the full script below; I've tried a lot of different settings, but nothing has worked. I need your help:

from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy import Request
from scrapy.item import Item, Field
from scrapy.spiders import CrawlSpider, Rule
from scrapy.xlib.pydispatch import dispatcher
from twisted.internet import reactor
from scrapy import signals
from scrapy.utils.project import get_project_settings
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.pipelines.images import ImagesPipeline


class HackerWayItem(Item):
    image_urls = Field()
    images = Field()

class BloggerSpider(CrawlSpider):
    name="TheHackerWay"
    start_urls=['https://thehackerway.com']
    allowed_domains=['thehackerway.com']
    #custom_setting ={
    #   'ITEM_PIPELINES' : 'scrapy.pipelines.images.ImagesPipeline',
    #   'IMAGES_STORE': '/home/bodhidharma/Escritorio/Practicas/Images/',
    #   'MEDIA_ALLOW_REDIRECTS': 'True'
    #}
    #settings = Settings()
    #settings = get_project_settings()
    #settings.set('ITEM_PIPELINES':     {'scrapy.pipelines.images.ImagesPipeline':1})
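    #   (note on the attempts above: the attribute has to be spelled
    #   custom_settings -- plural -- ITEM_PIPELINES has to be a dict like
    #   {'scrapy.pipelines.images.ImagesPipeline': 1} rather than a plain
    #   string, and settings.set() takes a comma, not a colon, between
    #   the setting name and its value)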
    rules = [Rule(LinkExtractor(allow=['.'], allow_domains=['thehackerway.com']), callback='parse_blog')]

def parse_blog(self, response):
    print "link parseado %s" % response.url
    hxs = Selector(response)
    item = HackerWayItem()
    #urls = hxs.xpath("//img/@src").extract()
    #for u in urls:
    #item ['image_urls']=u
    item ['image_urls'] = hxs.xpath("//img/@src").extract()
    #item ['image_urls']=hxs.xpath("//*[@id='post-3762']/div[2]/p[24]/a/img/@src").extract()
    #item ['file_urls']=hxs.xpath("@href").extract()
    yield item
    #print (self.settings.attributes.values())
    #print (item)
    #return item # Retornando el Item.


def catch_item(sender, item, **kwargs):
    #print ("Item Extraido: ")#, item
    pass

if __name__ == '__main__':
    dispatcher.connect(catch_item, signal=signals.item_passed)
    dispatcher.connect(reactor.stop, signal=signals.spider_closed)


    spider = BloggerSpider()

    process = CrawlerProcess({
        'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
    })

    process.crawl(spider)
    print ("\n[+] Starting scrapy engine...")
    #crawler.start()
    #reactor.run()

    process.start()

Your source file is indented incorrectly. Also, you have the custom_settings that would enable the ImagesPipeline commented out; couldn't that be the cause?

Hi! I tried the custom_settings configuration, but it didn't work (you can try it yourself; the script above is already the complete one). About the indentation problem... on which line?
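
For reference, a minimal sketch of what the comment seems to point at, assuming the built-in ImagesPipeline and the same storage path (untested; imports and HackerWayItem as in the original script): custom_settings spelled correctly, ITEM_PIPELINES as a dict, and parse_blog indented inside the class so the CrawlSpider rule can find its callback.

class BloggerSpider(CrawlSpider):
    name = "TheHackerWay"
    start_urls = ['https://thehackerway.com']
    allowed_domains = ['thehackerway.com']
    # custom_settings (plural) is read from the spider class when the
    # crawler is built, so the pipeline is enabled without touching
    # the dict passed to CrawlerProcess
    custom_settings = {
        'ITEM_PIPELINES': {'scrapy.pipelines.images.ImagesPipeline': 1},
        'IMAGES_STORE': '/home/bodhidharma/Escritorio/Practicas/Images/',
        'MEDIA_ALLOW_REDIRECTS': True,
    }
    rules = [Rule(LinkExtractor(allow_domains=['thehackerway.com']),
                  callback='parse_blog')]

    # the callback must live inside the class, otherwise the rule
    # never finds it and no items are yielded at all
    def parse_blog(self, response):
        item = HackerWayItem()
        # urljoin turns relative <img src> values into absolute URLs,
        # which ImagesPipeline needs in order to download anything
        item['image_urls'] = [response.urljoin(src)
                              for src in response.xpath('//img/@src').extract()]
        yield item

Since custom_settings is only picked up from the spider class, the documented pattern is to pass the class itself, i.e. process.crawl(BloggerSpider), rather than instantiating the spider first.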