无法下载相对URL为Python Scrapy的图像

无法下载相对URL为Python Scrapy的图像,python,web-crawler,scrapy,scrapy-spider,Python,Web Crawler,Scrapy,Scrapy Spider,我正在使用Scrapy从 然而,我只能得到像这样的图片的相对url 所有名为0.jpg的图像,但如果我尝试使用该绝对url,则无法访问该图像 我的代码: items.py import scrapy class VesselItem(scrapy.Item): name = scrapy.Field() nationality = scrapy.Field() image_urls = scrapy.Field() images = scrapy.Field()

我正在使用Scrapy从

然而,我只能得到像这样的图片的相对url

所有名为0.jpg的图像,但如果我尝试使用该绝对url,则无法访问该图像

我的代码: items.py

import scrapy

class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem

class VesselPipeline(object):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
import scrapy
import string

from vessel.items import VesselItem

class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;

        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()

            imageStr = str(sel.xpath('div[1]/a/picture/img/@src').extract())
            item['image_urls'] = self.page_name + imageStr[3:-2]
            nameStr = str(sel.xpath('div[2]/header/h1/a/text()').extract())
            item['name'] = nameStr[19:-8]
            typeStr = str(sel.xpath('div[2]/div[2]/div[2]/text()').extract())
            item['type'] = typeStr[3:-2]

            return item
class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;
        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()
            imageStr = sel.xpath('./div[@class="small-12 medium-5 large-5 columns"]/a/picture/img/@src').extract()
            imageStr = imageStr[0] if imageStr else 'N/A'
            item['image_urls'] = [self.page_name + imageStr]
            nameStr = sel.xpath('./div/header/h1[@class="subheader"]/a/text()').extract()
            nameStr = ' '.join(' '.join(nameStr).split()) if nameStr else 'N/A'
            item['name'] = nameStr
            typeStr = sel.xpath('.//div[@class="small-4 columns" and contains(text(), "Ship type")]/following-sibling::div/text()').extract()
            typeStr = typeStr[0].strip() if typeStr else 'N/A'
            item['ship_type'] = typeStr
            yield item
class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    ship_type = scrapy.Field()
管道。py

import scrapy

class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem

class VesselPipeline(object):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
import scrapy
import string

from vessel.items import VesselItem

class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;

        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()

            imageStr = str(sel.xpath('div[1]/a/picture/img/@src').extract())
            item['image_urls'] = self.page_name + imageStr[3:-2]
            nameStr = str(sel.xpath('div[2]/header/h1/a/text()').extract())
            item['name'] = nameStr[19:-8]
            typeStr = str(sel.xpath('div[2]/div[2]/div[2]/text()').extract())
            item['type'] = typeStr[3:-2]

            return item
class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;
        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()
            imageStr = sel.xpath('./div[@class="small-12 medium-5 large-5 columns"]/a/picture/img/@src').extract()
            imageStr = imageStr[0] if imageStr else 'N/A'
            item['image_urls'] = [self.page_name + imageStr]
            nameStr = sel.xpath('./div/header/h1[@class="subheader"]/a/text()').extract()
            nameStr = ' '.join(' '.join(nameStr).split()) if nameStr else 'N/A'
            item['name'] = nameStr
            typeStr = sel.xpath('.//div[@class="small-4 columns" and contains(text(), "Ship type")]/following-sibling::div/text()').extract()
            typeStr = typeStr[0].strip() if typeStr else 'N/A'
            item['ship_type'] = typeStr
            yield item
class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    ship_type = scrapy.Field()
血管_spider.py

import scrapy

class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem

class VesselPipeline(object):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
import scrapy
import string

from vessel.items import VesselItem

class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;

        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()

            imageStr = str(sel.xpath('div[1]/a/picture/img/@src').extract())
            item['image_urls'] = self.page_name + imageStr[3:-2]
            nameStr = str(sel.xpath('div[2]/header/h1/a/text()').extract())
            item['name'] = nameStr[19:-8]
            typeStr = str(sel.xpath('div[2]/div[2]/div[2]/text()').extract())
            item['type'] = typeStr[3:-2]

            return item
class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;
        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()
            imageStr = sel.xpath('./div[@class="small-12 medium-5 large-5 columns"]/a/picture/img/@src').extract()
            imageStr = imageStr[0] if imageStr else 'N/A'
            item['image_urls'] = [self.page_name + imageStr]
            nameStr = sel.xpath('./div/header/h1[@class="subheader"]/a/text()').extract()
            nameStr = ' '.join(' '.join(nameStr).split()) if nameStr else 'N/A'
            item['name'] = nameStr
            typeStr = sel.xpath('.//div[@class="small-4 columns" and contains(text(), "Ship type")]/following-sibling::div/text()').extract()
            typeStr = typeStr[0].strip() if typeStr else 'N/A'
            item['ship_type'] = typeStr
            yield item
class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    ship_type = scrapy.Field()
当我运行这个爬行器时,我得到了
异常。ValueError:request url:h中缺少scheme
错误,因为我没有提供绝对url

[vessel] ERROR: Error processing {'image_urls': 'http://vesselfinder.com/vessels/ship-photo/0-224138470-a2fdc783d05a019d00ad9db0cef322f7/0.jpg',
     'name': 'XILGARO ALEANTE',
     'type': 'Sailing vessel'}
    Traceback (most recent call last):
      File "/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py", line 62, in _process_chain
        return process_chain(self.methods[methodname], obj, *args)
      File "/usr/local/lib/python2.7/dist-packages/scrapy/utils/defer.py", line 65, in process_chain
        d.callback(input)
      File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 383, in callback
        self._startRunCallbacks(result)
      File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 491, in _startRunCallbacks
        self._runCallbacks()
    --- <exception caught here> ---
      File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 578, in _runCallbacks
        current.result = callback(current.result, *args, **kw)
      File "/usr/local/lib/python2.7/dist-packages/scrapy/contrib/pipeline/media.py", line 40, in process_item
        requests = arg_to_iter(self.get_media_requests(item, info))
      File "/usr/local/lib/python2.7/dist-packages/scrapy/contrib/pipeline/images.py", line 104, in get_media_requests
        return [Request(x) for x in item.get(self.IMAGES_URLS_FIELD, [])]
      File "/usr/local/lib/python2.7/dist-packages/scrapy/http/request/__init__.py", line 26, in __init__
        self._set_url(url)
      File "/usr/local/lib/python2.7/dist-packages/scrapy/http/request/__init__.py", line 61, in _set_url
        raise ValueError('Missing scheme in request url: %s' % self._url)
    exceptions.ValueError: Missing scheme in request url: h
[vessel]错误:处理{'image\u url'时出错:http://vesselfinder.com/vessels/ship-photo/0-224138470-a2fdc783d05a019d00ad9db0cef322f7/0.jpg',
“名称”:“锡尔加罗阿莱安特”,
“类型”:“帆船”}
回溯(最近一次呼叫最后一次):
文件“/usr/local/lib/python2.7/dist packages/scrapy/middleware.py”,第62行,进程链中
返回过程链(self.methods[methodname],obj,*args)
文件“/usr/local/lib/python2.7/dist packages/scrapy/utils/defer.py”,第65行,进程链中
d、 回调(输入)
文件“/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py”,第383行,在回调中
自启动返回(结果)
文件“/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py”,第491行,在startRunCallbacks中
self.\u runCallbacks()
---  ---
文件“/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py”,第578行,在运行回调中
current.result=回调(current.result,*args,**kw)
文件“/usr/local/lib/python2.7/dist packages/scrapy/contrib/pipeline/media.py”,第40行,进程中项目
请求=arg_to_iter(self.get_media_请求(项目、信息))
文件“/usr/local/lib/python2.7/dist packages/scrapy/contrib/pipeline/images.py”,第104行,在get\u media\u请求中
return[item.get(self.IMAGES\u url\u字段,[])中x的请求(x)]
文件“/usr/local/lib/python2.7/dist-packages/scrapy/http/request/_-init__.py”,第26行,在_-init中__
自我设置url(url)
文件“/usr/local/lib/python2.7/dist packages/scrapy/http/request/_init__.py”,第61行,位于集合url中
raise VALUERROR('请求url中缺少方案:%s'%self.\u url)
exceptions.ValueError:请求url:h中缺少方案

我该如何解决这个问题。对于像这样的站点,有没有什么特殊的方法可以获取图像(或其绝对url)。

我认为下面的代码可以做到这一点(对代码的更改很少)

血管_spider.py

import scrapy

class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem

class VesselPipeline(object):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
import scrapy
import string

from vessel.items import VesselItem

class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;

        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()

            imageStr = str(sel.xpath('div[1]/a/picture/img/@src').extract())
            item['image_urls'] = self.page_name + imageStr[3:-2]
            nameStr = str(sel.xpath('div[2]/header/h1/a/text()').extract())
            item['name'] = nameStr[19:-8]
            typeStr = str(sel.xpath('div[2]/div[2]/div[2]/text()').extract())
            item['type'] = typeStr[3:-2]

            return item
class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;
        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()
            imageStr = sel.xpath('./div[@class="small-12 medium-5 large-5 columns"]/a/picture/img/@src').extract()
            imageStr = imageStr[0] if imageStr else 'N/A'
            item['image_urls'] = [self.page_name + imageStr]
            nameStr = sel.xpath('./div/header/h1[@class="subheader"]/a/text()').extract()
            nameStr = ' '.join(' '.join(nameStr).split()) if nameStr else 'N/A'
            item['name'] = nameStr
            typeStr = sel.xpath('.//div[@class="small-4 columns" and contains(text(), "Ship type")]/following-sibling::div/text()').extract()
            typeStr = typeStr[0].strip() if typeStr else 'N/A'
            item['ship_type'] = typeStr
            yield item
class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    ship_type = scrapy.Field()
items.py

import scrapy

class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
import scrapy
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.exceptions import DropItem

class VesselPipeline(object):
    def get_media_requests(self, item, info):
        for image_url in item['image_urls']:
            yield scrapy.Request(image_url)

    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        item['image_paths'] = image_paths
        return item
import scrapy
import string

from vessel.items import VesselItem

class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;

        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()

            imageStr = str(sel.xpath('div[1]/a/picture/img/@src').extract())
            item['image_urls'] = self.page_name + imageStr[3:-2]
            nameStr = str(sel.xpath('div[2]/header/h1/a/text()').extract())
            item['name'] = nameStr[19:-8]
            typeStr = str(sel.xpath('div[2]/div[2]/div[2]/text()').extract())
            item['type'] = typeStr[3:-2]

            return item
class VesselSpider(scrapy.Spider):
    """docstring for VesselSpider"""
    name = "vessel"
    allowed_domains = ["vesselfinder.com"]
    page_name = "http://vesselfinder.com"
    start_urls = [
        # "http://vesselfinder.com/vessels?page=%d" %i for i in range(0,1000)
        "http://vesselfinder.com/vessels"
    ]

    def parse(self, response):
        f = open('vessels.txt', 'a')
        count = 0;
        for sel in response.xpath('//div[@class="items"]/article'):
            item = VesselItem()
            imageStr = sel.xpath('./div[@class="small-12 medium-5 large-5 columns"]/a/picture/img/@src').extract()
            imageStr = imageStr[0] if imageStr else 'N/A'
            item['image_urls'] = [self.page_name + imageStr]
            nameStr = sel.xpath('./div/header/h1[@class="subheader"]/a/text()').extract()
            nameStr = ' '.join(' '.join(nameStr).split()) if nameStr else 'N/A'
            item['name'] = nameStr
            typeStr = sel.xpath('.//div[@class="small-4 columns" and contains(text(), "Ship type")]/following-sibling::div/text()').extract()
            typeStr = typeStr[0].strip() if typeStr else 'N/A'
            item['ship_type'] = typeStr
            yield item
class VesselItem(scrapy.Item):
    name = scrapy.Field()
    nationality = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
    ship_type = scrapy.Field()
附加示例输出

{'image_urls': u'http://vesselfinder.com/vessels/ship-photo/0-227349190-7c01e2b3a7a5078ea94fff9a0f862f8a/0',
 'name': u'IBTISAM ATAO',
 'ship_type': u'Sailing vessel'}

将图像url包装在如下列表中:

item['image_urls'] = [self.page_name + imageStr[3:-2]]

为什么不尝试将它们作为文件而不是图像下载。看看下面的答案:@SaeerShaikh因为我需要将它们存储为图像以供进一步处理我尝试了您的解决方案,但在删除
*.pyc
文件后问题仍然存在谢谢!现在终于可以工作了。然而,我不明白包装的意义。你能给我解释一下吗?请看我的评论和你问题下面的链接。