Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/python/349.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python 已爬网0页已爬网0项_Python_Web Scraping_Scrapy_Scrapy Spider - Fatal编程技术网

Python 已爬网0页已爬网0项

Python 已爬网0页已爬网0项,python,web-scraping,scrapy,scrapy-spider,Python,Web Scraping,Scrapy,Scrapy Spider,我刚开始学习Python和Scrapy 我的第一个项目是在包含web安全信息的网站上抓取信息。但是当我用cmd运行它时,它说 已爬网0页(以0页/分钟的速度)已爬网0项(以0项/分钟的速度) 似乎什么也没有出来。如果有人能解决我的问题,我将不胜感激 以下是我的spider文件: 项目: import scrapy class ReporteinmobiliarioItem(scrapy.Item): # define the fields for your item here lik

我刚开始学习Python和Scrapy

我的第一个项目是在包含web安全信息的网站上抓取信息。但是当我用cmd运行它时,它说

已爬网0页(以0页/分钟的速度)已爬网0项(以0项/分钟的速度)

似乎什么也没有出来。如果有人能解决我的问题,我将不胜感激

以下是我的spider文件:

项目:

import scrapy


class ReporteinmobiliarioItem(scrapy.Item):
    """Item holding the fields scraped from one zonaprop.com.ar rental listing."""
    titulo = scrapy.Field()              # listing title
    precioAlquiler = scrapy.Field()      # rental price
    ubicacion = scrapy.Field()           # location
    descripcion = scrapy.Field()         # free-text description
    superficieTotal = scrapy.Field()     # total surface area
    superficieCubierta = scrapy.Field()  # covered surface area
    antiguedad = scrapy.Field()          # age of the property
import scrapy
from scrapy.spider import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import CloseSpider
from reporteInmobiliario.items import ReporteinmobiliarioItem

class reporteInmobiliario(CrawlSpider):
    """Crawl zonaprop.com.ar lot-rental listings and yield up to 5 items.

    Fixes vs. the original (cause of "Crawled 0 pages, scraped 0 items"):
    - ``allowed_domains`` had a trailing slash (``'zonaprop.com.ar/'``), so the
      offsite middleware filtered every followed link as an offsite request.
    - ``rules`` was a set literal; Scrapy expects an ordered iterable (tuple).
    - ``parse_item`` was defined at module level instead of as a method, so the
      ``callback='parse_item'`` lookup on the spider could never find it.
    - Two XPath expressions had unbalanced parentheses.

    NOTE(review): ``from scrapy.spider import CrawlSpider, Rule`` is the
    pre-1.0 module path; modern Scrapy uses ``scrapy.spiders`` — confirm
    against the installed Scrapy version.
    """
    name = 'reporteInmobiliario'
    # No trailing slash: OffsiteMiddleware matches against the bare domain.
    allowed_domains = ['zonaprop.com.ar']
    item_count = 0
    start_urls = ['https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html']

    rules = (
        # Follow pagination ("next page") links.
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[@class="pagination-action-next"]/a'))),
        # Follow each listing link and parse it.
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//h4[@class="aviso-data-title"]')),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Extract one listing page into a ReporteinmobiliarioItem.

        Closes the spider via CloseSpider once more than 5 items were scraped.
        """
        rp_item = ReporteinmobiliarioItem()

        rp_item['titulo'] = response.xpath('//div[@class="card-title"]/text()').extract()
        rp_item['precioAlquiler'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[2]/div[1]/div[2]/p/strong)').extract()
        rp_item['ubicacion'] = response.xpath('normalize-space(//*[@id="map"]/div[1]/div/ul/li)').extract()
        # Fixed: the original was missing the closing ')' of normalize-space(...).
        rp_item['descripcion'] = response.xpath('normalize-space(//*[@id="id-descipcion-aviso"])').extract()
        # Fixed: the original had a stray ')' after span with no normalize-space(.
        rp_item['superficieTotal'] = response.xpath('//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[4]/span').extract()
        rp_item['superficieCubierta'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[5]/span)').extract()
        rp_item['antiguedad'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[6]/span)').extract()

        # Hard stop after 5 items to keep the crawl bounded.
        self.item_count += 1
        if self.item_count > 5:
            raise CloseSpider('item_exceeded')
        yield rp_item
爬虫(Spider)文件:

import scrapy


class ReporteinmobiliarioItem(scrapy.Item):
    """Item holding the fields scraped from one zonaprop.com.ar rental listing."""
    titulo = scrapy.Field()              # listing title
    precioAlquiler = scrapy.Field()      # rental price
    ubicacion = scrapy.Field()           # location
    descripcion = scrapy.Field()         # free-text description
    superficieTotal = scrapy.Field()     # total surface area
    superficieCubierta = scrapy.Field()  # covered surface area
    antiguedad = scrapy.Field()          # age of the property
import scrapy
from scrapy.spider import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import CloseSpider
from reporteInmobiliario.items import ReporteinmobiliarioItem

class reporteInmobiliario(CrawlSpider):
    """Crawl zonaprop.com.ar lot-rental listings and yield up to 5 items.

    Fixes vs. the original (cause of "Crawled 0 pages, scraped 0 items"):
    - ``allowed_domains`` had a trailing slash (``'zonaprop.com.ar/'``), so the
      offsite middleware filtered every followed link as an offsite request.
    - ``rules`` was a set literal; Scrapy expects an ordered iterable (tuple).
    - ``parse_item`` was defined at module level instead of as a method, so the
      ``callback='parse_item'`` lookup on the spider could never find it.
    - Two XPath expressions had unbalanced parentheses.

    NOTE(review): ``from scrapy.spider import CrawlSpider, Rule`` is the
    pre-1.0 module path; modern Scrapy uses ``scrapy.spiders`` — confirm
    against the installed Scrapy version.
    """
    name = 'reporteInmobiliario'
    # No trailing slash: OffsiteMiddleware matches against the bare domain.
    allowed_domains = ['zonaprop.com.ar']
    item_count = 0
    start_urls = ['https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html']

    rules = (
        # Follow pagination ("next page") links.
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[@class="pagination-action-next"]/a'))),
        # Follow each listing link and parse it.
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//h4[@class="aviso-data-title"]')),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Extract one listing page into a ReporteinmobiliarioItem.

        Closes the spider via CloseSpider once more than 5 items were scraped.
        """
        rp_item = ReporteinmobiliarioItem()

        rp_item['titulo'] = response.xpath('//div[@class="card-title"]/text()').extract()
        rp_item['precioAlquiler'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[2]/div[1]/div[2]/p/strong)').extract()
        rp_item['ubicacion'] = response.xpath('normalize-space(//*[@id="map"]/div[1]/div/ul/li)').extract()
        # Fixed: the original was missing the closing ')' of normalize-space(...).
        rp_item['descripcion'] = response.xpath('normalize-space(//*[@id="id-descipcion-aviso"])').extract()
        # Fixed: the original had a stray ')' after span with no normalize-space(.
        rp_item['superficieTotal'] = response.xpath('//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[4]/span').extract()
        rp_item['superficieCubierta'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[5]/span)').extract()
        rp_item['antiguedad'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[6]/span)').extract()

        # Hard stop after 5 items to keep the crawl bounded.
        self.item_count += 1
        if self.item_count > 5:
            raise CloseSpider('item_exceeded')
        yield rp_item

您需要始终首先检查日志:

2018-09-09 19:21 [scrapy.spidermiddleware.offsite] DEBUG:过滤到 'www.zonaprop.com.ar' 的场外(offsite)请求:<GET https://www.zonaprop.com.ar/propiedades/galpon-de-337-m2-7-79-x-43-30-m-a-metros-de-av-43096244.html>

另外,allowed_domains 中不应包含结尾的斜杠(应为 'zonaprop.com.ar',而不是 'zonaprop.com.ar/'),否则所有后续请求都会被站外(offsite)中间件过滤掉。也不要忘记修复 parse_item 中 XPath 表达式的括号不匹配错误,并把 parse_item 缩进到爬虫类内部作为方法。