Python Scrapy requests - processing one group of URLs after another - can I use priority?

Tags: python, scrapy, scrapy-spider

How can I make Scrapy process the URLs group by group? I have two lists of URLs. The first list has to be processed completely, including the item pipelines, before the second list may be processed.

Both should be handled by a single spider.

I am not sure whether priority will help me here:

priority (int) – the priority of this request (defaults to 0). The priority is used by the scheduler to define the order used to process requests. Requests with a higher priority value will execute earlier. Negative values are allowed in order to indicate relatively low priority.

because I do not know whether it merely reorders the requests according to priority - it might push the first URL of the second list through the pipeline before the last URL of the first list has finished.

Can I be sure that items from the first list will be exported to XML (I am using XMLItemExporter) before items from the second list?
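
For reference, priority is just a keyword argument on the Request; here is a minimal sketch of how it would be attached to the two groups (the spider name, URLs and callback are placeholders, not taken from the question):

import scrapy

class TwoGroupSpider(scrapy.Spider):
    name = 'two_group'

    def start_requests(self):
        # first group: higher priority, dequeued earlier by the scheduler
        for url in ['https://example.com/first/1', 'https://example.com/first/2']:
            yield scrapy.Request(url, callback=self.parse, priority=1)
        # second group: default priority
        for url in ['https://example.com/second/1']:
            yield scrapy.Request(url, callback=self.parse, priority=0)

    def parse(self, response):
        yield {'url': response.url}

As the quoted documentation says, this only influences the order in which the scheduler hands out requests; it is not a barrier between the two groups, which is exactly the doubt raised above.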

Edit:

Error (@Wilfredo):

2017-11-23 20:12:16 [scrapy.utils.signal] ERROR: Error caught on signal handler
Traceback (most recent call last):
  File "/home/milano/.virtualenvs/eoilenv/local/lib/python2.7/site-packages/scrapy/utils/signal.py", line 30, in send_catch_log
    *arguments, **named)
  File "/home/milano/.virtualenvs/eoilenv/local/lib/python2.7/site-packages/pydispatch/robustapply.py", line 55, in robustApply
    return receiver(*arguments, **named)
TypeError: spider_idle() takes exactly 2 arguments (1 given)
2017-11-23 20:12:16 [scrapy.core.engine] INFO: Closing spider (finished)

Edit II:

# coding=utf-8
import scrapy
from bot.items import TestItem
from scrapy import Spider, Request, signals
from scrapy.exceptions import DontCloseSpider


class IndexSpider(Spider):
    name = 'index_spider'
    allowed_domains = ['www.scrape.com']

    def start_requests(self):
        for url in ["https://www.scrape.com/eshop"]:
            # for url in ["https://www.scrape.com/search/getAjaxResult?categoryId=1&"]:
            yield Request(url, callback=self.parse_main_page)

    def parse_main_page(self, response):
        # get subcategories and categories
        self.categories = []
        self.subcategories = []
        parts = response.selector.xpath("//div[contains(@class,'side-nav') and not(contains(@class,'side-nav-'))]")

        for part in parts:
            part_name = part.xpath('.//h4/text()').extract_first().strip()
            category_list = [part_name]
            categories_ul = part.xpath('./ul')
            categories_lis = categories_ul.xpath('./li')
            for category_li in categories_lis:
                category_list = category_list[:1]
                category_name = category_li.xpath('./a/text()').extract_first().strip()
                category_href = category_li.xpath('./a/@href').extract_first().strip()
                categoryId = self._extract_categoryId_from_url(category_href)
                category_list.append(category_name)
                self.categories.append((categoryId, category_list))
                subcategories_lis = category_li.xpath('.//li')

                for subcategory_li in subcategories_lis:
                    category_list = category_list[:2]
                    subcategory_href = subcategory_li.xpath('./a/@href').extract_first()
                    subcategory_name = subcategory_li.xpath('./a/text()').extract_first().strip()
                    subcategoryId = self._extract_categoryId_from_url(subcategory_href)
                    category_list.append(subcategory_name)
                    self.subcategories.append((subcategoryId, category_list))
        # Scrape all subcategories (then categories)

        # for sub in self.subcategories:
        #     url = "https://www.scrape.com/search/getAjaxResult?categoryId={}".format(sub[0])
        #     yield Request(url,meta={'tup':sub,'priority':1,'type':'subcategory'},priority=1,callback=self.parse_category)


    def parse_category(self, response):
        tup = response.meta['tup']
        type = response.meta['type']
        priority = response.meta['priority']
        current_page = response.meta.get('page', 1)
        categoryId = tup[0]
        categories_list = tup[1]
        number_of_pages_href = response.selector.xpath(u'//a[text()="Last"]/@href').extract_first()
        try:
            number_of_pages = int(number_of_pages_href.split('p=')[1].split('&')[0])
        except:
            number_of_pages = current_page
        if current_page < number_of_pages:
            url = "https://www.scrape.com/search/getAjaxResult/?categoryId={}&p={}".format(categoryId, current_page + 1)
            yield Request(url, self.parse_category, meta={'tup': tup, 'page': current_page + 1,'priority':priority,'type':type}, priority=priority)
        hrefs = self._extract_all_product_urls(response)
        for href in hrefs:
            yield Request(href, self.parse_product, meta={"categories_list": categories_list,'type':type}, priority=2 if priority==1 else -1)

    def parse_product(self, response):
        yield TestItem(url=response.url,type=response.meta['type'], category_text='|'.join(response.meta['categories_list']))

    def _extract_categoryId_from_url(self, url):
        categoryId = url.split('/')[-2]
        return categoryId

    def _extract_all_product_urls(self, response):
        hrefs = response.selector.xpath("//a[contains(@class, 'shop-item-image')]/@href").extract()
        return [u"https://www.scrape.com{}".format(x) for x in hrefs]

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(IndexSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_idle,
                                signal=scrapy.signals.spider_idle)
        return spider

    def spider_idle(self):
        self.crawler.signals.disconnect(self.spider_idle,
                                        signal=scrapy.signals.spider_idle)
        # yield a new group of urls
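        # NOTE: as written, this method is a generator (it contains yield), so the
        # spider_idle signal handler only produces a generator object that Scrapy
        # never iterates; the Requests are never scheduled and DontCloseSpider is
        # never raised. The answer further down schedules the new requests with
        # self.crawler.engine.crawl() instead.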
        if self.categories:
            for cat in self.categories:
                url = "https://www.scrape.com/search/getAjaxResult?categoryId={}".format(cat[0])
                yield Request(url, meta={'tup': cat, 'priority': 0, 'type': 'category'}, priority=0,
                              callback=self.parse_category)
            self.categories = []
            raise DontCloseSpider()

One answer suggests processing the URLs strictly one at a time, chaining each request to the next one through meta so that an item is always yielded before the following URL is requested (these methods go inside the spider, with Request imported from scrapy):

def start_requests(self):
    urls = ['url1', 'url2']
    yield Request(
        url=urls[0],
        callback=self.process_request,
        meta={'urls': urls, 'current_index': 0})

def process_request(self, response):
    # do my thing
    yield {}  # yield item
    # move on to the next URL in the list, if any
    current_index = response.meta['current_index'] + 1
    if current_index < len(response.meta['urls']):
        yield Request(
            url=response.meta['urls'][current_index],
            callback=self.process_request,
            meta={'urls': response.meta['urls'], 'current_index': current_index})
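
This serializes the crawl completely: each URL is only requested after the previous response's item has been yielded, so ordering is guaranteed by construction at the cost of concurrency. For the two-list case in the question, the two lists could simply be concatenated into urls.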

Another answer uses the spider_idle signal: crawl the first group of URLs normally, then schedule the second group only once the spider has gone idle, raising DontCloseSpider to keep the spider open:

# -*- coding: utf-8 -*-
import scrapy
from scrapy.exceptions import DontCloseSpider

class ExampleSpider(scrapy.Spider):
    name = 'example'
    allowed_domains = ['example.com']

    first_group_of_urls = [
        'http://quotes.toscrape.com/page/1/'
    ]
    second_group_of_urls = [
        'http://quotes.toscrape.com/page/2/'
    ]

    def start_requests(self):
        for url in self.first_group_of_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        self.logger.debug('In response from %s', response.url)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(ExampleSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_idle,
                                signal=scrapy.signals.spider_idle)
        return spider

    def spider_idle(self):
        self.crawler.signals.disconnect(self.spider_idle,
                                        signal=scrapy.signals.spider_idle)
        for url in self.second_group_of_urls:
            self.crawler.engine.crawl(scrapy.Request(url), self)
        raise DontCloseSpider
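
For reference, Scrapy sends spider_idle only when the spider has no more requests waiting to be downloaded or scheduled and no items still being processed in the item pipeline, so by the time the second group is scheduled here, the first group's items have already gone through the pipeline, which is the ordering guarantee the question asks for.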