Python Scrapy - recursively scraping to a third page

Tags: python, recursion, web-scraping, scrapy

I hope my request is fairly simple and straightforward for the more experienced Scrapy users out there.

In essence, the code below scrapes from a second page based on links found on a first page. I would like to extend the code so that it scrapes from a third page using links found on the second page. In the code below, def parse_items handles the landing page (the first level), which contains 50 listings, and the code is set up to scrape each of those 50 links recursively. def parse_listing_page specifies which items to scrape from the "listing page". Within each listing page, I would like my script to follow a link through to one more page and scrape an item or two there before returning to the "listing page" and then back to the landing page.

The code below works for recursive scraping across two levels. How can I extend it to three, using the code below?

from scrapy import log
from scrapy.log import ScrapyFileLogObserver
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from firstproject.items import exampleItem
from scrapy.http import Request
import urlparse

logfile_info = open('example_INFOlog.txt', 'a')
logfile_error = open('example_ERRlog.txt', 'a')
log_observer_info = log.ScrapyFileLogObserver(logfile_info, level=log.INFO)
log_observer_error = log.ScrapyFileLogObserver(logfile_error, level=log.ERROR)
log_observer_info.start()
log_observer_error.start()

class MySpider(CrawlSpider):
    name = "example"

    allowed_domains = ["example.com.au"]

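    # Follow the 'next' pagination links and hand each page to parse_items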
    rules = (
        Rule(SgmlLinkExtractor(allow=("",), restrict_xpaths=('//li[@class="nextLink"]',)),
             callback="parse_items", follow=True),
    )

    def start_requests(self):
        start_urls = reversed([
            "http://www.example.com.au/1?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=100-to-200",
        ])

        return [Request(url=start_url) for start_url in start_urls]

    def parse_start_url(self, response):
        return self.parse_items(response)

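    # First level: pull the title and link from each <h2> on the landing page, then follow each link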
    def parse_items(self, response):
        hxs = HtmlXPathSelector(response)
        listings = hxs.select("//h2")
        items = []
        for listing in listings:
            item = exampleItem()
            item["title"] = listing.select("a/text()").extract()[0]
            item["link"] = listing.select("a/@href").extract()[0]
            items.append(item)

            url = "http://example.com.au%s" % item["link"]
            yield Request(url=url, meta={'item': item}, callback=self.parse_listing_page)


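    # Second level: fill in the remaining fields from the listing page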
    def parse_listing_page(self,response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["item_1"] = hxs.select('#censored Xpath').extract()
        item["item_2"] = hxs.select('#censored Xpath').extract()
        item["item_3"] = hxs.select('#censored Xpath').extract()
        item["item_4"] = hxs.select('#censored Xpath').extract()

        return item

Many thanks.

This is how the code flow works.

The crawl starts with the Rule constructor in the MySpider class. The Rule constructor has its callback set to parse_items. At the end of parse_items there is a yield, which makes the function recurse into parse_listing_page. If you want to recurse from parse_listing_page down to a third level, there has to be a Request yielded from parse_listing_page.
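To make that concrete, here is a minimal sketch of that third hop (the XPaths stay as censored placeholders, and parse_listing_counter is just an example name for the third-level callback):

    # Sketch only: placeholder XPaths, example callback name.
    def parse_listing_page(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        item["item_1"] = hxs.select('#censored Xpath').extract()

        # Instead of returning the item here, follow one more link...
        counter_link = hxs.select('#censored Xpath').extract()[0]
        url = "http://example.com.au%s" % counter_link
        yield Request(url=url, meta={'item': item}, callback=self.parse_listing_counter)

    # ...and only emit the item once the third level has been scraped.
    def parse_listing_counter(self, response):
        hxs = HtmlXPathSelector(response)
        item = response.meta['item']
        item["counter"] = hxs.select('#censored Xpath').extract()
        yield item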

Below is my updated code. The code below is able to pull the counter_link in the proper format (tested), but it seems the else statement is being used, so parse_listing_counter is never yielded. If I remove the if and else clauses and force the code to call back parse_listing_counter, it doesn't yield any items (not even the ones from parse_items or the listing page).

What am I doing wrong in my code? I've also checked the XPaths - they all look fine.

from scrapy import log
from scrapy.log import ScrapyFileLogObserver
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from firstproject.items import exampleItem
from scrapy.http import Request
import urlparse

logfile_info = open('example_INFOlog.txt', 'a')
logfile_error = open('example_ERRlog.txt', 'a')
log_observer_info = log.ScrapyFileLogObserver(logfile_info, level=log.INFO)
log_observer_error = log.ScrapyFileLogObserver(logfile_error, level=log.ERROR)
log_observer_info.start()
log_observer_error.start()

class MySpider(CrawlSpider):
    name = "example"

    allowed_domains = ["example.com.au"]

    rules = (
        Rule(SgmlLinkExtractor(allow=("",), restrict_xpaths=('//li[@class="nextLink"]',)),
             callback="parse_items", follow=True),
    )

    def start_requests(self):
        start_urls = reversed([
            "http://www.example.com.au/1?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=10-to-100",
            "http://www.example.com.au/2?new=true&list=100-to-200",
        ])

        return [Request(url=start_url) for start_url in start_urls]

    def parse_start_url(self, response):
        return self.parse_items(response)

    def parse_items(self, response):
        hxs = HtmlXPathSelector(response)
        listings = hxs.select("//h2")
        items = []
        for listing in listings:
            item = exampleItem()
            item["title"] = listing.select("a/text()").extract()[0]
            item["link"] = listing.select("a/@href").extract()[0]
            items.append(item)

            url = "http://example.com.au%s" % item["link"]
            yield Request(url=url, meta={'item': item}, callback=self.parse_listing_page)


    def parse_listing_page(self,response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["item_1"] = hxs.select('#censored Xpath').extract()
        item["item_2"] = hxs.select('#censored Xpath').extract()
        item["item_3"] = hxs.select('#censored Xpath').extract()
        item["item_4"] = hxs.select('#censored Xpath').extract()

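        # Pull the counter page link; only follow it when a counter_link value arrives via response.meta,
        # otherwise yield the item as-is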
        item["counter_link"] = hxs.selext('#censored Xpath').extract()[0]
        counter_link = response.meta.get('counter_link', None)
        if counter_link:
            url2 = "http://example.com.au%s" % item["counter_link"]
            yield Request(url=url2, meta={'item':item},callback=self.parse_listing_counter)
        else:
            yield item

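    # Third level: scrape the counter value and return the completed item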
    def parse_listing_counter(self,response):
        hxs = HtmlXPathSelector(response)

        item = response.meta['item']

        item["counter"] = hxs.select('#censored Xpath').extract()

        return item

Thanks for your contribution. I've made some changes in the code below, but I'm still running into problems.