Python: DropItem if the parsed URL contains a keyword (pipelines)


I'm trying to build a spider for a school project, scraping recipes from allrecipes.com. Everything works fine, but I can't seem to drop duplicate recipes, where one URL contains the actual recipe and another URL is the same recipe with "video=true" appended.

Here is my attempt at handling this in pipelines.py:

from scrapy.exceptions import DropItem
from scrapy import log



class DuplicatesPipeline(object):

    # minCal = 50

    def __init__(self):
        self.urls_seen = set()

    def process_vids(self, item, spider):
        video = "video=true"
        url = str(item.get('url'))
        if video in url:
            raise DropItem("Contains video")
        else:
            return item

    def process_item(self, item, spider):
        unique_id = item.get('url')
        if unique_id in self.urls_seen:
            raise DropItem("Duplicate Item found (%s)" % unique_id)
        else:
            self.urls_seen.add('url')
            return item
settings.py:

# Scrapy settings for dirbot project
BOT_NAME = 'dirbot'

SPIDER_MODULES = ['dirbot.spiders']
NEWSPIDER_MODULE = 'dirbot.spiders'
DEFAULT_ITEM_CLASS = 'dirbot.items.Website'

ITEM_PIPELINES = {'dirbot.pipelines.DuplicatesPipeline': 300,}
items.py:

from scrapy.item import Item, Field


class Website(Item):

    name = Field()
    url = Field()
    description = Field()
    kcal = Field()
    carbs = Field()
    fat = Field()
    protein = Field()
    main = Field()
    sugar = Field()
    fibre = Field()
    author = Field()
    rating = Field()
    img = Field()
dnot.py:

from scrapy.spider import Spider
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.http.request import Request
from dirbot.items import Website
from scrapy.contrib.spiders import CrawlSpider,Rule
import urlparse
import scrapy

page = "http://allrecipes.com/recipes/main.aspx?Page=%d#recipes"

class DmozSpider(Spider):
    name = "dnot"
    allowed_domains = ["allrecipes.com"]
    start_urls = [page % 1]

    rules = [Rule(SgmlLinkExtractor(allow=('allrecipes.com'), restrict_xpaths = '//a[contains(.,"NEXT")]'),
    callback="parse", follow= True),
    ]

    def __init__(self):
        self.page_number = 1

    def parse(self, response):
        print "-------------------------------------------------"
        print self.page_number
        print "-------------------------------------------------"


        sel = Selector(response)
        sites = response.xpath('//div[@id="divGridItemWrapper"]')
        items = []

        for site in sites:
            item = Website()
            recipe = response.xpath('//a[contains(@href, "/Recipe/")]/@href').extract()
            url = "http://www.allrecipes.com" 
            for nth in recipe:
                go = urlparse.urljoin(url, str(nth))
                items.append(item)
                for link in go:
                    yield Request(go, self.recipes)
        if self.page_number <= 3:
            self.page_number += 1
            yield Request(page % self.page_number)
        else:
            pass

    def recipes(self,response):
        item = Website()
        sel = Selector(response)
        recipe = response.xpath('//div[@id="content-wrapper"]')
        items = []
        print "second page - %s" % response.url

        for i in recipe:
            item['url'] = response.url
            item['description'] = i.xpath('//span[@itemprop="description"]/text()').extract()
            item['name'] = i.xpath('//h1[@itemprop="name"]/text()').extract()
            item['kcal'] = i.xpath('//ul/li[contains(.,"kcal")]/span/text()').extract()
            item['carbs'] = i.xpath('//ul/li[contains(.,"Carbohydrates")]/following-sibling::li[1]//span[@id="lblNutrientValue"]/text()').extract()
            item['fat'] = i.xpath('//ul/li[contains(.,"Fat")]/following-sibling::li[1]//span[@id="lblNutrientValue"]/text()').extract()
            item['protein'] = i.xpath('//ul/li[contains(.,"Protein")]/following-sibling::li[1]//span[@id="lblNutrientValue"]/text()').extract()
            item['main'] = "allrecipes.com"
            item['sugar'] = i.xpath('//li/span[@itemprop="sugarContent"]/text()').extract()
            item['fibre'] = i.xpath('//li/span[@itemprop="proteinContent"]/text()').extract()
            item['author'] = i.xpath('//span[@id="lblUser0"]/text()').extract()
            item['rating'] = i.xpath('//div[@class="rating-stars-img"][1]/meta[1][@itemprop="ratingValue"]/@content').extract()
            item['img'] = i.xpath('//img[@id="imgPhoto"]/@src').extract()
            items.append(item)
            yield item
You need to create a class in your pipelines.py file that implements the process_item method, something like:

from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs
from scrapy.exceptions import DropItem

class DuplicatesPipeline(object):
    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        url = item['url']
        u = urlparse(url)
        query = parse_qs(u.query)
        query.pop('video', None)
        u = u._replace(query=urlencode(query, True))
        unique_id = urlunparse(u)
        if unique_id and unique_id in self.ids_seen:
            raise DropItem("Duplicate Item found (%s)" % unique_id)
        else:
            self.ids_seen.add(unique_id)
        return item
Then you need to add that class to your settings.py:

ITEM_PIPELINES = {
    'yourproject.pipelines.DuplicatesPipeline': 300,
}
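For illustration, here is a minimal standalone sketch (not part of the project; the example URLs and the normalize helper are made up) of what the normalization in the pipeline does. Both variants of a recipe URL collapse to the same unique_id, which is why the "video=true" copy gets dropped:

from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs

def normalize(url):
    # same steps as in the pipeline: parse the URL, drop the "video"
    # query parameter if present, and rebuild the URL
    u = urlparse(url)
    query = parse_qs(u.query)
    query.pop('video', None)
    return urlunparse(u._replace(query=urlencode(query, True)))

print normalize("http://allrecipes.com/Recipe/example/detail.aspx?prop24=hn")
print normalize("http://allrecipes.com/Recipe/example/detail.aspx?prop24=hn&video=true")
# both calls print the same URL, so the second item would be flagged as a duplicate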
Also, your process_vids method is not being used.
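As a rough sketch (not your original code): Scrapy only invokes process_item on item pipelines, so the video check would have to live inside process_item, or in a separate pipeline class of its own. For example, a hypothetical VideoFilterPipeline could look like this:

from scrapy.exceptions import DropItem

class VideoFilterPipeline(object):
    # hypothetical stand-alone pipeline: drop any item whose url still
    # carries the "video=true" flag
    def process_item(self, item, spider):
        url = str(item.get('url'))
        if "video=true" in url:
            raise DropItem("Contains video")
        return item

It would then need its own entry in ITEM_PIPELINES, just like DuplicatesPipeline.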


Let me know if it helps.

I tried self.urls_seen.add(item['url'].replace("&video=true", "")) but I still get duplicate URLs? My problem is that I don't want the same recipe twice.

I take it you are putting them in a set so you don't get duplicates? You could also do if not item['url'].endswith("&video=true"), or if item['url'].replace("&video=true", "") not in self.urls_seen; the simplest approach is just to add it, since a set has no duplicates you will only keep one entry, assuming "&video=true" never appears anywhere else in the string.

I tried the code above but I'm still running into problems. It must be the way I've configured my settings or the pipeline. Thanks for the help regardless! This is similar to what I have... still doesn't work. I feel like it must be because I'm trying to match a string against a list or something. I've added my full code above if you want to take a look. Thanks for the help!

@McLean25, please check the answer again.

That works better, but it still isn't completely right: it drops the "video=true" link when the recipe without video=true already exists, but it won't drop the link when no matching link has been seen yet. It should be good enough for now, though. Thanks for the help! Finally got the code working. I've edited the answer above to reflect the functional code. Thanks everyone for the help!!
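For completeness, here is a rough sketch of the simpler string-based normalization discussed in these comments, assuming "&video=true" never appears anywhere else in the URL:

from scrapy.exceptions import DropItem

class DuplicatesPipeline(object):
    def __init__(self):
        self.urls_seen = set()

    def process_item(self, item, spider):
        # strip the video flag before comparing, so both variants of a
        # recipe URL map to the same key
        unique_id = item['url'].replace("&video=true", "")
        if unique_id in self.urls_seen:
            raise DropItem("Duplicate Item found (%s)" % unique_id)
        self.urls_seen.add(unique_id)
        return item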