Web scraping: global ItemLoader shared between multiple spiders

I'm fairly new to Scrapy/Python.

I want to scrape a few websites, but I only need three items from each one: "date", "cota" and "name". They are updated daily and always have the same XPath.

After scraping all of them I want to export to a CSV file, but with my code I get the following format:

But I want something like this:

I asked specifically about sharing the same ItemLoader between multiple spiders because that is what I thought of, but I'm open to other options.

This is the script I have so far for two websites; I'll add more spiders later:

By the way, with code like this, given that Scrapy is asynchronous, is it possible for the values to get mixed up?

# -*- coding: utf-8 -*-
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.loader import ItemLoader

class fundo(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    name = scrapy.Field()
    cota = scrapy.Field()
    date = scrapy.Field()

class ModalSpider(scrapy.Spider):
    name = 'modal'
    allowed_domains = ['modalasset.com.br']
    start_urls = ['http://modalasset.com.br/cotas-diarias/']

    def parse(self, response):

        l = ItemLoader(item=fundo(),response=response)

        name = response.xpath("//tr[@class='row-6 even']/td/a/text()").extract_first()
        date = response.xpath("//tr[@class='row-6 even']/td/text()")[0].extract()
        cota = response.xpath("//tr[@class='row-6 even']/td/text()")[1].extract()


        l.add_value('name', name)
        l.add_value('date', date)
        l.add_value('cota', cota)

        return l.load_item()

class KapitaloSpider(scrapy.Spider):
    name = 'kapitalo'
    allowed_domains = ['kapitalo.com.br']
    start_urls = ['http://kapitalo.com.br/relatorios.html']

    def parse(self, response):

        l = ItemLoader(item=fundo(),response=response)

        name = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[0].extract()
        date = response.xpath("//*[@class='event layout_full block bygone']/h2/text()")[0].extract()
        date = date.replace(' Cotas do Dia: ','')
        cota = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[1].extract()

        l.add_value('name', name)
        l.add_value('date', date)
        l.add_value('cota', cota)

        return l.load_item()


process = CrawlerProcess({
    'FEED_FORMAT': 'csv',
    'FEED_URI': 'result.csv'
})
process.crawl(ModalSpider)
process.crawl(KapitaloSpider)
process.start() # the script will block here until all crawling jobs are finished
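
An aside that may explain the CSV cells: by default an ItemLoader collects every add_value() call into a list, so load_item() returns list-valued fields. If a single value per field is wanted, a TakeFirst output processor can be set on the loader. A minimal sketch, assuming the Scrapy 1.x import path (FundoLoader is just an illustrative name):

from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst  # in newer Scrapy this lives in itemloaders.processors

class FundoLoader(ItemLoader):
    # return one scalar per field from load_item() instead of a list
    default_output_processor = TakeFirst()

The spiders above would then build the loader with FundoLoader(item=fundo(), response=response) instead of the plain ItemLoader.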
The other approach I tried was the code below, but add_value keeps replacing the old values in the ItemLoader and I don't know why, so it only returns the values from the last website. I would rather go with the first code, since it lets me use different kinds of spiders, and for one of the websites I will probably need Selenium.

# -*- coding: utf-8 -*-
import scrapy
from scrapy.loader import ItemLoader
from scrapy.http import Request

class FundoItem(scrapy.Item):
    name = scrapy.Field()
    date = scrapy.Field()
    cota = scrapy.Field()

class RankingSpider(scrapy.Spider):
    name = 'Ranking'
    allowed_domains = ['modalasset.com.br',
                        'kapitalo.com.br'
                        ]
    start_urls = ['http://modalasset.com.br/cotas-diarias/']

    def parse(self, response):

        l = ItemLoader(item=FundoItem(),response=response)

        name = response.xpath("//tr[@class='row-6 even']/td/a/text()").extract_first()
        date = response.xpath("//tr[@class='row-6 even']/td/text()")[0].extract()
        cota = response.xpath("//tr[@class='row-6 even']/td/text()")[1].extract()

        #item['name'] = name
        #item['date'] = date
        #item['cota'] = cota

        l.add_value('name', name)
        l.add_value('date', date)
        l.add_value('cota', cota)


        yield Request(url = "http://kapitalo.com.br/relatorios.html",
                        callback = self.parse_2,
                        meta={'item':l.load_item()})


    def parse_2 (self,response):

        name = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[0].extract()
        date = response.xpath("//*[@class='event layout_full block bygone']/h2/text()")[0].extract()
        date = date.replace(' Cotas do Dia: ','')
        cota = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[1].extract()

        l = ItemLoader(item=response.meta['item'])

        l.add_value('name', name)
        l.add_value('date', date)
        l.add_value('cota', cota)

        return l.load_item()
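
For comparison, a common variant of this chained-callback idea skips the second ItemLoader entirely and yields one item per site, so each fund becomes its own row. A rough sketch reusing the XPaths from the question (RankingSketchSpider is an illustrative name, not code from the question):

import scrapy
from scrapy.http import Request

class FundoItem(scrapy.Item):
    name = scrapy.Field()
    date = scrapy.Field()
    cota = scrapy.Field()

class RankingSketchSpider(scrapy.Spider):
    name = 'ranking_sketch'
    start_urls = ['http://modalasset.com.br/cotas-diarias/']

    def parse(self, response):
        # first site: emit its item right away instead of carrying it in meta
        item = FundoItem()
        item['name'] = response.xpath("//tr[@class='row-6 even']/td/a/text()").extract_first()
        item['date'] = response.xpath("//tr[@class='row-6 even']/td/text()")[0].extract()
        item['cota'] = response.xpath("//tr[@class='row-6 even']/td/text()")[1].extract()
        yield item
        # then chain the second site with its own callback
        yield Request(url='http://kapitalo.com.br/relatorios.html', callback=self.parse_2)

    def parse_2(self, response):
        # second site: a fresh item, so nothing is overwritten or merged
        row = response.xpath("//tr[@class='odd']")[1]
        item = FundoItem()
        item['name'] = row.xpath("td//text()")[0].extract()
        date = response.xpath("//*[@class='event layout_full block bygone']/h2/text()")[0].extract()
        item['date'] = date.replace(' Cotas do Dia: ', '')
        item['cota'] = row.xpath("td//text()")[1].extract()
        yield item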

It turned out to be a problem with how Windows interprets newlines in CSV files. I solved it by writing the CSV myself with the code below instead of using the feed export (FEED_FORMAT/FEED_URI):

import csv
with open('test.csv', mode='a', newline='\n') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow([name, date, cota, url])
Using newline='\n' solved those issues.
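
As a side note, the Python csv documentation recommends opening the file with newline='' and letting csv.writer handle line endings itself; that variant also avoids the extra blank lines on Windows (same variables as in the snippet above):

import csv
with open('test.csv', mode='a', newline='') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow([name, date, cota, url])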

The final code is:

#Scrapy framework
#CrawlerProcess to run multiple spiders
#csv to export
#sys,inspect to find all the classes(spiders) in this script

import scrapy
from scrapy.crawler import CrawlerProcess
import csv
import sys, inspect
import datetime

#SPIDER DEFINITIONS

class ModalSpider(scrapy.Spider):
    name = 'modal'
    allowed_domains = ['modalasset.com.br']
    start_urls = ['http://modalasset.com.br/cotas-diarias/']

    def parse(self, response):

        name = response.xpath("//tr[@class='row-6 even']/td/a/text()").extract_first()
        date = response.xpath("//tr[@class='row-6 even']/td/text()")[0].extract()
        cota = response.xpath("//tr[@class='row-6 even']/td/text()")[2].extract()

        write(name,date,cota,response.request.url)


class KapitaloSpider(scrapy.Spider):
    name = 'kapitalo'
    allowed_domains = ['kapitalo.com.br']
    start_urls = ['http://kapitalo.com.br/relatorios.html']

    def parse(self, response):

        #Zeta FIQ FIM
        name = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[0].extract()
        date = response.xpath("//*[@class='event layout_full block bygone']/h2/text()")[0].extract()
        date = date.replace(' Cotas do Dia: ','')
        date = date.replace('.','/')
        cota = response.xpath("//tr[@class='odd']")[1].xpath("td//text()")[1].extract()

        write(name,date,cota,response.request.url)

        #Kappa FIN FIQ FIM
        name = response.xpath("//tr[@class='odd']")[0].xpath("td//text()")[0].extract()
        cota = response.xpath("//tr[@class='odd']")[0].xpath("td//text()")[1].extract()

        write(name,date,cota,response.request.url)

#write to csv file
#newline='\n' so it won't jump any lines between the entries
def write(name,date,cota,url):
    with open('test.csv',mode='a',newline='\n') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([name,date,cota,url])

def crawl():

    #create the columns in the csv file
    with open('test.csv',mode="w",newline='\n') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['Nome do Fundo','Data','Cota do dia','URL'])

    #get all the members from the script
    #if it's a class and it's inside the main (it's a spider)
    #then it crawls
    process = CrawlerProcess()
    for name, obj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(obj):
            if obj.__module__ == '__main__':
                process.crawl(obj)

    process.start()

crawl()
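
One note on the asynchronous concern from the question: every call to write() receives all of its values from a single response inside one parse method, so concurrency between the spiders can only change the order in which rows are appended to test.csv; it cannot mix the name, date and cota of different funds within a row.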