Python: Structuring a Scrapy spider to return scraped data as a CSV


I am able to get the scraped information in a readable format in the VS Code terminal, but when I try to get it into a CSV, only one row of information comes back. I believe this means it is not iterating, but I am not sure what to change to make it iterate.

I believe the problem appears when I yield the response to the parse_page function, because everything works fine without it, but I still need the information from the second page.

Possibly I have to create the DataFrame outside the whole thing, build a list of all the items outside the RusmodSpider class, and then create the CSV, but I am not sure where to start or how to do that (a sketch of this idea follows the code below).

import os
from datetime import datetime

import pandas as pd
import scrapy
from scrapy.crawler import CrawlerProcess
from googletrans import Translator


class RusmodSpider(scrapy.Spider):
    name = 'ruscod'
    allowed_domains = ['test.ru']
    
    def start_requests(self):
        print('')
        print('*'*25)
        f_block = input("What is the f number of the page you want to scrape?: ")
        url = ('https://function.test.ru/news/country.htm'
               '?f={}&blk=10322350&objInBlock=25'.format(f_block))
        headers = {
            'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; '
                           'rv:32.0) Gecko/20100101 Firefox/32.0')
        }
        yield scrapy.Request(url=url, callback=self.parse, headers=headers)

    def parse(self, response):
        container = response.xpath("//div[@class='newsitem']")
        for newsitem in container:
            doc = newsitem.xpath(".//span[@class='date']/text()").get()
            title = newsitem.xpath(".//a[@target='_self']/text()").get()
            rel_url = newsitem.xpath(".//a[@target='_self']/@href").get()

            abs_url = 'https://function.test.ru/{}'.format(rel_url)

            # follow only titles that contain an exercise-related keyword
            keywords = {"учение", "учения", "условного", "учении", "двустороннего"}
            if any(word in keywords for word in title.split()):
                yield response.follow(url=abs_url, callback=self.parse_page,
                                      meta={'doc': doc, 'title': title,
                                            'abs_url': abs_url})

    def parse_page(self, response):
        items = []
        translator = Translator()
        doc = response.request.meta['doc']
        title = response.request.meta['title']
        abs_url = response.request.meta['abs_url']

        district = response.xpath("//a[@class='date']/text()").get()
        last_line = response.xpath("//p[@style][last()]/text()").get()
        
        trans_title = translator.translate(title, src='ru')
        trans_dis = translator.translate(district, src='ru')
        trans_last_line = translator.translate(last_line, src='ru')

        # keep the translated last line only if it mentions "involved";
        # otherwise record participation as "n/a"
        if "involved" in trans_last_line.text.split():
            participation = trans_last_line.text
        else:
            participation = "n/a"

        item = {
            'date_of_creation': doc,
            'title': trans_title.text,
            'url': abs_url,
            'district': trans_dis.text,
            'participation': participation,
        }
        items.append(item)
    
        df = pd.DataFrame(items, columns=['title', 'date_of_creation', 'url',
                                          'district', 'participation'])
        cur_datetime = datetime.strftime(datetime.now(), '%Y-%m-%d %H%M')
        os_dest = os.path.join(os.environ['HOME'], 'Desktop',
                               'Russ CoD{}'.format(cur_datetime))
        # to_csv() returns None, so there is nothing to yield here; note that
        # each call of parse_page rebuilds the file from a one-item list,
        # which is why only a single row ever appears
        df.to_csv('{}.csv'.format(os_dest))




process = CrawlerProcess()
process.crawl(RusmodSpider)
process.start()
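
Following the idea in the question of building the list outside parse_page, one way to get every row into a single CSV is to collect the items on the spider instance and write the file once when the spider closes. Below is a minimal sketch, not a drop-in fix: closed() is Scrapy's documented shortcut for the spider_closed signal, while the class name RusmodCsvSpider and the attribute name scraped_items are illustrative.

import os
from datetime import datetime

import pandas as pd
import scrapy


class RusmodCsvSpider(scrapy.Spider):
    name = 'ruscod_csv'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # one list shared by every parse_page call, instead of a fresh
        # list (and a fresh CSV file) per response
        self.scraped_items = []

    def parse_page(self, response):
        item = {}  # ... built exactly as in the question ...
        self.scraped_items.append(item)

    def closed(self, reason):
        # runs once, after every request has been handled
        df = pd.DataFrame(self.scraped_items,
                          columns=['title', 'date_of_creation', 'url',
                                   'district', 'participation'])
        stamp = datetime.strftime(datetime.now(), '%Y-%m-%d %H%M')
        dest = os.path.join(os.environ['HOME'], 'Desktop',
                            'Russ CoD{}.csv'.format(stamp))
        df.to_csv(dest, index=False)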

Why are you using pandas instead of Scrapy?
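
What the comment is pointing at: if parse_page simply yields the item dict instead of writing a file, Scrapy's built-in feed exports will serialize every yielded item into one CSV, with no pandas involved. A minimal sketch, assuming Scrapy 2.1 or later (where the FEEDS setting was introduced); the file name output.csv is arbitrary:

from scrapy.crawler import CrawlerProcess

# in parse_page, replace the DataFrame/to_csv block with:
#     yield item
# and let the feed exporter collect every yielded item into one file
process = CrawlerProcess(settings={
    'FEEDS': {
        'output.csv': {'format': 'csv'},
    },
})
process.crawl(RusmodSpider)
process.start()

The same export is available from the command line with scrapy crawl ruscod -o output.csv.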