Python: crawl one spider after another

I am very new to web scraping. I am trying to crawl a novel-reader website to get the novel info and the chapter contents, so my approach is to create two spiders: one to fetch the novel information and another to fetch the contents of each chapter.

import scrapy

class BookSpider(scrapy.Spider):
    name = "book"

    def __init__(self, books=[], **kwargs):
        if isinstance(books,str):
            books = [books]
        self.start_urls = [f'https://daonovel.com/novel/{book}/' for book in sorted(books)]
        super().__init__(**kwargs) 

    def parse(self, response):
        # self.remove_content(response.css("div.post-title h1 span"))
        fullurl = response.url
        url = fullurl.split("/")[-2]
        title = response.css("div.post-title h1::text").extract()
        title = title[len(title)-1].strip()
        authors = response.css('div.author-content a::text').getall()
        genres = response.css('div.genres-content a::text').getall()
        release = response.css('div.post-status div.post-content_item:nth-child(1) div.summary-content::text').get().strip()
        status = response.css('div.post-status div.post-content_item:nth-child(2) div.summary-content::text').get().strip()
        summary = response.css('div.summary__content p').getall()

        chapters = response.css('ul.version-chap li a::attr(href)').extract()
        chapters.reverse()

        return {
            'fullurl' : fullurl,
            'url' : url,
            'title' : title,
            'authors' : authors,
            'genres' : genres,
            'status' : status,
            'release' : release,
            'summary' : summary,
            'chapters' : chapters
        }

class ChapterSpider(scrapy.Spider):
    name = "chapter"

    def __init__(self, book="", chapters=[], **kwargs):
        if isinstance(chapters,str):
            chapters = [chapters]
        self.book = book
        self.start_urls = [f'https://daonovel.com/novel/{book}/{chapter}/' for chapter in chapters]
        super().__init__(**kwargs) 

    def parse(self, response):
        title = response.css("ol.breadcrumb li.active::text").get().strip()
        
        container = response.css("div.cha-words p").getall() if response.css("div.cha-words p").getall() else  response.css("div.text-left p").getall()
        content = []
        for p in container:
            content.append(str(p))
        
        return {
            'title' : title,
            'content' : content,
            'book_url': self.book,
            'url' : response.url.split("/")[-2]
        }
After that, I created a collector to gather and process all the data coming from the spiders:

from scrapy import signals

class Collector():
    def __init__(self, process, books=[]):
        self.process = process
        if isinstance(books, str):
            books = [books]
        self.books = books
        self.books_data = []

    def create_crawler(self, spider, function, **kwargs):
        # we need Crawler instance to access signals
        crawler = self.process.create_crawler(spider)
        crawler.signals.connect(function, signal=signals.item_scraped)
        x = self.process.crawl(crawler, **kwargs)
        return x

    def process_book_data(self, item, response, spider):
        item['authors'] = [author.strip() for author in item['authors']]
        item['genres'] = [genre.strip() for genre in item['genres']]

        summary = [line for line in item['summary'] if not any(word in line.lower() for word in ("wuxiaworld", "disclaimer"))]
        item['summary'] = str("\n").join(summary)

        item['chapters'] = [chapter.replace(item['fullurl'], '').replace('/', '') for chapter in item['chapters']]
        self.books_data.append(item)

    def process_chapter_data(self, item, response, spider):
        item['content'] = str("\n").join(item['content'])
        
        for book in self.books_data:
            if book['url'] == item['book_url']:
                book['chapters'][book['chapters'].index(item['url'])] = item
    
    def crawl_books(self):
        return self.create_crawler(BookSpider, self.process_book_data, books=self.books)
    
    def crawl_chapters(self, book, chapters):
        return self.create_crawler(ChapterSpider, self.process_chapter_data, book=book, chapters=chapters)
If I manually put the chapter crawl before process.start(), it works, but that is not the purpose of this script.
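For illustration, the working "manual" version is presumably something along these lines; the chapter slugs below are invented, and the book slug is the one used in the attempt further down:

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess()
collector = Collector(process, books="a-stay-at-home-dads-restaurant-in-an-alternate-world")
collector.crawl_books()
# chapter slugs hard-coded by hand (invented values, for illustration only)
collector.crawl_chapters("a-stay-at-home-dads-restaurant-in-an-alternate-world",
                         ["chapter-1", "chapter-2"])
process.start()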

Now my question is: how can I run the chapter spider after the book spider has finished collecting its data? Here is my attempt, which didn't work:

from scrapy.crawler import CrawlerProcess
process = CrawlerProcess()
collector = Collector(process, books="a-stay-at-home-dads-restaurant-in-an-alternate-world")
collector.crawl_books()
process.start()

print(collector.books_data) # this works
for book in collector.books_data:
    collector.crawl_chapters(book['url'], book['chapters']) # this didn't work
    print("Chapters ==>", collector.books_data)
If I add process.start() before print("Chapters ==>", collector.books_data), it raises a twisted.internet.error.ReactorNotRestartable error.
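For reference, the Scrapy documentation describes running crawls one after another in a single process with CrawlerRunner and Twisted deferreds, so the reactor is started exactly once. A minimal, untested sketch that reuses the Collector above (it only calls create_crawler and crawl, which CrawlerRunner also provides) might look like this:

from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging

configure_logging()
runner = CrawlerRunner()  # unlike CrawlerProcess, it leaves reactor management to the caller
collector = Collector(runner, books="a-stay-at-home-dads-restaurant-in-an-alternate-world")

@defer.inlineCallbacks
def crawl():
    # wait for the book spider to finish before scheduling the chapter spiders
    yield collector.crawl_books()
    for book in collector.books_data:
        yield collector.crawl_chapters(book['url'], book['chapters'])
    reactor.stop()

crawl()
reactor.run()  # started exactly once, stopped from crawl() above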


I have read this question, but I couldn't figure out how to implement it in my code.

I would suggest changing your spider architecture: Scrapy is not supposed to chain spiders (it is possible, of course, but it is generally bad practice); it is supposed to chain requests within the same spider.

Your problem is caused by the fact that Scrapy is designed to produce flat lists of items, while you need nested ones like
book = {'title': ..., 'chapters': [{some chapter data}, ...]}

I would suggest the following architecture for your spider:

def parse(self, response):
    
    # parse book data here

    book_item = {
        'fullurl' : fullurl,
        'url' : url,
        'title' : title,
        'authors' : authors,
        'genres' : genres,
        'status' : status,
        'release' : release,
        'summary' : summary,
        'chapters' : []
    }
    chapter_urls = ...list of book chapter urls here.
    chapter_url = chapter_urls.pop()

    yield scrapy.Request(
        url=chapter_url,
        callback=self.parse_chapter,
        meta={'book': book_item, 'chapter_urls': chapter_urls}
    )

def parse_chapter(self, response):
    book = response.meta['book']
    chapter_urls = response.meta['chapter_urls']
    
    # parse chapter data here
    
    chapter = {
        'title' : title,
        'content' : content,
        'book_url': book['url'],
        'url' : response.url.split("/")[-2]
    }
    book['chapters'].append(chapter)
    if not chapter_urls:
        yield book
    else:
        chapter_url = chapter_urls.pop()
        yield scrapy.Request(
            url=chapter_url,
            callback=self.parse_chapter,
            meta={'book': book, 'chapter_urls': chapter_urls}
        )
This will produce book entities with their chapters nested inside.
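For example, assuming the reworked parse and parse_chapter callbacks are placed inside the existing BookSpider class (so it keeps its name and start_urls logic), the whole crawl could run in a single process and be exported straight to JSON; the FEEDS setting and the output file name below are only illustrative:

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={
    "FEEDS": {"books.json": {"format": "json"}},  # illustrative feed export
})
process.crawl(BookSpider, books="a-stay-at-home-dads-restaurant-in-an-alternate-world")
process.start()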

Hope this helps, even though it is not an exact answer to your question. Good luck!


Second version:

class YourSpider(Spider):
    books = {}
    ...

    def parse(self, response):
        # Get book info here.
        book_item = {
            'fullurl' : fullurl,
            'url' : url,
            'title' : title,
            'authors' : authors,
            'genres' : genres,
            'status' : status,
            'release' : release,
            'summary' : summary,
            'chapters' : []
        } 
        self.books[book_item['title']] = book_item
        chapter_urls = [..list of chapter urls]
        
        # This triggers multiple concurrent requests, one per chapter
        for chapter_url in chapter_urls:
            yield scrapy.Request(
                url=chapter_url,
                callback=self.parse_chapter,
                meta={'book_title': book_item['title']}
            )

    def parse_chapter(self, response):
        book_title = response.meta['book_title']

        # parse chapter data here

        chapter = {
            'title' : title,
            'content' : content,
            'book_url': response.url.split("/")[-3],  # book slug taken from the chapter URL
            'url' : response.url.split("/")[-2]
        }
        self.books[book_title]['chapters'].append(chapter)

        # yields the book each time a chapter arrives; the last yield contains all chapters
        yield self.books[book_title]
         

Thanks for your reply, it works, but with only one spider it will take a very long time to iterate over all the chapters, let alone multiple books. Is there a way to split the job across multiple spiders, for example five spiders fetching the chapters of a single book?

It takes a long time because the chapters are scraped sequentially, one after another. They can be scraped asynchronously, several at a time, but in that case you have to keep the book storage on the spider instance. I will update my answer now.

OK, then my next question is: how do I set up multiple asynchronous requests in my spider? To parse a chapter, don't I have to wait for the book to be parsed first?

By the way, I have figured out how to make multiple asynchronous requests: by using a loop. I have updated your answer accordingly. Thank you so much for your help!
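Putting the pieces together, below is a rough, untested sketch of a single spider that combines the selectors from the question with the request chaining from the answer. The pending counter is an extra illustration that is not in the original answer; it lets each book be yielded exactly once, after its last chapter response arrives:

import scrapy

class NestedBookSpider(scrapy.Spider):
    name = "nested_book"

    def __init__(self, books=None, **kwargs):
        if isinstance(books, str):
            books = [books]
        self.start_urls = [f'https://daonovel.com/novel/{book}/' for book in sorted(books or [])]
        super().__init__(**kwargs)

    def parse(self, response):
        titles = response.css("div.post-title h1::text").getall()
        book = {
            'fullurl': response.url,
            'url': response.url.split("/")[-2],
            'title': titles[-1].strip() if titles else "",
            'authors': [a.strip() for a in response.css('div.author-content a::text').getall()],
            'genres': [g.strip() for g in response.css('div.genres-content a::text').getall()],
            'summary': "\n".join(response.css('div.summary__content p').getall()),
            'chapters': [],
        }
        chapter_urls = response.css('ul.version-chap li a::attr(href)').getall()
        book['pending'] = len(chapter_urls)  # chapter responses still outstanding
        for chapter_url in chapter_urls:
            # all chapter requests are scheduled at once and fetched concurrently
            yield scrapy.Request(chapter_url, callback=self.parse_chapter, meta={'book': book})

    def parse_chapter(self, response):
        book = response.meta['book']
        paragraphs = (response.css("div.cha-words p").getall()
                      or response.css("div.text-left p").getall())
        book['chapters'].append({
            'title': (response.css("ol.breadcrumb li.active::text").get() or "").strip(),
            'content': "\n".join(paragraphs),
            'url': response.url.split("/")[-2],
        })
        book['pending'] -= 1
        if book['pending'] == 0:
            # last chapter for this book; chapters may arrive out of order due to concurrency
            del book['pending']
            yield book

Note that a failed chapter request would leave pending above zero and the book would never be yielded, so a real run would want an errback on the chapter requests.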