Scrapy: crawling offsite links only 1 level deep
In Scrapy, how would I get the crawler to follow links outside the allowed domains only 1 level deep? During the crawl I want to verify that all outbound links on the site work and are not 404'd, but I don't want it to crawl the entire site of a non-allowed domain. I already handle 404s within the allowed domains. I know I could set the depth limit to 1, but that would affect the allowed domains as well.

My code:
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from smcrawl.items import Website
import smcrawl.util


def iterate(lists):
    # returns the first element of the list, or None if it is empty
    for a in lists:
        return a


class WalmartSpider(CrawlSpider):
    # let these statuses reach the callback instead of being dropped
    handle_httpstatus_list = [200, 302, 404, 500, 502]
    name = "surveymonkeycouk"
    allowed_domains = ["surveymonkey.co.uk", "surveymonkey.com"]
    start_urls = ['https://www.surveymonkey.co.uk/']

    rules = (
        Rule(
            LinkExtractor(
                allow=(),
                deny=(),
                process_value=smcrawl.util.trim),
            callback="parse_items",
            follow=True),
    )
    # process_links=lambda links: [link for link in links if not link.nofollow]
    # would filter out nofollow links

    def parse_start_url(self, response):
        # run the start URLs through the same item parsing as every other page
        return self.parse_items(response)

    def parse_items(self, response):
        sites = response.selector.xpath('//html')
        items = []
        for site in sites:
            # the same fields are collected for every status code
            item = Website()
            item['url'] = response.url
            item['referer'] = response.request.headers.get('Referer')
            item['canonical'] = site.xpath('//head/link[@rel="canonical"]/@href').extract()
            item['robots'] = site.xpath('//meta[@name="robots"]/@content').extract()
            item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
            item['description'] = site.xpath('//meta[@name="description"]/@content').extract()
            item['redirect'] = response.status
            if response.status != 404:
                # title and h1 are only collected for non-404 responses
                titles = iterate(site.xpath('/html/head/title/text()').extract())
                if titles:
                    titles = titles.strip()
                item['title'] = titles
                h1 = iterate(site.xpath('//h1/text()').extract())
                if h1:
                    h1 = h1.strip()
                item['h1'] = h1
            items.append(item)
        return items
OK, one thing you could do is avoid using allowed_domains, so that no offsite requests get filtered out. But to make it interesting, you could create your own OffsiteMiddleware, something like this:
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware
from scrapy.utils.httpobj import urlparse_cached


class MyOffsiteMiddleware(OffsiteMiddleware):
    # offsite hosts that have already been visited once
    offsite_domains = set()

    def should_follow(self, request, spider):
        regex = self.host_regex
        host = urlparse_cached(request).hostname or ''
        if host in self.offsite_domains:
            # we already fetched one page from this offsite host, stop here
            return False
        if not bool(regex.search(host)):
            # first request to an offsite host: remember it, but let it through
            self.offsite_domains.add(host)
        return True
I haven't tested it, but it should work. Keep in mind that you need to disable the default middleware and enable your own in the settings:
SPIDER_MIDDLEWARES = {
    'myproject.middlewares.MyOffsiteMiddleware': 543,
    'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None,
}
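If you only want this behaviour for a single spider, a minimal sketch (assuming the class lives in myproject/middlewares.py, which is not stated in the original post) would be to enable it through the spider's custom_settings instead of the project-wide settings:

from scrapy.spiders import CrawlSpider


class WalmartSpider(CrawlSpider):
    name = "surveymonkeycouk"
    # per-spider override: swap the stock OffsiteMiddleware for the custom one
    custom_settings = {
        'SPIDER_MIDDLEWARES': {
            'myproject.middlewares.MyOffsiteMiddleware': 543,
            'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': None,
        }
    }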
I've accepted the answer. It's a bit different from the solution I was looking for, but with a whitelist of URLs I'm willing to crawl, the end result is the same. Thanks everyone!
Can you share your code so far?
@eLRuLL Added it.
Can you define offsite_requests?
self.offsite_domains.
How would I crawl all of the start domain, but only 1 level deep into the external domains?
I don't understand what you mean, did you try the code above?
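For reference, a minimal sketch of the whitelist-style approach mentioned in the comments (the OutboundCheckSpider name and the check_offsite callback are illustrative, not from the original post): drop allowed_domains and use two rules, one that follows links within your own domains and one that fetches everything else without following it, so external pages are only ever crawled one level deep:

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor


class OutboundCheckSpider(CrawlSpider):
    name = "outboundcheck"
    # note: no allowed_domains, so offsite requests are not filtered
    start_urls = ['https://www.surveymonkey.co.uk/']
    handle_httpstatus_list = [200, 302, 404, 500, 502]

    rules = (
        # crawl our own domains normally, following links
        Rule(LinkExtractor(allow_domains=['surveymonkey.co.uk', 'surveymonkey.com']),
             callback='parse_items', follow=True),
        # fetch external links once to record their status, but never follow them
        Rule(LinkExtractor(deny_domains=['surveymonkey.co.uk', 'surveymonkey.com']),
             callback='check_offsite', follow=False),
    )

    def check_offsite(self, response):
        # illustrative callback: just record the status of the outbound page
        yield {'url': response.url, 'status': response.status}

Because the second rule has follow=False, an external page is fetched once so its status can be checked, but none of its links are queued.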