Python Raise VALUERROR(';请求url中缺少方案:%s';%self.\u url)VALUERROR:请求url中缺少方案:javascript:void(0);

Python Raise VALUERROR(';请求url中缺少方案:%s';%self.\u url)VALUERROR:请求url中缺少方案:javascript:void(0);,python,web-scraping,scrapy,Python,Web Scraping,Scrapy,这是我的蜘蛛代码 spider.py import scrapy class ExampleSpider(scrapy.Spider): name = 'moneycontrol' # allowed_domains = ['moneycontrol.com'] start_urls = ['https://www.moneycontrol.com/india/stockpricequote/'] def parse(self, response):

这是我的蜘蛛代码

spider.py

import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'moneycontrol'
    # allowed_domains = ['moneycontrol.com']
    start_urls = ['https://www.moneycontrol.com/india/stockpricequote/']

    def parse(self, response):
        stoke_link_list = response.css("table a::attr(href)").getall()

        if response.css("span.span_price_wrap::text").getall(): # value of this variable only present in first run
            stock_name =  response.css("h1.pcstname::text").get()
            bse_price, nse_price = response.css("span.span_price_wrap::text").getall()
            print(stock_name + ' ' + bse_price + ' ' + nse_price)
        else:
            print('stock_name bse_price nse_price')

        for link in stoke_link_list:
            if link is not None:
                next_page = response.urljoin(link)
                # yield scrapy.Request(next_page, callback=self.parse)
                yield response.follow(next_page, callback=self.parse)
运行此命令时,我遇到了一个奇怪的错误。它在运行时刮取一些网站时出错,在刮取不同的网站时再次出错(我的意思是可能会为以前的网站运行)

错误:

2020-08-20 19:52:49 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.moneycontrol.com/mutual-funds/nav/motilal-oswal-midcap-30-fund-regular-plan/MMO025> (referer: https://www.moneycontrol.com/india/stockpricequote/pharmaceuticals/abbottindia/AI51)
Traceback (most recent call last):


  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/defer.py", line 120, in iter_errback
    yield next(it)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/python.py", line 346, in __next__
    return next(self.data)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/python.py", line 346, in __next__
    return next(self.data)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
    for x in result:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/referer.py", line 340, in <genexpr>
    return (_set_referer(r) for r in result or ())
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
    return (r for r in result or () if _filter(r))
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
    return (r for r in result or () if _filter(r))
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/moneycontrol/moneycontrol/spiders/my_spider.py", line 24, in parse
    yield scrapy.Request(next_page, callback=self.parse)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py", line 25, in __init__
    self._set_url(url)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py", line 69, in _set_url
    raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: javascript:void(0);
2020-08-20 19:52:49[scrapy.core.scraper]错误:蜘蛛错误处理(参考:https://www.moneycontrol.com/india/stockpricequote/pharmaceuticals/abbottindia/AI51)
回溯(最近一次呼叫最后一次):
iter\u errback中的文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/defer.py”,第120行
下一个(it)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/python.py”,第346行,下一页__
返回下一个(self.data)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/python.py”,第346行,下一页__
返回下一个(self.data)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/offsite.py”,第29行,进程中输出
对于结果中的x:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/referer.py”,第340行,在
返回(_set_referer(r)表示结果中的r或())
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/urlength.py”,第37行
返回(结果中的r表示r或()如果_过滤器(r))
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/depth.py”,第58行,in
返回(结果中的r表示r或()如果_过滤器(r))
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/moneycontrol/moneycontrol/spider/my_spider.py”,第24行,在parse中
生成scrapy.Request(下一页,callback=self.parse)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/http/request/_____;init.py”,第25行__
自我设置url(url)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/http/request/__init__.py”,第69行,在u set_uurl中
raise VALUERROR('请求url中缺少方案:%s'%self.\u url)
ValueError:请求url中缺少方案:javascript:void(0);
Run2

2020-08-20 19:55:15[scrapy.core.scraper]错误:蜘蛛错误处理(参考:https://www.moneycontrol.com/india/stockpricequote/pharmaceuticals/alkemlaboratories/AL05)
回溯(最近一次呼叫最后一次):
iter\u errback中的文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/defer.py”,第120行
下一个(it)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/python.py”,第346行,下一页__
返回下一个(self.data)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/utils/python.py”,第346行,下一页__
返回下一个(self.data)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/offsite.py”,第29行,进程中输出
对于结果中的x:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/referer.py”,第340行,在
返回(_set_referer(r)表示结果中的r或())
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/urlength.py”,第37行
返回(结果中的r表示r或()如果_过滤器(r))
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/spidermiddleware/depth.py”,第58行,in
返回(结果中的r表示r或()如果_过滤器(r))
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site packages/scrapy/core/spidermw.py”,第64行,可编辑
对于iterable中的r:
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/moneycontrol/moneycontrol/spider/my_spider.py”,第24行,在parse中
生成scrapy.Request(下一页,callback=self.parse)
文件“/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scra
2020-08-20 19:55:15 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.moneycontrol.com/mutual-funds/nav/dsp-equity-opportunities-fund-regular-plan/MDS011> (referer: https://www.moneycontrol.com/india/stockpricequote/pharmaceuticals/alkemlaboratories/AL05)
Traceback (most recent call last):
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/defer.py", line 120, in iter_errback
    yield next(it)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/python.py", line 346, in __next__
    return next(self.data)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/utils/python.py", line 346, in __next__
    return next(self.data)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
    for x in result:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/referer.py", line 340, in <genexpr>
    return (_set_referer(r) for r in result or ())
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
    return (r for r in result or () if _filter(r))
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
    return (r for r in result or () if _filter(r))
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
    for r in iterable:
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/moneycontrol/moneycontrol/spiders/my_spider.py", line 24, in parse
    yield scrapy.Request(next_page, callback=self.parse)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py", line 25, in __init__
    self._set_url(url)
  File "/home/vishvajeet/Desktop/Programming/python/scrapy/env/lib/python3.6/site-packages/scrapy/http/request/__init__.py", line 69, in _set_url
    raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: javascript:void(0);
for link in stoke_link_list:
    if link is not None:
        if not link.startswith("https://moneycontrol.com/")
            page_url = ("https://moneycontrol.com/" + link)