Python Scrapy: trying to authenticate and crawl a site

I am trying to collect data from a logged-in session, but the crawl keeps failing with the following error:

Traceback (most recent call last):
  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 1201, in mainLoop
    self.runUntilCurrent()
  File "/usr/lib/python2.7/dist-packages/twisted/internet/base.py", line 824, in runUntilCurrent
    call.func(*call.args, **call.kw)
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 382, in callback
    self._startRunCallbacks(result)
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 490, in _startRunCallbacks
    self._runCallbacks()
--- <exception caught here> ---
  File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 577, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/home/suz/user_login/login_user/login_u/login_u/spiders/spiders.py", line 60, in check_login_response
    return self.initialized()
  File "/usr/local/lib/python2.7/dist-packages/scrapy/contrib/spiders/init.py", line 15, in initialized
    return self.__dict__.pop('_postinit_reqs')
exceptions.KeyError: '_postinit_reqs'

My spider (spiders.py) looks like this:

from scrapy.contrib.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import Rule

from scrapy.spider import BaseSpider
from scrapy.selector import Selector

from login_u.items import myspiderBotItem

class LinkedPySpider(InitSpider):
    name = 'myspider'
    # allowed_domains = ['linkedin.com']
    login_page = 'https://www.linkedin.com/uas/login'
    # start_urls = ["http://www.linkedin.com/csearch/results?type=companies&keywords=&pplSearchOrigin=GLHD&pageKey=member-home&search=Search#facets=pplSearchOrigin%3DFCTD%26keywords%3D%26search%3DSubmit%26facet_CS%3DC%26facet_I%3D80%26openFacets%3DJO%252CN%252CCS%252CNFR%252CF%252CCCR%252CI"]
    start_urls = ["http://www.linkedin.com/"]

    def __init__(self, username, password, *args, **kwargs):
        global rules
        # query = query.replace(' ','-')
        self.rules = (Rule(SgmlLinkExtractor(allow=('/' + '*')), callback='parse_item', follow=True),)
        super(LinkedPySpider, self).__init__(*args, **kwargs)
        self.allowed_domains = ['linkedin.com']
        self.start_urls = [kwargs.get('start_url')]
        print(self.start_urls)
        self.username = username
        self.password = password

    def init_request(self):
        """This function is called before crawling starts."""
        print self.login_page
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        """Generate a login request."""
        username = self.username
        password = self.password
        return FormRequest.from_response(response,
                    formdata={'session_key': username, 'session_password': password},
                    callback=self.check_login_response)

    def check_login_response(self, response):
        """Check the response returned by a login request to see if we are successfully logged in."""
        if "Sign Out" in response.body:
            self.log("\n\n\nSuccessfully logged in. Let's start crawling!\n\n\n")
            # Now the crawling can begin..
            print "2222222222222222"
            print self.initialized()
            print "33333333333333333"

            return self.initialized() # ****THIS LINE FIXED THE LAST PROBLEM*****

        else:
            self.log("\n\n\nFailed, Bad times :(\n\n\n")
            # Something went wrong, we couldn't log in, so nothing happens.

    def parse(self, response):
        self.log("\n\n\n We got data! \n\n\n")
        # hxs = HtmlXPathSelector(response)
        sites = Selector(text=response.body).xpath('//div[contains(@id, "identity")]//section/div/div/h3')
        items = []
        for site in sites:
            item = myspiderBotItem()
            item['title'] = site.xpath('a/text()').extract()
            item['link'] = site.xpath('a/@href').extract()
            items.append(item)
        return items
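
The KeyError itself appears to come from calling self.initialized() twice: as the traceback shows, InitSpider.initialized() does self.__dict__.pop('_postinit_reqs'), so the pop succeeds on the "print self.initialized()" line and then fails on the "return self.initialized()" line. A minimal sketch of check_login_response that calls initialized() only once, keeping the rest of the spider unchanged:

    def check_login_response(self, response):
        """Check the response returned by the login request."""
        if "Sign Out" in response.body:
            self.log("\n\n\nSuccessfully logged in. Let's start crawling!\n\n\n")
            # initialized() pops '_postinit_reqs' from __dict__, so it can only
            # be called once; return it directly instead of printing it first
            # and then calling it again.
            return self.initialized()
        else:
            self.log("\n\n\nFailed, Bad times :(\n\n\n")

Also worth checking: self.start_urls = [kwargs.get('start_url')] becomes [None] unless the spider is run with a start_url argument, so even after a successful login the post-initialization requests may have nothing valid to crawl.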