
Form scraping with Scrapy when the page embeds JavaScript


I am trying to scrape a website. My spider is functional, but the site embeds JavaScript in the form that produces the results, and I cannot get past it. I have read about Selenium and how to use it, but I still do not understand how to scrape the page once the dynamically generated HTML has loaded, while still passing the form parameters.

Here is my spider's code; any help, recommendation, or code snippet is welcome. I have browsed and read a lot of posts, but to no avail.

from scrapy.spiders import BaseSpider
from scrapy.http import FormRequest
from scrapy.selector import Selector
from tax1.items import Tax1Item
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from selenium import webdriver
import time


class tax1Spider(BaseSpider):
    name = "tax1Spider"
    allowed_domains = ["taxesejour.impots.gouv.fr"]  # bare domain, no scheme
    start_urls = ["http://taxesejour.impots.gouv.fr/DTS_WEB/UK/"]

    def parse(self, response):
        # submit the search form with the department and town pre-filled
        yield FormRequest.from_response(response,
                                        formname='PAGE_DELIBV2',
                                        formdata={'A8': '05 - Hautes-alpes',
                                                  'A10': 'AIGUILLES'},
                                        callback=self.parse1)

    # note: rules are only honoured by CrawlSpider, so they have no
    # effect while the class inherits from BaseSpider
    rules = (
        Rule(SgmlLinkExtractor(allow=(),
                               restrict_xpaths=('//div[@class="lh0 dzSpan dzA15"]',)),
             callback="parse1", follow=True),  # trailing comma keeps this a tuple
    )

    def __init__(self):
        BaseSpider.__init__(self)  # the class inherits BaseSpider, not CrawlSpider
        # use any browser you wish
        self.browser = webdriver.Firefox()

    def __del__(self):
        self.browser.close()

    def parse1(self, response):
        # re-open the URL in the real browser so the embedded JavaScript runs
        self.browser.get(response.url)
        time.sleep(3)

        sel = Selector(text=self.browser.page_source)
        item = Tax1Item()
        item['message'] = sel.xpath('//td[@id="tzA18"]').extract()
        print item['message']

        return item
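
For reference, one way to approach the question as asked is to skip the FormRequest entirely and drive the form through Selenium itself: fill in the two fields, submit, and then wait explicitly for the JavaScript-generated result instead of sleeping for a fixed time. The sketch below is only an illustration under assumptions: the field names ('A8', 'A10') and the result cell id ('tzA18') are taken from the spider above, and it assumes the two fields are drop-downs (select elements); Select, WebDriverWait and expected_conditions are standard Selenium helpers.

# A minimal sketch, independent of Scrapy: fill and submit the form with
# Selenium, then wait for the JavaScript-generated results.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Firefox()
try:
    browser.get("http://taxesejour.impots.gouv.fr/DTS_WEB/UK/")

    # fill the two form fields (assumed here to be <select> drop-downs;
    # use send_keys() instead if they turn out to be text inputs)
    Select(browser.find_element_by_name("A8")).select_by_visible_text("05 - Hautes-alpes")
    Select(browser.find_element_by_name("A10")).select_by_visible_text("AIGUILLES")

    # submit the form the way a user would
    browser.find_element_by_name("A10").submit()

    # wait until the JavaScript has written the result cell, up to 10 seconds
    message = WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.ID, "tzA18"))
    ).text
    print message
finally:
    browser.quit()

WebDriverWait polls the page until the condition holds (or the timeout expires), which is more robust than time.sleep(3) when the JavaScript takes a variable amount of time to render the results.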