Cookies 雅虎财经历史数据下载器url不工作

Cookies 雅虎财经历史数据下载器url不工作,cookies,session-cookies,finance,yahoo-api,yahoo-finance,Cookies,Session Cookies,Finance,Yahoo Api,Yahoo Finance,我使用以下url从yahoo finance获取历史数据。自2017年5月16日起,url不起作用 似乎他们更改了url,新的url是: 在上面更改的URL中有一个会话cookie,它是crump。你知道如何以编程方式(在JAVA中)获取此cookie吗?你可以在Chrome中手动保存crump/cookie对,或者使用类似的方法生成它。然后,只需在java中设置cookie头并在URL中传递相应的crump,就可以工作了,现在我只需解析csv。我想我会分享,因为我在语法上有问题 Dim c

我使用以下url从yahoo finance获取历史数据。自2017年5月16日起,url不起作用

似乎他们更改了url,新的url是:


在上面更改的URL中有一个会话cookie,它是crump。你知道如何以编程方式(在JAVA中)获取此cookie吗?

你可以在Chrome中手动保存crump/cookie对,或者使用类似的方法生成它。然后,只需在java中设置cookie头并在URL中传递相应的crump,就可以工作了,现在我只需解析csv。我想我会分享,因为我在语法上有问题

' Manually captured crumb/cookie pair (copy them from the browser's dev tools).
Dim crumb As String:    crumb = "xxxx"
Dim cookie As String:   cookie = "yyyy"

' v7 download endpoint: epoch-second date range, daily bars, crumb appended.
Dim urlStock As String: urlStock = "https://query1.finance.yahoo.com/v7/finance/download/SIRI?" & _
    "period1=1274158800&" & _
    "period2=1495059477&" & _
    "interval=1d&events=history&crumb=" & crumb

' The declared type must match the instantiated class: the original declared
' MSXML2.XMLHTTP but created ServerXMLHTTP, which raises a type-mismatch at
' the Set. ServerXMLHTTP is also the one that honors a manual Cookie header.
Dim http As MSXML2.ServerXMLHTTP: Set http = New MSXML2.ServerXMLHTTP
http.Open "GET", urlStock, False
http.setRequestHeader "Cookie", cookie
http.send

我最近编写了一个简单的python脚本来下载单个股票的历史记录
下面是一个如何调用它的示例:
python get_quote_history.py --symbol=IBM --from=2017-01-01 --to=2017-05-25 -o IBM.csv
这将下载2017-01-01至2017-05-25的IBM历史价格,并将其保存在IBM.csv文件中

import re
import urllib2
import calendar
import datetime
import getopt
import sys
import time

# Quote/history page whose HTML embeds the CrumbStore token; {0} is the ticker.
crumble_link = 'https://finance.yahoo.com/quote/{0}/history?p={0}'
# Captures the crumb value from the page's inline JSON blob.
crumble_regex = r'CrumbStore":{"crumb":"(.*?)"}'
# Captures the first Set-Cookie value from the raw response headers.
cookie_regex = r'Set-Cookie: (.*?); '
# v7 download endpoint: ticker, epoch-second range, daily interval, crumb.
quote_link = 'https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history&crumb={}'


def get_crumble_and_cookie(symbol):
    """Fetch the quote/history page for `symbol` and return (crumb, cookie)."""
    page = urllib2.urlopen(crumble_link.format(symbol))
    # The session cookie arrives in the Set-Cookie response header.
    cookie_str = re.search(cookie_regex, str(page.info())).group(1)
    # The matching crumb sits in the page's inline CrumbStore JSON.
    crumble_str = re.search(crumble_regex, page.read()).group(1)
    return crumble_str, cookie_str


def download_quote(symbol, date_from, date_to):
    """Download the daily-history CSV for `symbol` between the two
    YYYY-MM-DD dates. Returns the raw CSV text, or "" after 5 failed tries.
    Python 2 only (urllib2, print statements)."""
    # Convert the date strings to the epoch seconds the API expects.
    time_stamp_from = calendar.timegm(datetime.datetime.strptime(date_from, "%Y-%m-%d").timetuple())
    time_stamp_to = calendar.timegm(datetime.datetime.strptime(date_to, "%Y-%m-%d").timetuple())

    attempts = 0
    while attempts < 5:
        # A fresh crumb/cookie pair is scraped on every attempt, since a
        # failure may mean the previous pair went stale.
        crumble_str, cookie_str = get_crumble_and_cookie(symbol)
        link = quote_link.format(symbol, time_stamp_from, time_stamp_to, crumble_str)
        #print link
        # The cookie must accompany the crumb or the endpoint returns 401.
        r = urllib2.Request(link, headers={'Cookie': cookie_str})

        try:
            response = urllib2.urlopen(r)
            text = response.read()
            print "{} downloaded".format(symbol)
            return text
        except urllib2.URLError:
            print "{} failed at attempt # {}".format(symbol, attempts)
            attempts += 1
            # Linear back-off: 2s, 4s, 6s, ...
            time.sleep(2*attempts)
    return ""

if __name__ == '__main__':
    # Debug aid: show a scraped (crumb, cookie) pair before parsing args.
    print get_crumble_and_cookie('KO')
    from_arg = "from"
    to_arg = "to"
    symbol_arg = "symbol"
    output_arg = "o"
    # Long options take values (trailing "="); -o is the only short option.
    opt_list = (from_arg+"=", to_arg+"=", symbol_arg+"=")
    try:
        options, args = getopt.getopt(sys.argv[1:],output_arg+":",opt_list)
    except getopt.GetoptError as err:
        # NOTE(review): execution continues after a parse error, so the loop
        # below hits a NameError on the unbound `options`.
        print err

    # NOTE(review): *_val names are only bound when the matching flag is
    # present; omitting a flag raises NameError further down.
    for opt, value in options:
        if opt[2:] == from_arg:          # strips the leading "--"
            from_val = value
        elif opt[2:] == to_arg:
            to_val = value
        elif opt[2:] == symbol_arg:
            symbol_val = value
        elif opt[1:] == output_arg:      # strips the leading "-"
            output_val = value

    print "downloading {}".format(symbol_val)
    text = download_quote(symbol_val, from_val, to_val)

    # Binary mode: write the CSV bytes exactly as received.
    with open(output_val, 'wb') as f:
        f.write(text)
    print "{} written to {}".format(symbol_val, output_val)
重新导入
导入urllib2
导入日历
导入日期时间
导入getopt
导入系统
导入时间
崩溃https://finance.yahoo.com/quote/{0}/历史?p={0}'
crumble_regex=r'crumpstore:{“crump”:“(.*?”}”
cookie_regex=r'Set-cookie:(.*);'
引用链接https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history&crumb={}'
def get_CREAMP_和_cookie(符号):
link=crumble\u link.格式(符号)
response=urlib2.urlopen(链接)
match=re.search(cookie\u regex,str(response.info()))
cookie_str=match.group(1)
text=response.read()
匹配=重新搜索(crumble_regex,文本)
crumble_str=match.group(1)
返回crumble\u str,cookie\u str
def下载报价(符号、日期自、日期至):
time\u stamp\u from=calendar.timegm(datetime.datetime.strtime(date\u from,“%Y-%m-%d”).timetuple())
time\u stamp\u to=calendar.timegm(datetime.datetime.strtime(date\u to,“%Y-%m-%d”).timetuple())
尝试次数=0
当尝试次数小于5次时:
crumble_str,cookie_str=获取crumble_和cookie(符号)
link=quote\u link.format(符号、时间戳\u from、时间戳\u to、crumble\u str)
#打印链接
请求(链接,标题={'Cookie':Cookie\u str})
尝试:
response=urlib2.urlopen(r)
text=response.read()
打印“{}下载”。格式(符号)
返回文本
除urllib2.URLError外:
打印“{}在尝试{}时失败”。格式(符号,尝试)
尝试次数+=1
时间。睡眠(2次尝试)
返回“”
如果uuuu name uuuuuu='\uuuuuuu main\uuuuuuu':
打印“吃碎”和“饼干”(“KO”)
from_arg=“from”
to_arg=“to”
symbol\u arg=“symbol”
输出参数
opt_list=(从_arg+“=”,到_arg+“=”,符号_arg+“=”)
尝试:
选项,args=getopt.getopt(sys.argv[1:],output_arg+“:”,opt_list)
除getopt.GetOpError作为错误外:
打印错误
对于opt,选项中的值:
如果opt[2:::==from_arg:
from_val=值
elif opt[2::][=至参数:
to_val=值
elif opt[2::][=符号参数:
符号=值
elif opt[1::][=输出参数:
输出值=值
打印“下载{}”。格式(symbol_val)
text=下载报价(符号、从、到)
将open(output_val,'wb')作为f:
f、 书写(文本)
打印{}写入{}格式(符号值,输出值)

基于 Andrea Galeazzi 的优秀答案;添加了拆分(split)和分红(dividend)事件选项,并针对 Python 3 做了相应调整

还做了调整,使返回结果包含“to”结束日期当天的数据;此前的代码返回的区间不包括结束日期,仅此差别

请注意,雅虎在价格舍入、列顺序和拆分语法方面做了一些小改动

## Downloaded from
## https://stackoverflow.com/questions/44044263/yahoo-finance-historical-data-downloader-url-is-not-working
## Modified for Python 3
## Added --event=history|div|split   default = history
## changed so "to:date" is included in the returned results
## usage: download_quote(symbol, date_from, date_to, events).decode('utf-8')

import re
from urllib.request import urlopen, Request, URLError
import calendar
import datetime
import getopt
import sys
import time

# Quote/history page whose HTML embeds the CrumbStore token; {0} is the ticker.
crumble_link = 'https://finance.yahoo.com/quote/{0}/history?p={0}'
# Captures the crumb value from the page's inline JSON blob.
crumble_regex = r'CrumbStore":{"crumb":"(.*?)"}'
# Captures the first Set-Cookie value from the raw response headers.
cookie_regex = r'Set-Cookie: (.*?); '
# v7 download endpoint: ticker, epoch-second range, event type, crumb.
quote_link = 'https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events={}&crumb={}'


def get_crumble_and_cookie(symbol):
    """Scrape the quote/history page of *symbol* and return (crumb, cookie).

    Yahoo's download endpoint requires a `crumb` query parameter tied to a
    session cookie; both are harvested from the history page's headers/HTML.

    Raises:
        ValueError: if the Set-Cookie header or the CrumbStore token cannot
            be found (page layout changed, request blocked, ...). The
            original code crashed with an opaque AttributeError instead.
    """
    link = crumble_link.format(symbol)
    response = urlopen(link)

    # The session cookie arrives in the Set-Cookie response header.
    cookie_match = re.search(cookie_regex, str(response.info()))
    if cookie_match is None:
        raise ValueError("no Set-Cookie header found for {}".format(symbol))
    cookie_str = cookie_match.group(1)

    # The matching crumb is embedded in the page's inline CrumbStore JSON.
    text = response.read().decode("utf-8")
    crumble_match = re.search(crumble_regex, text)
    if crumble_match is None:
        raise ValueError("no CrumbStore crumb found for {}".format(symbol))
    return crumble_match.group(1), cookie_str


def download_quote(symbol, date_from, date_to, events):
    """Fetch the Yahoo Finance CSV for *symbol* between the two YYYY-MM-DD
    dates (end date inclusive) for the given event type; returns the raw
    bytes, or b'' after five failed attempts."""
    start = calendar.timegm(datetime.datetime.strptime(date_from, "%Y-%m-%d").timetuple())
    # Push the end timestamp one day forward so date_to itself is included.
    end_day = datetime.datetime.strptime(date_to, "%Y-%m-%d") + datetime.timedelta(days=1)
    end = calendar.timegm(end_day.timetuple())

    for attempt in range(5):
        # Re-scrape the crumb/cookie pair each attempt in case it went stale.
        crumb, cookie = get_crumble_and_cookie(symbol)
        url = quote_link.format(symbol, start, end, events, crumb)
        request = Request(url, headers={'Cookie': cookie})
        try:
            payload = urlopen(request).read()
        except URLError:
            print ("{} failed at attempt # {}".format(symbol, attempt))
            # Linear back-off: 2s, 4s, 6s, ...
            time.sleep(2 * (attempt + 1))
        else:
            print ("{} downloaded".format(symbol))
            return payload
    return b''

if __name__ == '__main__':
    # Command-line flag names. Long options take values (trailing "=");
    # -o is the only short option.
    from_arg = "from"
    to_arg = "to"
    symbol_arg = "symbol"
    event_arg = "event"
    output_arg = "o"
    opt_list = (from_arg + "=", to_arg + "=", symbol_arg + "=", event_arg + "=")
    try:
        options, args = getopt.getopt(sys.argv[1:], output_arg + ":", opt_list)
    except getopt.GetoptError as err:
        # Previously execution fell through here and crashed with a
        # NameError on the unbound `options`; report and exit instead.
        print (err)
        sys.exit(2)

    symbol_val = ""
    from_val = ""
    to_val = ""
    output_val = ""
    event_val = "history"  # default event type: daily prices
    for opt, value in options:
        if opt[2:] == from_arg:          # strips the leading "--"
            from_val = value
        elif opt[2:] == to_arg:
            to_val = value
        elif opt[2:] == symbol_arg:
            symbol_val = value
        elif opt[2:] == event_arg:
            event_val = value
        elif opt[1:] == output_arg:      # strips the leading "-"
            output_val = value

    print ("downloading {}".format(symbol_val))
    text = download_quote(symbol_val, from_val, to_val, event_val)
    if text:
        # Binary mode: write the CSV bytes exactly as received.
        with open(output_val, 'wb') as f:
            f.write(text)
        print ("{} written to {}".format(symbol_val, output_val))
##下载自
## https://stackoverflow.com/questions/44044263/yahoo-finance-historical-data-downloader-url-is-not-working
##为Python3修改
##添加--event=history | div | split default=history
##更改为在返回的结果中包含“至:日期”
##用法:下载报价(符号、日期、事件)。解码('utf-8')
进口稀土
从urllib.request导入urlopen、request、URLError
导入日历
导入日期时间
导入getopt
导入系统
导入时间
崩溃https://finance.yahoo.com/quote/{0}/历史?p={0}'
crumble_regex=r'crumpstore:{“crump”:“(.*?”}”
cookie_regex=r'Set-cookie:(.*);'
引用链接https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events={}&crumb={}'
def get_CREAMP_和_cookie(符号):
link=crumble\u link.格式(符号)
响应=urlopen(链接)
match=re.search(cookie\u regex,str(response.info()))
cookie_str=match.group(1)
text=response.read().decode(“utf-8”)
匹配=重新搜索(crumble_regex,文本)
crumble_str=match.group(1)
返回crumble\u str,cookie\u str
def下载报价(符号、日期自、日期至、事件):
time\u stamp\u from=calendar.timegm(datetime.datetime.strtime(date\u from,“%Y-%m-%d”).timetuple())
下一天=datetime.datetime.strtime(日期到,“%Y-%m-%d”)+datetime.timedelta(天=1)
time\u stamp\u to=calendar.timegm(next\u day.timetuple())
尝试次数=0
当尝试次数小于5次时:
crumble_str,cookie_str=获取crumble_和cookie(符号)
link=quote\u link.format(符号、时间戳\u from、时间戳\u to、事件、崩溃\u str)
#打印链接
r=请求(链接,标题={'Cookie':Cookie\u str})
尝试:
响应=urlopen(r)
text=response.read()
打印(“{}下载”。格式(符号))
返回文本
除URL错误外:
打印(“{}尝试{}失败”。格式(符号,尝试次数))
尝试次数+=1
时间。睡眠(2次尝试)
返回b''
如果uuuu name uuuuuu='\uuuuuuu main\uuuuuuu':
打印(获取碎块和饼干('KO'))
from_arg=“from”
to_arg=“to”
Sub GetYahooRequest(strCrumb As String, strCookie As String)
'This routine will use a sample request to Yahoo to obtain a valid Cookie and Crumb
'Both out-parameters are filled in place: strCrumb from the page body
'(via strExtractCrumb, defined elsewhere) and strCookie from the
'Set-Cookie response header, truncated at the first ";".

Dim strUrl                      As String: strUrl = "https://finance.yahoo.com/lookup?s=%7B0%7D"  
Dim objRequest                  As WinHttp.WinHttpRequest

Set objRequest = New WinHttp.WinHttpRequest

    With objRequest
        'Async GET (third argument True), so waitForResponse below is
        'required before the response text/headers can be read.
        .Open "GET", strUrl, True
        .setRequestHeader "Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"
        .send
        .waitForResponse
        strCrumb = strExtractCrumb(.responseText)
        strCookie = Split(.getResponseHeader("Set-Cookie"), ";")(0)
    End With

End Sub
import os

# Batch driver: read one ticker symbol per line from ticker.csv and invoke
# the single-symbol downloader script for each one.
# The original leaked the file handle (never closed); `with` fixes that.
with open("ticker.csv", "r") as ticker_file:
    for line in ticker_file:
        ticker = line.strip()
        if not ticker:
            continue  # tolerate blank lines / trailing newline
        cmd = "python get_quote_history.py --symbol=%s --from=2017-01-01 --to=2017-05-25 -o %s.csv" % (ticker, ticker)
        os.system(cmd)