Using the mnist library in Python

I need to download and read the MNIST data using the mnist Python library.

On my university cluster I can load the data without any problem, but on my PC I get:

---------------------------------------------------------------------------
HTTPError                                 Traceback (most recent call last)
<ipython-input-24-78f59b728818> in <module>
      8 DATASET_SIZE = 512
      9 DIGIT_SIZE = 28
---> 10 mnist_dataset = mnist.test_images().astype(np.float32)
     11 np.random.shuffle(mnist_dataset)
     12 mnist_dataset = np.reshape(mnist_dataset[:DATASET_SIZE] / 255.0, newshape=(DATASET_SIZE, DIGIT_SIZE*DIGIT_SIZE))

~\anaconda3\lib\site-packages\mnist\__init__.py in test_images()
    174         columns of the image
    175     """
--> 176     return download_and_parse_mnist_file('t10k-images-idx3-ubyte.gz')
    177 
    178 

~\anaconda3\lib\site-packages\mnist\__init__.py in download_and_parse_mnist_file(fname, target_dir, force)
    141         Numpy array with the dimensions and the data in the IDX file
    142     """
--> 143     fname = download_file(fname, target_dir=target_dir, force=force)
    144     fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
    145     with fopen(fname, 'rb') as fd:

~\anaconda3\lib\site-packages\mnist\__init__.py in download_file(fname, target_dir, force)
     57     if force or not os.path.isfile(target_fname):
     58         url = urljoin(datasets_url, fname)
---> 59         urlretrieve(url, target_fname)
     60 
     61     return target_fname

~\anaconda3\lib\urllib\request.py in urlretrieve(url, filename, reporthook, data)
    245     url_type, path = splittype(url)
    246 
--> 247     with contextlib.closing(urlopen(url, data)) as fp:
    248         headers = fp.info()
    249 

~\anaconda3\lib\urllib\request.py in urlopen(url, data, timeout, cafile, capath, cadefault, context)
    220     else:
    221         opener = _opener
--> 222     return opener.open(url, data, timeout)
    223 
    224 def install_opener(opener):

~\anaconda3\lib\urllib\request.py in open(self, fullurl, data, timeout)
    529         for processor in self.process_response.get(protocol, []):
    530             meth = getattr(processor, meth_name)
--> 531             response = meth(req, response)
    532 
    533         return response

~\anaconda3\lib\urllib\request.py in http_response(self, request, response)
    639         if not (200 <= code < 300):
    640             response = self.parent.error(
--> 641                 'http', request, response, code, msg, hdrs)
    642 
    643         return response

~\anaconda3\lib\urllib\request.py in error(self, proto, *args)
    567         if http_err:
    568             args = (dict, 'default', 'http_error_default') + orig_args
--> 569             return self._call_chain(*args)
    570 
    571 # XXX probably also want an abstract factory that knows when it makes

~\anaconda3\lib\urllib\request.py in _call_chain(self, chain, kind, meth_name, *args)
    501         for handler in handlers:
    502             func = getattr(handler, meth_name)
--> 503             result = func(*args)
    504             if result is not None:
    505                 return result

~\anaconda3\lib\urllib\request.py in http_error_default(self, req, fp, code, msg, hdrs)
    647 class HTTPDefaultErrorHandler(BaseHandler):
    648     def http_error_default(self, req, fp, code, msg, hdrs):
--> 649         raise HTTPError(req.full_url, code, msg, hdrs, fp)
    650 
    651 class HTTPRedirectHandler(BaseHandler):

HTTPError: HTTP Error 403: Forbidden

I have inspected the functions with the inspect module; the local and the cluster version request the same HTTP address. I can also download the file from my web browser without any problem.


What can I do about this in Python?

This module is very old and its repository has been archived.

I suspect it now talks to a new server with a new security setup, and the code may need some configuration, such as a User-Agent header, to access the data properly.

Following the suggestion in @wwii's comment, I downloaded the source code and added a User-Agent, and now I can download the images.

The change goes in mnist/__init__.py, in the download_file() function shown in the traceback above.
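For reference, a minimal sketch of the patched function. Only the lines visible in the traceback are known to match the library; the surrounding imports and the temporary_dir() default are assumptions about its internals:

import os
import urllib.request
from urllib.parse import urljoin
from urllib.request import urlretrieve

datasets_url = 'http://yann.lecun.com/exdb/mnist/'  # module-level constant, printed by the test code below


def download_file(fname, target_dir=None, force=False):
    # temporary_dir() is defined elsewhere in mnist/__init__.py (assumed default).
    target_dir = target_dir or temporary_dir()
    target_fname = os.path.join(target_dir, fname)
    if force or not os.path.isfile(target_fname):
        url = urljoin(datasets_url, fname)
        # Added: install an opener with a browser-like User-Agent so the
        # server no longer answers 403 Forbidden.
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        urlretrieve(url, target_fname)
    return target_fname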

Test code:

import mnist
import numpy as np

print(mnist.__file__)         # check that the local, modified version is used
print(mnist.datasets_url)     # the URL the files are fetched from
print(mnist.temporary_dir())  # where the files are downloaded to

mnist_dataset = mnist.test_images().astype(np.float32)
print(mnist_dataset)

Tested only with Python 3.8; Python 2.x would need further changes, as sketched below.
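An untested sketch of the equivalent setup for Python 2.x, where urllib.request does not exist and urllib2 provides the same opener API:

import urllib2  # Python 2 counterpart of urllib.request

opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib2.install_opener(opener)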

Another solution is to download the files yourself, with a small requests-based download function and the URLs used by the mnist package:
import requests

urls = [(r'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'training_images.gz'),
        (r'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', 'training_labels.gz'),
        (r'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'test_images.gz'),
        (r'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', 'test_labels.gz')]

def download_url(url, save_path, chunk_size=128):
    # Stream the response to disk in small chunks.
    r = requests.get(url, stream=True)
    with open(save_path, 'wb') as fd:
        for chunk in r.iter_content(chunk_size=chunk_size):
            fd.write(chunk)

for url, path in urls:
    download_url(url, path)


This works fine; it simply downloads the gzip files into the current working directory. You still need to parse them yourself.
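Parsing is not shown here; below is a minimal sketch, assuming the standard IDX layout the MNIST files use (a 4-byte magic number, one 4-byte big-endian size per dimension, then the raw pixel/label bytes):

import gzip
import struct

import numpy as np

def parse_idx(path):
    # Parse a gzipped IDX file into a numpy array.
    with gzip.open(path, 'rb') as f:
        # Magic number: two zero bytes, a type code (0x08 = unsigned byte
        # for all MNIST files) and the number of dimensions.
        _zeros, _dtype_code, ndim = struct.unpack('>HBB', f.read(4))
        shape = struct.unpack('>' + 'I' * ndim, f.read(4 * ndim))
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)

test_images = parse_idx('test_images.gz')  # shape (10000, 28, 28)
test_labels = parse_idx('test_labels.gz')  # shape (10000,)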

Using the other answers, I was able to build a solution that lets the package be used directly.

The following code has to be executed once and works globally:

from six.moves import urllib

# Install a global opener whose requests carry a browser-like User-Agent,
# so the server no longer responds with 403 Forbidden.
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)

With this in place, all mnist functions can be used and the files are downloaded on demand: install_opener replaces the global opener that urlretrieve uses internally (as the traceback above shows), so the package's own download call now sends the new header. I simply ran the code above in one Jupyter notebook cell before calling mnist_dataset = mnist.test_images().astype(np.float32).

Comments on the question:

The error may mean there is a problem downloading the file: perhaps it waits too long for data, the connection is too slow, or the file is large and the download cannot reconnect and resume when the connection drops. But there should be functions for using already-downloaded files, or you could unpack the .gz files into the correct folder and skip the download entirely.

This module is four years old and may be using a wrong URL, or the server changed some settings and now blocks connections from bots/scripts/spammers/hackers for security reasons. Searching for "mnist" together with the error message turns up a related SO Q&A. Bottom line: you probably cannot use the mnist package without adjusting the calls that download and unpack the data.