Source code of input_data, the MNIST dataset download script
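The listing below is input_data.py, the helper module from the early TensorFlow MNIST tutorials. It downloads the four gzipped IDX files from Yann LeCun's site, parses them into numpy arrays, splits off a 5000-example validation set, and wraps everything in DataSet objects that serve mini-batches for training.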
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath
def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
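# The IDX files store every header field as a big-endian unsigned 32-bit
# integer, hence the '>' byte order above. For example, the image-file magic
# number 2051 checked below is the byte sequence 00 00 08 03 on disk.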
def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data
def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot
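# A small worked example of the flat-index trick above: for
# labels_dense = numpy.array([3, 0]) and num_classes = 10, index_offset is
# [0, 10], so flat positions 3 and 10 of the 2x10 zero matrix are set to 1,
# one-hot encoding 3 in row 0 and 0 in row 1.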
def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels
class DataSet(object):

  def __init__(self, images, labels, fake_data=False, one_hot=False,
               dtype=tf.float32):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      if dtype == tf.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]
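# Note that next_batch never returns a batch that straddles an epoch
# boundary: the leftover tail of the old epoch is dropped, and the batch is
# taken from the front of the freshly shuffled data instead.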
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
  class DataSets(object):
    pass
  data_sets = DataSets()

  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    data_sets.train = fake()
    data_sets.validation = fake()
    data_sets.test = fake()
    return data_sets

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)
  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)
  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)
  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
  data_sets.validation = DataSet(validation_images, validation_labels,
                                 dtype=dtype)
  data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
  return data_sets
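For reference, here is a minimal usage sketch. It assumes the listing above is saved as input_data.py next to the calling script; the MNIST_data/ cache directory name is an arbitrary choice, not something the script requires.

import input_data

# Downloads the four .gz files on first run, then reuses the cached copies.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
print(mnist.train.num_examples)       # 55000 (60000 minus 5000 validation rows)
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000

# Fetch the next mini-batch of 100 flattened 28x28 images with one-hot labels;
# the training data is reshuffled each time an epoch boundary is crossed.
batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape, batch_ys.shape)  # (100, 784) (100, 10)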
“三型两网”,国家电网在2019年提出的新战略目标.其中,“两网”分别代表着,“坚强智能电网”和“泛在电力物联网”.“坚强智能电网”的概念已经随着特高压的持续建设而被大家渐渐熟知,那么“泛在电力物联网 ...