#
# http://www.cnblogs.com/mydebug/
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import sys
sys.path.append("<path to numpy here>")  # Hint: without this line, 'import numpy' will fail.
import tensorflow.python.platform

import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf

SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = '/TensorFlow/data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10

tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS


def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels]. Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(16)
    buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
    data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
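    # (This maps pixel value 0 to -0.5 and pixel value 255 to +0.5,
    # matching the docstring above.)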
    data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
    return data


def extract_labels(filename, num_images):
"""Extract the labels into a 1-hot matrix [image index, label index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
# Convert to dense 1-hot representation.
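  # (Illustrative example: labels = [3, 0] yields a (2, 10) matrix with rows
  # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] and [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
  # because labels[:, None] has shape (2, 1) and broadcasts against
  # numpy.arange(NUM_LABELS), which has shape (10,).)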
  return (numpy.arange(NUM_LABELS) == labels[:, None]).astype(numpy.float32)


def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=numpy.float32)
labels = numpy.zeros(shape=(num_images, NUM_LABELS), dtype=numpy.float32)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image, label] = 1.0
return data, labels def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1)) /
predictions.shape[0]) def main(argv=None): # pylint: disable=unused-argument
  if FLAGS.self_test:
    print('Running self-test.')
    train_data, train_labels = fake_data(256)
    validation_data, validation_labels = fake_data(16)
    test_data, test_labels = fake_data(256)
    num_epochs = 1
  else:
    # Get the data.
    train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
    train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
    test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')

    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate a validation set.
    validation_data = train_data[:VALIDATION_SIZE, :, :, :]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, :, :, :]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS
  train_size = train_labels.shape[0]

  # This is where training samples and labels are fed to the graph.
  # These placeholder nodes will be fed a batch of training data at each
  # training step using the {feed_dict} argument to the Run() call below.
  train_data_node = tf.placeholder(
      tf.float32,
      shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
  train_labels_node = tf.placeholder(tf.float32,
                                     shape=(BATCH_SIZE, NUM_LABELS))
  # For the validation and test data, we'll just hold the entire dataset in
  # one constant node.
  validation_data_node = tf.constant(validation_data)
  test_data_node = tf.constant(test_data)

  # The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
  # {tf.initialize_all_variables().run()}
  conv1_weights = tf.Variable(
      tf.truncated_normal([5, 5, NUM_CHANNELS, 32],  # 5x5 filter, depth 32.
                          stddev=0.1,
                          seed=SEED))
  conv1_biases = tf.Variable(tf.zeros([32]))
  conv2_weights = tf.Variable(
      tf.truncated_normal([5, 5, 32, 64],
                          stddev=0.1,
                          seed=SEED))
  conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
  fc1_weights = tf.Variable(  # fully connected, depth 512.
      tf.truncated_normal(
          [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
          stddev=0.1,
          seed=SEED))
  fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
  fc2_weights = tf.Variable(
      tf.truncated_normal([512, NUM_LABELS],
                          stddev=0.1,
                          seed=SEED))
  fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))

  # We will replicate the model structure for the training subgraph, as well
  # as the evaluation subgraphs, while sharing the trainable parameters.
  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
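    # (With SAME padding and stride 1, a (BATCH_SIZE, 28, 28, 1) training
    # batch therefore comes out of this layer as (BATCH_SIZE, 28, 28, 32).)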
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
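    # (Each 2x2, stride-2 pool halves the spatial dimensions: 28 -> 14 here,
    # and 14 -> 7 after the second pool below.)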
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
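    # (For the training batch this turns (BATCH_SIZE, 7, 7, 64) into
    # (BATCH_SIZE, 7 * 7 * 64) = (BATCH_SIZE, 3136), which is exactly the
    # input size of fc1_weights above.)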
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
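    # (tf.nn.dropout keeps each activation with probability 0.5 and
    # multiplies the kept ones by 1 / 0.5 = 2, so the expected activation
    # is unchanged.)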
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
    return tf.matmul(hidden, fc2_weights) + fc2_biases

  # Training computation: logits + cross-entropy loss.
  logits = model(train_data_node, True)
  loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
      logits, train_labels_node))

  # L2 regularization for the fully connected parameters.
  regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
                  tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
  # Add the regularization term to the loss.
  loss += 5e-4 * regularizers

  # Optimizer: set up a variable that's incremented once per batch and
  # controls the learning rate decay.
  batch = tf.Variable(0)
  # Decay once per epoch, using an exponential schedule starting at 0.01.
  learning_rate = tf.train.exponential_decay(
      0.01,                # Base learning rate.
      batch * BATCH_SIZE,  # Current index into the dataset.
      train_size,          # Decay step.
      0.95,                # Decay rate.
      staircase=True)
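  # (With staircase=True this computes
  #   learning_rate = 0.01 * 0.95 ** floor(batch * BATCH_SIZE / train_size),
  # i.e. the rate drops by 5% once per pass over the training data.)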
  # Use simple momentum for the optimization.
  optimizer = tf.train.MomentumOptimizer(learning_rate,
                                         0.9).minimize(loss,
                                                       global_step=batch)

  # Predictions for the minibatch, validation set and test set.
  train_prediction = tf.nn.softmax(logits)
  # We'll compute them only once in a while by calling their {eval()} method.
  validation_prediction = tf.nn.softmax(model(validation_data_node))
  test_prediction = tf.nn.softmax(model(test_data_node))

  # Create a local session to run this computation.
  with tf.Session() as s:
    # Run all the initializers to prepare the trainable parameters.
    tf.initialize_all_variables().run()
    print('Initialized!')
    # Loop through training steps.
    for step in xrange(num_epochs * train_size // BATCH_SIZE):
      # Compute the offset of the current minibatch in the data.
      # Note that we could use better randomization across epochs.
      offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
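      # (e.g. with train_size = 55000 the offsets cycle 0, 64, 128, ...,
      # wrapping around modulo 54936.)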
      batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
      batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
      # This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
      feed_dict = {train_data_node: batch_data,
                   train_labels_node: batch_labels}
      # Run the graph and fetch some of the nodes.
      _, l, lr, predictions = s.run(
          [optimizer, loss, learning_rate, train_prediction],
          feed_dict=feed_dict)
      if step % 100 == 0:
        print('Epoch %.2f' % (float(step) * BATCH_SIZE / train_size))
        print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
        print('Minibatch error: %.1f%%' % error_rate(predictions,
                                                     batch_labels))
        print('Validation error: %.1f%%' %
              error_rate(validation_prediction.eval(), validation_labels))
        sys.stdout.flush()
    # Finally print the result!
    test_error = error_rate(test_prediction.eval(), test_labels)
    print('Test error: %.1f%%' % test_error)
    if FLAGS.self_test:
      print('test_error', test_error)
      assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
          test_error,)


if __name__ == '__main__':
tf.app.run()
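
To sanity-check the graph wiring without downloading MNIST first, the script can be run with the self_test flag defined above (assuming the code is saved as, say, convolutional.py, a file name chosen here for illustration):

python convolutional.py --self_test

This trains on the fake_data batches for one epoch and asserts that the final test error is exactly 0.0%.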

Create a new Python file in Wing IDE, paste the code above, and click Debug; it runs successfully.

Note: pay attention to the hint in the source code above (the sys.path.append line for numpy).
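
If you are unsure which path to pass to sys.path.append, here is a minimal sketch for locating it (assuming numpy is installed for some interpreter on the machine, and that site.getsitepackages() is available, which may not be the case inside some virtualenvs):

import site

for path in site.getsitepackages():
  print(path)  # look for a site-packages directory containing a numpy/ folder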
