吴裕雄 Python Neural Networks: Training Neural Networks with TensorFlow, MNIST Best Practices

This post contains two TensorFlow 1.x scripts: a training script that builds a two-layer fully connected network, trains it on MNIST, and periodically saves checkpoints; and an evaluation script that repeatedly restores the latest checkpoint (loading the moving-average shadow variables) and reports validation accuracy.
# ----- Training script: build the network, train it, and save checkpoints. -----
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Network structure.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    # Create (or reuse) a weight variable; if a regularizer is given,
    # add its regularization loss to the 'losses' collection.
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Forward pass of a two-layer fully connected network.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2

# Training hyperparameters and checkpoint location.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model"

def train(mnist):
    # Define the input and output placeholders.
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op, and training op.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    main()
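For reference, with staircase=True the schedule above multiplies the base learning rate by LEARNING_RATE_DECAY once per epoch, where decay_steps = num_examples / BATCH_SIZE (550 for MNIST's 55,000 training images). A minimal pure-Python sketch of the same formula:

# Sketch of tf.train.exponential_decay with staircase=True:
#   lr = base * decay ** floor(global_step / decay_steps)
def decayed_lr(global_step, base=0.8, decay=0.99, decay_steps=550):
    return base * decay ** (global_step // decay_steps)

print(decayed_lr(0))      # 0.8 during the first epoch
print(decayed_lr(550))    # 0.792 at the start of the second epoch
print(decayed_lr(29999))  # ~0.465 near the end of training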

# ----- Evaluation script: repeatedly restore the latest checkpoint and report validation accuracy. -----
import os
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Network structure (must match the training script).
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Hyperparameters and checkpoint location (must match the training script).
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model"

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Same two-layer forward pass as in the training script.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2

# Interval between two evaluations, in seconds.
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # No regularization loss is needed at evaluation time.
        y = inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values into the variables.
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The step count is encoded in the checkpoint file name.
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    main()
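The subtle point above is variable_averages.variables_to_restore(): it returns a renaming dictionary that maps each variable's shadow name ('<name>/ExponentialMovingAverage') to the variable itself, so saver.restore() loads the moving-average values, which usually generalize better than the raw weights. A simplified hand-built sketch of that mapping (the real method also maps variables that have no shadow copy under their own names; the variable here is illustrative):

import tensorflow as tf

# Simplified sketch of the renaming dict behind variables_to_restore().
# tf.train.Saver uses the dict keys as checkpoint tensor names, so restore()
# reads "layer1/weights/ExponentialMovingAverage" into layer1/weights.
weights = tf.get_variable("layer1/weights", [784, 500])  # illustrative variable
name_map = {weights.op.name + '/ExponentialMovingAverage': weights}
saver = tf.train.Saver(name_map)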
