Wu Yuxiong - Python Neural Networks - Training Neural Networks with TensorFlow: MNIST Best Practices

The first script below builds a two-layer fully connected network, trains it on MNIST with L2 regularization, exponential learning-rate decay and a moving average over the trainable variables, and saves a checkpoint every 1000 steps. The second script repeatedly restores the latest checkpoint and reports its accuracy on the validation set.
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Network structure: 784 input pixels, one 500-node hidden layer, 10 output classes.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def get_weight_variable(shape, regularizer):
    # Create the weight variable and, if a regularizer is given, add its loss to the 'losses' collection.
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Forward pass of the two-layer fully connected network.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2

# Training hyperparameters.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model" def train(mnist):
# 定义输入输出placeholder。
x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input') regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
y = inference(x, regularizer)
global_step = tf.Variable(0, trainable=False) # 定义损失函数、学习率、滑动平均操作以及训练过程。
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
staircase=True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name='train') # 初始化TensorFlow持久化类。
saver = tf.train.Saver()
with tf.Session() as sess:
tf.global_variables_initializer().run()
for i in range(TRAINING_STEPS):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
if i % 1000 == 0:
print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step) def main(argv=None):
mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
train(mnist) if __name__ == '__main__':
main()
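For completeness, here is one way the checkpoints written by train() could be consumed for ad-hoc predictions. This is a minimal sketch of my own, not part of the original post: it assumes it runs in the same module as the code above (so inference(), INPUT_NODE and MODEL_SAVE_PATH are in scope), and it restores the raw weights rather than their moving averages.

import numpy as np

def predict_single(image):
    # Hypothetical helper, not from the original post.
    # image: a flattened 28x28 numpy array of shape (784,) with pixel values in [0, 1].
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
        logits = inference(x, None)  # no regularizer needed at prediction time
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.latest_checkpoint(MODEL_SAVE_PATH)
            if ckpt is None:
                raise RuntimeError("no checkpoint found in %s" % MODEL_SAVE_PATH)
            saver.restore(sess, ckpt)
            scores = sess.run(logits, feed_dict={x: image.reshape(1, INPUT_NODE)})
            return int(np.argmax(scores, axis=1)[0])

The second script below takes the other route: it rebuilds the graph in a separate process, restores the moving-average shadow values through variables_to_restore(), and re-evaluates the newest checkpoint on the validation set every EVAL_INTERVAL_SECS seconds.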

import os
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "E:\\MNIST_model\\"
MODEL_NAME = "mnist_model" def get_weight_variable(shape, regularizer):
weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
if regularizer != None:
tf.add_to_collection('losses', regularizer(weights))
return weights def inference(input_tensor, regularizer):
with tf.variable_scope('layer1'):
weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases) with tf.variable_scope('layer2'):
weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
layer2 = tf.matmul(layer1, weights) + biases
return layer2 # 加载的时间间隔。
EVAL_INTERVAL_SECS = 10 def evaluate(mnist):
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels} y = inference(x, None)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore) while True:
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
print("After %s training step(s), validation accuracy = %g" % (global_step, accuracy_score))
else:
print('No checkpoint file found')
return
time.sleep(EVAL_INTERVAL_SECS) def main(argv=None):
mnist = input_data.read_data_sets("E:\\MNIST_data\\", one_hot=True)
evaluate(mnist) if __name__ == '__main__':
main()
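A closing note, hedged: both scripts target the TensorFlow 1.x graph API and the tensorflow.examples.tutorials.mnist reader, which are not available as-is in TensorFlow 2.x. The snippet below only sketches the kind of shims that would be needed under 2.x; it is my assumption about a reasonable migration path, not part of the original post, and the MNIST loading itself would still have to be replaced (for example with tf.keras.datasets.mnist).

# Run the 1.x-style graph code through the compat layer (TensorFlow 2.x).
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # the scripts build a static graph and drive it with tf.Session

# tf.contrib was removed in 2.x; a hand-written L2 regularizer can stand in for
# tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE):
def l2_regularizer(scale):
    return lambda weights: scale * tf.nn.l2_loss(weights)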
