The project consists mainly of four files: mnist_train.py, mnist_eval.py, mnist_inference.py, and mobilenet_v1.py.

mnist_train.py

#coding: utf-8
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./mobilenet_v1_model/"
MODEL_NAME = "model.ckpt"
channels = 1


def train_MLP(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference_MLP(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def train_mobilenet(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # MobileNet expects image-shaped input, so reshape the flat vectors
    # and pad each 28x28 image up to 112x112.
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
    y = mnist_inference.inference_mobilenet(x_image, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean  # + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
            else:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))


def main(argv=None):
    if not os.path.exists(MODEL_SAVE_PATH):
        os.makedirs(MODEL_SAVE_PATH)  # Saver.save() does not create the directory.
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    train_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
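The `resize_image_with_crop_or_pad` call in `train_mobilenet` matters: MobileNet's repeated stride-2 convolutions shrink the feature map by roughly a factor of 32, so raw 28x28 MNIST images are too small, and the code zero-pads them to 112x112 instead. A minimal standalone shape check, written as a sketch against TensorFlow 1.x:

# Sketch (assumes TensorFlow 1.x): confirm that crop-or-pad turns flat
# 784-dim MNIST vectors into 112x112x1 image tensors.
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Zero-pads each 28x28 image symmetrically up to 112x112.
padded = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)

with tf.Session() as sess:
    out = sess.run(padded, feed_dict={x: np.zeros((2, 784), np.float32)})
    print(out.shape)  # -> (2, 112, 112, 1)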

mnist_eval.py

#coding: utf-8
import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import mnist_inference
import mnist_train

# Reload the newest model every EVAL_INTERVAL_SECS seconds
# (when the while loop below is enabled).
EVAL_INTERVAL_SECS = 10


def evaluate_MLP(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        y = mnist_inference.inference_MLP(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        #while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            #time.sleep(EVAL_INTERVAL_SECS)


def evaluate_mobilenet(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        # MobileNet expects image-shaped input: reshape and pad to 112x112,
        # exactly as in training.
        x_image = tf.reshape(x, [-1, 28, 28, 1])
        x_image = tf.image.resize_image_with_crop_or_pad(x_image, 28 * 4, 28 * 4)
        y = mnist_inference.inference_mobilenet(x_image, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        input = mnist.validation.images
        label = mnist.validation.labels
        batch_size = 100
        TEST_STEPS = input.shape[0] / batch_size
        sum_accuracy = 0.0

        #while True:
        if 1:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    # Evaluate the validation set one batch at a time.
                    for i in range(int(TEST_STEPS)):
                        input_batch = input[i * batch_size:(i + 1) * batch_size, :]
                        label_batch = label[i * batch_size:(i + 1) * batch_size, :]
                        validate_feed = {x: input_batch, y_: label_batch}
                        accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                        sum_accuracy += accuracy_score
                        print("test %s batch steps, validation accuracy = %g" % (i, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            #time.sleep(EVAL_INTERVAL_SECS)
        print("After %s training steps, all validation accuracy = %g" % (global_step, sum_accuracy / TEST_STEPS))


def main(argv=None):
    mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    evaluate_mobilenet(mnist)


if __name__ == '__main__':
    tf.app.run()
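Note how the evaluation code builds its `Saver`: `variables_to_restore()` returns a name map from each variable's ExponentialMovingAverage shadow name to the variable itself, so restoring the checkpoint loads the averaged weights rather than the raw ones. A small sketch of that mapping, again assuming TensorFlow 1.x:

# Sketch (TensorFlow 1.x): inspect the name map used to restore
# moving-average weights at evaluation time.
import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
ema.apply([v])  # creates the shadow variable v/ExponentialMovingAverage

# Prints {'v/ExponentialMovingAverage': <the variable v>}; a Saver built
# from this dict loads the shadow value into v when restoring.
print(ema.variables_to_restore())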

mnist_inference.py

#coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import mobilenet_v1

slim = tf.contrib.slim

# Define the dimensions of the neural network.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


# Define the forward pass of the MLP network.
def inference_MLP(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2


# Define the forward pass using mobilenet_v1.
def inference_mobilenet(input_tensor, regularizer):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=True,
            min_depth=8,
            depth_multiplier=1.0,
            conv_defs=None,
            prediction_fn=tf.contrib.layers.softmax,
            spatial_squeeze=True,
            reuse=None,
            scope='MobilenetV1',
            global_pool=False)
    return logits
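One caveat with `inference_mobilenet`: it hard-codes `is_training=True`, so batch norm keeps updating its statistics and dropout stays active even when mnist_eval.py reuses the function. A sketch of a variant that threads the flag through (the extra parameter is my own addition, not part of the original code):

# Sketch: the same forward pass with an explicit is_training flag
# (an assumed addition; pass False from the evaluation script).
def inference_mobilenet(input_tensor, regularizer, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            input_tensor,
            num_classes=OUTPUT_NODE,
            dropout_keep_prob=0.8,
            is_training=is_training,
            depth_multiplier=1.0,
            scope='MobilenetV1')
    return logits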

mobilenet_v1.py

Download it from:

https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
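Besides `mobilenet_v1()`, the upstream file also ships an arg-scope helper that bundles its recommended batch-norm and weight-decay settings, which could replace the hand-rolled `slim.arg_scope` in mnist_inference.py. A sketch; treat the helper's name and defaults as assumptions to verify against the downloaded copy:

# Sketch (assumes the upstream mobilenet_v1_arg_scope helper is present
# in the downloaded file; verify before relying on it).
import tensorflow as tf
import mobilenet_v1

slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 112, 112, 1])
with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
    logits, end_points = mobilenet_v1.mobilenet_v1(images, num_classes=10)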
