Downloading the MNIST dataset:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # one_hot: one-hot encoding, a.k.a. 1-of-N encoding -- exactly one bit is 1 at any time, all other bits are 0
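
For example, the one-hot encoding of the label 3 is a 10-dimensional vector with a 1 in position 3 and 0 everywhere else. A minimal sketch with numpy (an illustration only, not part of the original script):

import numpy as np

def one_hot(label, depth=10):
    # exactly one position is 1; all others are 0
    vec = np.zeros(depth)
    vec[label] = 1.0
    return vec

print(one_hot(3))  # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]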

1 Using logistic regression:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

batch_size = 50

# Placeholders for the training images (x) and labels (y)
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Model weights: [55000, 784] * W = [55000, 10]
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Build the logistic-regression model with softmax
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Loss function (cross-entropy)
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), 1))

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# Initialize all variables
init = tf.global_variables_initializer()

# Launch the graph in a session
with tf.Session() as sess:
    sess.run(init)
    # Start training
    for epoch in range(25):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(optimizer, {x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += sess.run(cost, {x: batch_xs, y: batch_ys}) / total_batch
        if (epoch + 1) % 5 == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print("Run complete")
    # Evaluate accuracy on the test set
    correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

Result:

Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
Epoch: 0005 cost= 0.394426425
Epoch: 0010 cost= 0.344705163
Epoch: 0015 cost= 0.323814137
Epoch: 0020 cost= 0.311426675
Epoch: 0025 cost= 0.302971779
Run complete
Accuracy: 0.9188

2 Using a neural network:

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w_h, w_o):
    h = tf.nn.sigmoid(tf.matmul(X, w_h))  # this is a basic mlp, think 2 stacked logistic regressions
    return tf.matmul(h, w_o)  # note that we don't take the softmax at the end because our cost fn does that for us

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

# create symbolic variables
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])

w_h = init_weights([784, 625])
w_o = init_weights([625, 10])

py_x = model(X, w_h, w_o)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))  # compute costs
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)  # construct an optimizer
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()
    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX) + 1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        print(i, np.mean(np.argmax(teY, axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX})))
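
Note that the zip(range(...), range(...)) idiom pairs each batch's start and end index and silently drops any trailing partial batch. A quick illustration with a hypothetical 500-sample set:

list(zip(range(0, 500, 128), range(128, 501, 128)))
# -> [(0, 128), (128, 256), (256, 384)]
# the final 116 samples (384..500) never form a batch

With the 55000 MNIST training images and batch size 128, the last 88 images are skipped each epoch, which is harmless here because the training order is fixed but the effect compounds in nothing beyond that.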

Result:

0 0.6898
1 0.8244
2 0.8635
3 0.881
4 0.8881
5 0.8931
6 0.8972
7 0.9005
8 0.9042
9 0.9062

3 Using a convolutional neural network:

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

batch_size = 128
test_size = 256

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,                   # l1a shape=(?, 28, 28, 32)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],          # l1 shape=(?, 14, 14, 32)
                        strides=[1, 2, 2, 1], padding='SAME')
    l1 = tf.nn.dropout(l1, p_keep_conv)

    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2,                 # l2a shape=(?, 14, 14, 64)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],          # l2 shape=(?, 7, 7, 64)
                        strides=[1, 2, 2, 1], padding='SAME')
    l2 = tf.nn.dropout(l2, p_keep_conv)

    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,                 # l3a shape=(?, 7, 7, 128)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],          # l3 shape=(?, 4, 4, 128)
                        strides=[1, 2, 2, 1], padding='SAME')
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])  # reshape to (?, 2048)
    l3 = tf.nn.dropout(l3, p_keep_conv)

    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)

    pyx = tf.matmul(l4, w_o)
    return pyx

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1)  # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1)  # 28x28x1 input img

X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])

w = init_weights([3, 3, 1, 32])        # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64])      # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128])     # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625])  # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, 10])          # FC 625 inputs, 10 outputs (labels)

p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()
    for i in range(10):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX) + 1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # get a random test batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))
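
The fully connected layer's input size of 128 * 4 * 4 = 2048 follows from the three 2x2 max-pools with stride 2 and SAME padding, each of which halves the spatial size and rounds up: 28 -> 14 -> 7 -> 4. A quick check of that arithmetic:

size = 28
for _ in range(3):              # three max-pools, each stride 2 with SAME padding
    size = (size + 1) // 2      # SAME padding rounds odd sizes up (7 -> 4)
print(size, 128 * size * size)  # prints: 4 2048, matching w4 = [128 * 4 * 4, 625]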

Result:

0 0.9453125
1 0.9765625
2 0.9921875
3 0.98828125
4 0.984375
5 0.9921875
6 0.984375
7 0.9921875
8 0.98828125
9 0.99609375
