import os

# Suppress TensorFlow's C++ info logs; this must be set before tensorflow is imported.
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets

# x: [60k, 28, 28], x_test: [10k, 28, 28]
# y: [60k],         y_test: [10k]
(x, y), (x_test, y_test) = datasets.mnist.load_data()
# transform Tensor
# x: [0~255] ==> [0~1.]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255.
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
# batch of 128
train_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)
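# Shuffling the training set (e.g. .shuffle(10000) before .batch(128)) is common
# so each epoch sees batches in a different order; it is optional for this demo.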
train_iter = iter(train_db)
sample = next(train_iter)
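# A quick sanity check one might add here: print the first batch's shapes;
# expect images of shape (128, 28, 28) and labels of shape (128,).
print('batch:', sample[0].shape, sample[1].shape)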
# [b,784] ==> [b,256] ==> [b,128] ==> [b,10]
# [dim_in,dim_out],[dim_out]
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
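# A small stddev (0.1) on the truncated-normal init keeps the initial activations
# and gradients modest; with a larger default-scale init this hand-written loop
# can easily diverge (loss becomes nan).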
# learning rate
lr = 1e-3
for epoch in range(10):  # iterate over the dataset for 10 epochs
    # train on every batch of train_db
    for step, (x, y) in enumerate(train_db):
        # x: [128, 28, 28]
        # y: [128]
        # [b, 28, 28] ==> [b, 28*28]
        x = tf.reshape(x, [-1, 28 * 28])

        # by default the tape only records operations on tf.Variable
        with tf.GradientTape() as tape:
            # x: [b, 28*28]
            # h1 = x@w1 + b1
            # [b, 784]@[784, 256] + [256] ==> [b, 256] + [256] ==> [b, 256] + [b, 256]
            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # [b, 256] ==> [b, 128]
            # h2 = h1@w2 + b2; b2 is broadcast automatically
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # [b, 128] ==> [b, 10]
            out = h2 @ w3 + b3

            # compute loss
            # out: [b, 10]
            # y: [b] ==> [b, 10]
            y_onehot = tf.one_hot(y, depth=10)
            # mse = mean(sum((y - out)^2))
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # mean: scalar
            loss = tf.reduce_mean(loss)

        # compute gradients w.r.t. [w1, b1, w2, b2, w3, b3]
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # w1 = w1 - lr * w1_grad
        # w1 = w1 - lr * grads[0]  # not an in-place update: this rebinds w1 to a plain Tensor
        # in-place updates keep the tf.Variable objects
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])

        if step % 100 == 0:
            print(f'epoch: {epoch}, step: {step}, loss: {float(loss)}')
    # evaluate on the test set after each epoch
    total_correct, total_num = 0, 0
    for step, (x, y) in enumerate(test_db):
        # [b, 28, 28] ==> [b, 28*28]
        x = tf.reshape(x, [-1, 28 * 28])

        # [b, 784] ==> [b, 256] ==> [b, 128] ==> [b, 10]
        h1 = tf.nn.relu(x @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        out = h2 @ w3 + b3

        # out: [b, 10] ~ R
        # prob: [b, 10] ~ (0, 1)
        prob = tf.nn.softmax(out, axis=1)
        # [b, 10] ==> [b]
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        # y: [b]
        # correct: [b], int32
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)

        total_correct += int(correct)
        total_num += x.shape[0]

    acc = total_correct / total_num
    print(f'test acc: {acc}')
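
# For comparison, a minimal sketch of an alternative training step, assuming a Keras
# SGD optimizer and cross-entropy loss instead of the manual MSE updates above; it
# reuses the same w1..b3 variables, and the function name train_step_ce is made up here.
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)

def train_step_ce(x, y):
    x = tf.reshape(x, [-1, 28 * 28])
    with tf.GradientTape() as tape:
        h1 = tf.nn.relu(x @ w1 + b1)
        h2 = tf.nn.relu(h1 @ w2 + b2)
        out = h2 @ w3 + b3  # logits: [b, 10]
        y_onehot = tf.one_hot(y, depth=10)
        # per-sample cross-entropy on logits, averaged over the batch
        loss = tf.reduce_mean(
            tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True))
    grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
    optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3]))
    return loss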
