import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential, datasets, layers, optimizers, metrics

def preprocess(x, y):
    """Preprocessing function: scale images to [0, 1] and cast labels to int32."""
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
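A minimal usage sketch (the dataset variable names and the batch size 128 are assumptions, not from the original): preprocess is typically mapped over a tf.data.Dataset built from the raw arrays.

# Sketch: map preprocess over a dataset built from raw arrays (x, y) loaded elsewhere.
db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(10000).batch(128)
sample_x, sample_y = next(iter(db))
print(sample_x.shape, sample_y.shape)  # e.g. (128, 28, 28) (128,)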
import tensorflow as tf

x = tf.random.normal([2, 4])
w = tf.random.normal([4, 3])
b = tf.zeros([3])
y = tf.constant([2, 0])

with tf.GradientTape() as tape:
    tape.watch([w, b])
    # axis=1: the 3 in the output shape [b, 3] is the probability dimension
    prob = tf.nn.softmax(x @ w + b, axis=1)
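The snippet breaks off after the softmax. As an assumed continuation (not in the original), one would compute a loss inside the tape and then take gradients with respect to w and b; cross-entropy is used here purely for illustration.

# Assumed continuation: cross-entropy loss on the soft-maxed probabilities, then gradients.
with tf.GradientTape() as tape:
    tape.watch([w, b])
    prob = tf.nn.softmax(x @ w + b, axis=1)
    loss = tf.reduce_mean(
        tf.losses.categorical_crossentropy(tf.one_hot(y, depth=3), prob))
grads = tape.gradient(loss, [w, b])
print(grads[0].shape, grads[1].shape)  # (4, 3) and (3,)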
import tensorflow as tf

x = tf.random.normal([1, 3])
w = tf.ones([3, 1])
b = tf.ones([1])
y = tf.constant([1])

with tf.GradientTape() as tape:
    tape.watch([w, b])
    prob = tf.sigmoid(x @ w + b)
    loss = tf.reduce_mean(tf.losses.MSE(y, prob))
grads = tape.gradient(loss, [w, b])
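A variant sketch (an assumption, not from the original, reusing x and y from above): if w and b are created as tf.Variable instead of constants, the tape tracks them automatically and tape.watch is unnecessary.

# Same example with trainable variables; tape.watch is not needed for tf.Variable.
w = tf.Variable(tf.ones([3, 1]))
b = tf.Variable(tf.ones([1]))
with tf.GradientTape() as tape:
    prob = tf.sigmoid(x @ w + b)
    loss = tf.reduce_mean(tf.losses.MSE(y, prob))
grads = tape.gradient(loss, [w, b])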
import tensorflow as tf

x = tf.random.normal([2, 4])
w = tf.random.normal([4, 3])
b = tf.zeros([3])
y = tf.constant([2, 0])

with tf.GradientTape() as tape:
    tape.watch([w, b])
    prob = tf.nn.softmax(x @ w + b, axis=1)
    # y must be one-hot encoded so its shape matches prob's shape [2, 3]
    loss = tf.reduce_mean(tf.losses.MSE(tf.one_hot(y, depth=3), prob))
grads = tape.gradient(loss, [w, b])
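A sketch of one parameter update (an illustration, not part of the original snippet): apply_gradients needs tf.Variable, so the parameters are wrapped first; the learning rate 0.01 is an arbitrary choice.

# One SGD step on the same example, with the parameters as variables.
w_var = tf.Variable(w)
b_var = tf.Variable(b)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
with tf.GradientTape() as tape:
    prob = tf.nn.softmax(x @ w_var + b_var, axis=1)
    loss = tf.reduce_mean(tf.losses.MSE(tf.one_hot(y, depth=3), prob))
grads = tape.gradient(loss, [w_var, b_var])
optimizer.apply_gradients(zip(grads, [w_var, b_var]))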
import tensorflow as tf

a = tf.linspace(-10., 10., 10)
a
with tf.GradientTape() as tape:
    tape.watch(a)
    y = tf.sigmoid(a)
grads = tape.gradient(y, [a])
grads

a = tf.linspace(-5., 5., 10)
a
tf.tanh(a)

a = tf.linspace(-1., 1., 10)
a
tf.nn.relu(a)
tf.nn.leaky_relu(a)
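A small comparison sketch (not in the original): compute the tanh gradient the same way as the sigmoid gradient above; both saturate toward 0 at the ends of the range.

a = tf.linspace(-10., 10., 10)
with tf.GradientTape() as tape:
    tape.watch(a)
    y = tf.tanh(a)
tanh_grads = tape.gradient(y, [a])
print(tanh_grads[0])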
import tensorflow as tf

w = tf.constant(1.)
x = tf.constant(2.)
y = x * w

with tf.GradientTape() as tape:
    tape.watch([w])
    y2 = x * w

# y was computed outside the tape context, so it was never recorded: grad1 is [None]
grad1 = tape.gradient(y, [w])
grad1

# a non-persistent tape can only be queried once, so a new tape is built
with tf.GradientTape() as tape:
    tape.watch([w])
    y2 = x * w

# y2 was computed inside the tape context, so grad2 is [dy2/dw] = [2.0]
grad2 = tape.gradient(y2, [w])
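A related sketch (an assumption, not from the original): with persistent=True a single tape can be queried more than once, which avoids rebuilding it as above.

# persistent=True allows several gradient() calls; delete the tape when done.
with tf.GradientTape(persistent=True) as tape:
    tape.watch([w])
    y2 = x * w
    y3 = x * w ** 2
print(tape.gradient(y2, [w]))  # [2.0]
print(tape.gradient(y3, [w]))  # [4.0], since d(x*w^2)/dw = 2*x*w = 4
del tape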
# A fully connected layer computes out = f(X @ W + b), e.g. out = relu(X @ W + b)
import tensorflow as tf

x = tf.random.normal([4, 784])
net = tf.keras.layers.Dense(512)
out = net(x)
out.shape
net.kernel.shape, net.bias.shape

# a freshly created layer has no kernel/bias until it is built
net = tf.keras.layers.Dense(10)
try:
    net.bias
except Exception as e:
    print(e)
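A sketch of the fix (not from the original snippet): building the layer explicitly creates its kernel and bias without calling it on data; the input dimension 20 here is an arbitrary choice.

net = tf.keras.layers.Dense(10)
net.build(input_shape=(None, 20))
print(net.kernel.shape, net.bias.shape)  # (20, 10) (10,)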
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

# suppress irrelevant log output
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# x: [60k, 28, 28], x_test: [10k, 28, 28]
# y: [60k],         y_test: [10k]
(x, y), (x_test, y_test) = datasets.mnist.load_data()
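A follow-up sketch (an illustration, not in the original): convert the loaded arrays to tensors and scale them, mirroring the preprocess function defined earlier, then check the value ranges.

x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
print(x.shape, y.shape, x.dtype, y.dtype)
print(tf.reduce_min(x), tf.reduce_max(x))  # 0.0 1.0
print(tf.reduce_min(y), tf.reduce_max(y))  # 0 9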
import tensorflow as tf

a = tf.random.normal([3, 3])
a
mask = a > 0
mask
# indices of the True elements, i.e. of the elements > 0
indices = tf.where(mask)
indices
# gather the > 0 values back from a
tf.gather_nd(a, indices)

A = tf.ones([3, 3])
B = tf.zeros([3, 3])
# where mask is True the value is taken from A, where False it is taken from B
tf.where(mask, A, B)
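A one-line alternative sketch (not from the original): tf.boolean_mask selects the same > 0 values in a single call, without building the index tensor explicitly.

tf.boolean_mask(a, mask)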
import tensorflow as tf

a = tf.range(10)
a
# elements of a smaller than 2 are raised to 2
tf.maximum(a, 2)
# elements of a larger than 8 are lowered to 8
tf.minimum(a, 8)
# clamp the elements of a to the interval [2, 8]
tf.clip_by_value(a, 2, 8)

a = a - 5
a
tf.nn.relu(a)
tf.maximum(a, 0)

# clipping by norm rescales a tensor without changing the gradient direction
a = tf.random.normal([2, 2], mean=10)
a
tf.norm(a)
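A continuation sketch (the norm limit 15 is an arbitrary value for illustration): tf.clip_by_norm rescales the tensor to a maximum L2 norm, which is the "scaling without changing the gradient direction" referred to above.

# Only the magnitude changes; the direction is preserved.
aa = tf.clip_by_norm(a, 15)
tf.norm(aa)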