import tensorflow as tf
from tensorflow.keras import datasets, optimizers
# Load the dataset
# MNIST: 60k training samples, 10k test samples
(x, y), (x_test, y_test) = datasets.mnist.load_data()
print('---' * 100)
x = tf.convert_to_tensor(x, dtype=tf.float32)
y = tf.convert_to_tensor(y, dtype=tf.int32)
print('x: ', x.shape, x.dtype)
print('y: ', y.shape, y.dtype)
# Inspect min/max values
print('x(min/max): ', tf.reduce_min(x), tf.reduce_max(x))
print('y(min/max): ', tf.reduce_min(y), tf.reduce_max(y))
print('---' * 100)
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
y_test = tf.convert_to_tensor(y_test, dtype=tf.int64)
print('x_test: ', x_test.shape, x_test.dtype)
print('y_test: ', y_test.shape, y_test.dtype)
# Inspect min/max values
print('x_test(min/max): ', tf.reduce_min(x_test), tf.reduce_max(x_test))
print('y_test(min/max): ', tf.reduce_min(y_test), tf.reduce_max(y_test))
print('===' * 100)
# Normalize x to [0, 1]
x = x / 255.
x_test = x_test / 255.
# One-hot encode y
y = tf.one_hot(y, depth=10)
# y_test = tf.one_hot(y_test, depth=10)
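# e.g., tf.one_hot maps label 3 to [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
# y_test is kept as integer labels: the test loop below compares tf.argmax
# predictions (int64) against it directly, so it does not need one-hot encoding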
# [b, 28, 28] => [b, 784]
x = tf.reshape(x, [-1, 28 * 28])
x_test = tf.reshape(x_test, [-1, 28 * 28])
# Build the datasets and batch them
train_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)
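# A shuffle step is often placed before batching (not done in this script); one
# possible variant with the same batch size would be:
# train_ds = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).batch(128)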
# Initialize the weights
# 784 => 256 => 128 => 10
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
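# Note: tf.random.truncated_normal resamples values more than two standard
# deviations from the mean, so a small stddev keeps the initial activations
# and gradients in a stable range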
# learning rate
lr = 1e-3
# optimizer = optimizers.SGD(lr=lr)
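# Alternative sketch (not used below): the commented-out Keras optimizer could
# replace the manual assign_sub updates in the training loop, e.g.:
#   optimizer = optimizers.SGD(learning_rate=lr)
#   optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3]))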
for epoch in range(100):
    for step, (x, y) in enumerate(train_ds):
        with tf.GradientTape() as tape:
            # [b, 28 * 28] @ [784, 256] + [256] => [b, 256] + [b, 256]
            # hd1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            hd1 = x @ w1 + b1
            hd1 = tf.nn.relu(hd1)
            # [b, 256] @ [256, 128] + [128] => [b, 128] + [b, 128]
            hd2 = tf.nn.relu(hd1 @ w2 + b2)
            # [b, 128] @ [128, 10] + [10] => [b, 10] + [b, 10]
            out = hd2 @ w3 + b3
            # Compute the loss (MSE)
            loss = tf.reduce_mean(tf.square(y - out))
        # Compute the gradients
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # for g in grads:
        #     print(tf.norm(g))
        # weight = weight - lr * gradient
        # bias = bias - lr * gradient
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
        if step % 100 == 0:
            print('epoch: ', epoch, ', step: ', step, ', loss: ', float(loss))
    # test
    total_correct = 0
    total_num = 0
    for step, (x, y) in enumerate(test_ds):
        hd1 = tf.nn.relu(x @ w1 + b1)
        hd2 = tf.nn.relu(hd1 @ w2 + b2)
        out = hd2 @ w3 + b3
        probability = tf.nn.softmax(out, axis=1)
        prediction = tf.argmax(probability, axis=1)
        correct = tf.cast(tf.equal(prediction, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        total_correct = total_correct + float(correct)
        total_num += x.shape[0]
    print('test acc: ', total_correct / total_num)
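# Note: the loop above uses MSE on one-hot targets for simplicity; a common
# alternative (not what this script does) is softmax cross-entropy on the
# logits, e.g.:
#   loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y, out, from_logits=True))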