Reference paper: Gradient-Based Learning Applied to Document Recognition
Although the network is small, it contains the basic building blocks of deep learning: convolutional layers, pooling layers, and fully connected layers.
Input layer: 32×32 (the input layer is not counted toward the network depth; the paper pads the 28×28 MNIST digits up to 32×32)
Conv layer 1: six 5×5 kernels, stride 1 → (6, 28, 28)
Pooling layer 1: MaxPooling, (2, 2) → (6, 14, 14)
Conv layer 2: sixteen 5×5 kernels, stride 1 → (16, 10, 10)
Pooling layer 2: MaxPooling, (2, 2) → (16, 5, 5)
Fully connected layer 1: 120 units → 16×5×5×120 + 120 parameters
Fully connected layer 2: 84 units → 120×84 + 84 parameters
Fully connected layer 3: 10 units → 84×10 + 10 parameters
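As a sanity check on the bookkeeping above, each layer's parameter count follows a standard formula: (kernel height × kernel width × input channels + 1) × output channels for a conv layer, and inputs × outputs + outputs for a dense layer. A minimal sketch (the conv2 count assumes the fully connected channel wiring used in the Keras model below; the original paper wires C3 sparsely and gets a smaller count):
# Parameter counts per layer: (kh * kw * in_ch + 1) * out_ch for conv, in * out + out for dense
conv1 = (5 * 5 * 1 + 1) * 6     # 156
conv2 = (5 * 5 * 6 + 1) * 16    # 2416 (the paper's sparsely wired C3 has fewer)
fc1 = 16 * 5 * 5 * 120 + 120    # 48120
fc2 = 120 * 84 + 84             # 10164
fc3 = 84 * 10 + 10              # 850
print(conv1, conv2, fc1, fc2, fc3)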
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.datasets import mnist
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(type(x_train))  # <class 'numpy.ndarray'>
print(x_train.shape)  # (60000, 28, 28)
print(x_test.shape)   # (10000, 28, 28)
print(y_train.shape)  # (60000,)
print(y_test.shape)   # (10000,)
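# Optional: visualize one raw sample before preprocessing (assumes matplotlib is installed; purely illustrative)
import matplotlib.pyplot as plt
plt.imshow(x_train[0], cmap='gray')   # first training image
plt.title('label: %d' % y_train[0])   # its integer class label
plt.show()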
# Convert dtype from uint8 to float32 and add a channel axis: (60000, 28, 28) -> (60000, 28, 28, 1)
x_train = x_train.reshape(-1, 28, 28, 1)
print(x_train.dtype)  # uint8 (reshape does not change the dtype)
x_train = x_train.astype('float32')
print(x_train.dtype)  # float32
print(x_train.shape)  # (60000, 28, 28, 1)
y_train = y_train.astype('float32')
x_test = x_test.reshape(-1, 28, 28, 1)
x_test = x_test.astype('float32')
y_test = y_test.astype('float32')
print(y_train)
# Normalize the inputs to [0, 1]; 255 is the maximum pixel value
x_train /= 255
x_test /= 255
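# Sanity check: pixel values should now span [0, 1]
print(x_train.min(), x_train.max())  # 0.0 1.0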
# The labels are 10 classes (0-9); convert them to one-hot encoding
from tensorflow.keras.utils import to_categorical
y_train_new = to_categorical(y_train, num_classes=10)
print(y_train_new)
y_test_new = to_categorical(y_test, num_classes=10)
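# Alternative worth knowing (sketch): keep the integer labels and use
# sparse_categorical_crossentropy, which makes the to_categorical step unnecessary:
#   model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#   model.fit(x_train, y_train, ...)  # y_train stays as integer class ids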
# Build the LeNet_5 network
def LeNet_5():
    # Note: MNIST images are used at 28×28 here, while the original paper pads them
    # to 32×32, so the feature-map sizes differ from the table above.
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(5, 5), padding='valid', activation='tanh', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=16, kernel_size=(5, 5), padding='valid', activation='tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(120, activation='tanh'))
    model.add(Dense(84, activation='tanh'))
    model.add(Dense(10, activation='softmax'))
    return model
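# Quick shape check (illustrative): with 28×28 inputs the feature maps run
# 24×24 -> 12×12 -> 8×8 -> 4×4, so Flatten yields 16*4*4 = 256 features,
# not the 16*5*5 = 400 of the 32×32 paper version.
LeNet_5().summary()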
# Compile and train the LeNet_5 model
def train_model(xtr, ytr, batch_size, epochs, n_val):
    model = LeNet_5()
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    tensorboard = tf.keras.callbacks.TensorBoard(histogram_freq=1)
    # shuffle=True reshuffles the training data before each epoch, so batches arrive in a different order every time
    model.fit(xtr, ytr, batch_size=batch_size, epochs=epochs, validation_split=n_val, shuffle=True, callbacks=[tensorboard])
    return model, tensorboard
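# The TensorBoard callback above writes to the default ./logs directory, relative to
# wherever the script is launched. To pin the location explicitly, pass log_dir
# (the path below is only an example):
#   tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir='logs/lenet5', histogram_freq=1)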
model, tensorboard = train_model(x_train, y_train_new, 64, 20, 0.2)
tf.saved_model.save(model, 'LeNet_5-1')  # export the trained model in TensorFlow SavedModel format
# Report the loss and accuracy on the training and test sets
loss1, accuracy1 = model.evaluate(x_train, y_train_new)
loss2, accuracy2 = model.evaluate(x_test, y_test_new)
print(loss1, accuracy1)  # 0.027162624523043633 0.9929999709129333
print(loss2, accuracy2)  # 0.061963751912117004 0.9837999939918518
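# Restoring the export (sketch): tf.saved_model.load returns a generic SavedModel
# object with inference signatures, not a Keras Model; for full Keras round-tripping,
# model.save(...) / tf.keras.models.load_model(...) is the usual pair.
restored = tf.saved_model.load('LeNet_5-1')
print(list(restored.signatures))  # e.g. ['serving_default']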
# View the training curves by launching TensorBoard from a terminal (not from inside this script):
#   tensorboard --logdir=C:\Users\ThinkStation\Desktop\logs\train