For a detailed walkthrough of this program, see the previous blog post.
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from tensorflow.keras import layers, optimizers, datasets
datapath = r'I:\Pycharm2019\project\project_TF\.idea\data\mnist.npz'
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(datapath)
x_test1 = x_test  # keep an un-reshaped, un-normalized copy of the test images for plotting later
x_train = x_train.reshape(x_train.shape[0],28,28,1).astype('float32')
x_test = x_test.reshape(x_test.shape[0],28,28,1).astype('float32')
# Normalization (tf.keras.utils.normalize applies L2 normalization along axis=1)
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
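# (Aside, not part of the original script: a simpler and more common preprocessing for MNIST
#  is plain [0, 1] scaling, e.g. x_train, x_test = x_train / 255.0, x_test / 255.0;
#  this post keeps the normalize() calls above.)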
model = keras.models.Sequential([
    layers.Conv2D(filters=16, kernel_size=(5, 5), padding='same',  # 16 filters; 'same' padding keeps the output the same size as the input
                  input_shape=(28, 28, 1), activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(filters=36, kernel_size=(5, 5), padding='same',
                  activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.25),
    layers.Flatten(),
    # layers.GlobalAveragePooling2D(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax')
])
# Print the model structure (summary() already prints, so no extra print() is needed)
model.summary()
# Training configuration
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])  # Adam's default learning rate is 0.001
# Start training
model.fit(x=x_train, y=y_train, validation_split=0.2,
          epochs=30, batch_size=128, verbose=1)
# Evaluate on the test set
test_loss, test_acc = model.evaluate(x_test, y_test)
#%%
print('Test Loss:{:.6f}'.format(test_loss))
print('Test Acc:{:.6f}'.format(test_acc))
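As an optional aside (not in the original script): if the fit call above is assigned to a variable, e.g. history = model.fit(...), the returned History object can be used to plot the training curves with matplotlib. A minimal sketch, assuming TF 2.x metric names 'accuracy' and 'val_accuracy':
# Plot training vs. validation accuracy from the History object
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()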
(Continuing from the previous section.) This part of the program saves the trained model and then checks that the reloaded model still works correctly.
# Save the model
model.save('./my_models/MNIST_CNN_model.h5')  # creates an HDF5 file 'MNIST_CNN_model.h5'
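# (Aside, not in the original post: in TF 2.x the same call can also export the TensorFlow
#  SavedModel format by passing a path without the .h5 suffix, e.g.
#  model.save('./my_models/MNIST_CNN_model'); tf.keras.models.load_model accepts either format.)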
i = 0  # the 0th test image is the digit 7
plt.imshow(x_test1[i],cmap=plt.cm.binary)
plt.show()
# Delete the model from memory
del model
#%%
# Reload the saved model
Saved_model = tf.keras.models.load_model('./my_models/MNIST_CNN_model.h5')
#%%
loss2, acc2 = Saved_model.evaluate(x_test,y_test)
print('Test2 Loss:{:.6f}'.format(loss2))
print('Test2 Acc:{:.6f}'.format(acc2))
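To confirm the reloaded model really works on the digit shown earlier (the first test image, which is a 7), we can run a single prediction. A minimal sketch, not in the original post; it reuses the normalized x_test and needs numpy for argmax:
import numpy as np

# Predict the class of the first test image with the reloaded model
pred = Saved_model.predict(x_test[:1])      # shape (1, 10): softmax probabilities
print('Predicted digit:', np.argmax(pred))  # expected: 7
print('True label:', y_test[0])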