__author__ = 'LXY'
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets,layers,optimizers,Sequential,metrics
# Fix the global TF random seed so shuffling/weight init are reproducible across runs.
tf.random.set_seed(1)
# Data preprocessing
def preprocess(x, y):
    """Normalize an MNIST sample: scale pixels to [0, 1], flatten, one-hot the label.

    Args:
        x: uint8 image tensor (28x28 as loaded by datasets.mnist).
        y: integer class label.
    Returns:
        Tuple of (float32 vector of length 784, depth-10 one-hot label).
    """
    image = tf.cast(x, dtype=tf.float32) / 255.
    image = tf.reshape(image, [28 * 28])
    label = tf.cast(y, dtype=tf.int32)
    label = tf.one_hot(label, depth=10)
    return image, label
def load_mnist_data():
    """Build batched tf.data pipelines for MNIST.

    Returns:
        (db_train, db_test): preprocessed datasets of batch size 128;
        the training pipeline is shuffled over the full 60k samples.
    """
    batchsz = 128
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
    db_train = (
        tf.data.Dataset.from_tensor_slices((x_train, y_train))
        .map(preprocess)
        .shuffle(60000)
        .batch(batch_size=batchsz)
    )
    db_test = (
        tf.data.Dataset.from_tensor_slices((x_test, y_test))
        .map(preprocess)
        .batch(batch_size=batchsz)
    )
    return db_train, db_test
def build_model():
    """Build, train, and evaluate a 5-layer MLP classifier on MNIST.

    Trains for 2 epochs, prints the training history, the test-set metrics,
    and a manual accuracy check on one test batch.

    Returns:
        The trained (compiled) keras model.
    """
    db_train, db_test = load_mnist_data()
    # Design the network: 784 -> 256 -> 128 -> 64 -> 32 -> 10.
    # The last layer emits raw logits (no softmax) — see from_logits=True below.
    model = Sequential([
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10)
    ])
    # Build the model with flattened 28*28 inputs.
    model.build(input_shape=[None, 28 * 28])
    # model.summary()  # print the network structure
    model.compile(
        # `lr` is deprecated in tf.keras; `learning_rate` is the supported keyword.
        optimizer=optimizers.Adam(learning_rate=0.001),
        loss=tf.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy', 'mse']  # recorded per epoch in history.history
    )
    # BUG FIX: validate on the held-out test split, not the training data
    # (the original passed db_train as validation_data).
    history = model.fit(db_train, epochs=2, validation_data=db_test,
                        validation_freq=2, verbose=0)
    acc = history.history['accuracy']
    mse = history.history['mse']
    # history.history holds one value per epoch for each compiled metric.
    print(history.history, '\nacc', acc, '\nmse', mse)
    # Evaluate on the test set; values follow model.metrics_names,
    # i.e. ['loss', 'accuracy', 'mse'].
    index_test = model.evaluate(db_test, verbose=0)
    print("index_test", model.metrics_names, index_test)
    # BUG FIX: index 0 is the loss — accuracy and mse are at indices 1 and 2
    # (the original printed loss as "acc_test" and accuracy as "mse_test").
    print("acc_test", index_test[1], "\nmse_test", index_test[2])
    # Manual sanity check: compare argmax predictions against labels
    # on a single test batch.
    sample = next(iter(db_test))
    x = sample[0]
    y = sample[1]
    predict = model.predict(x)
    y = tf.argmax(y, axis=1)
    predict = tf.argmax(predict, axis=1)
    result = tf.cast(tf.equal(y, predict), tf.int32)
    num_correct = tf.reduce_sum(result)
    print("正确率", float(num_correct / predict.shape[0]))  # fraction of correct predictions
    return model
if __name__ == '__main__':
    model = build_model()
    # Save the trained model to disk.
    model.save('mnist_model')
    # Delete the in-memory model so the reload below proves
    # the saved artifact alone is sufficient.
    del model
    # Reload the model; compile=True restores the saved
    # optimizer/loss/metrics configuration as well.
    network = tf.keras.models.load_model('mnist_model', compile=True)
    train_data, test_data = load_mnist_data()
    # Evaluate the restored model on the test split.
    result_test = network.evaluate(test_data)
    print('result_test', network.metrics_names, result_test)
模型保存使用 model.save("name"),例如:
model.save('mnist_model')
当训练好了,拿出来再用的话:
network = tf.keras.models.load_model('mnist_model',compile=True)
想要test数据,直接使用模型就可以了
输出结果:
{'loss': [0.28666120767593384, 0.10383852571249008], 'accuracy': [0.9153000116348267, 0.968833327293396], 'mse': [14.382641792297363, 20.249595642089844], 'val_loss': [0.07012375444173813], 'val_accuracy': [0.9796333312988281], 'val_mse': [19.037185668945312]}
acc [0.9153000116348267, 0.968833327293396]
mse [14.382641792297363, 20.249595642089844]
index_test ['loss', 'accuracy', 'mse'] [0.09124262630939484, 0.972100019454956, 19.534870147705078]
acc_test 0.09124262630939484
mse_test 0.972100019454956
正确率 0.984375
#下面是使用模型evaluate的结果
79/79 [==============================] - 0s 4ms/step - loss: 0.0912 - accuracy: 0.9721 - mean_squared_error: 19.5349
result_test ['loss', 'accuracy', 'mean_squared_error'] [0.09124262630939484, 0.972100019454956, 19.534870147705078]
如果在main函数当中,要重新修改模型的优化函数,或者返回的性能指标的话,令 compile=False(注意 Python 区分大小写,必须是大写 F),
if __name__=='__main__':
model = build_model()
#保存模型
model.save('mnist_model')
#删除model
del model
#重新加载模型
network = tf.keras.models.load_model('mnist_model',compile=False)
#重新定义compile
network.compile(
optimizer = tf.keras.optimizers.Adam(lr=0.02),
loss = tf.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy', 'mse'] # 通过运行,训练数据后返回的history
)
train_data,test_data = load_mnist_data()
result_test = network.evaluate(test_data)
print('result_test',network.metrics_names, result_test)
输出结果
{'loss': [0.28666120767593384, 0.10383852571249008], 'accuracy': [0.9153000116348267, 0.968833327293396], 'mse': [14.382641792297363, 20.249595642089844], 'val_loss': [0.07012375444173813], 'val_accuracy': [0.9796333312988281], 'val_mse': [19.037185668945312]}
acc [0.9153000116348267, 0.968833327293396]
mse [14.382641792297363, 20.249595642089844]
index_test ['loss', 'accuracy', 'mse'] [0.09124262630939484, 0.972100019454956, 19.534870147705078]
acc_test 0.09124262630939484
mse_test 0.972100019454956
正确率 0.984375
#compile修改后的输出结果
79/79 [==============================] - 0s 4ms/step - loss: 0.0912 - accuracy: 0.9721 - mse: 19.5349
result_test ['loss', 'accuracy', 'mse'] [0.09124262630939484, 0.972100019454956, 19.534870147705078]
可以看到,两次的输出结果一模一样,
因为新改的compile()函数并没有被执行到,
再加一句:
history = network.fit(train_data, epochs=2, validation_data=test_data, validation_freq=2, verbose=0)
main函数变成:
if __name__=='__main__':
model = build_model()
#保存模型
model.save('mnist_model')
#删除model
del model
#重新加载模型
network = tf.keras.models.load_model('mnist_model',compile=False)
network.compile(
optimizer = tf.keras.optimizers.Adam(lr=0.02),
loss = tf.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy', 'mse'] # 通过运行,训练数据后返回的history
)
train_data,test_data = load_mnist_data()
history = network.fit(train_data, epochs=2, validation_data=test_data, validation_freq=2, verbose=0)
result_test = network.evaluate(test_data)
print('result_test',network.metrics_names, result_test)
输出结果:
{'loss': [0.28666120767593384, 0.10383852571249008], 'accuracy': [0.9153000116348267, 0.968833327293396], 'mse': [14.382641792297363, 20.249595642089844], 'val_loss': [0.07012375444173813], 'val_accuracy': [0.9796333312988281], 'val_mse': [19.037185668945312]}
acc [0.9153000116348267, 0.968833327293396]
mse [14.382641792297363, 20.249595642089844]
index_test ['loss', 'accuracy', 'mse'] [0.09124262630939484, 0.972100019454956, 19.534870147705078]
acc_test 0.09124262630939484
mse_test 0.972100019454956
正确率 0.984375
#变了
79/79 [==============================] - 0s 3ms/step - loss: 0.1924 - accuracy: 0.9463 - mse: 45.6422
result_test ['loss', 'accuracy', 'mse'] [0.19236354529857635, 0.9463000297546387, 45.64221954345703]
前一段数据没变,是因为输出的一直都是原来保存的模型的输出结果,后面新改的compile()要fit一下才起作用,否则就只是把训练好的模型用来测试了。
如果再把模型的参数保存下,用的时候再提出来:
if __name__=='__main__':
model = build_model()
#保存模型
model.save('mnist_model')
model.save_weights('weights')
#删除model
del model
#重新加载模型
network = tf.keras.models.load_model('mnist_model',compile=False)
network.compile(
optimizer = tf.keras.optimizers.Adam(lr=0.02),
loss = tf.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy', 'mse'] # 通过运行,训练数据后返回的history
)
train_data,test_data = load_mnist_data()
network.load_weights('weights')
history = network.fit(train_data, epochs=2, validation_data=test_data, validation_freq=2, verbose=0)
result_test = network.evaluate(test_data)
print('result_test',network.metrics_names, result_test)
输出结果:
#又变了的结果
79/79 [==============================] - 0s 4ms/step - loss: 0.0718 - accuracy: 0.9794 - mse: 28.5494
result_test ['loss', 'accuracy', 'mse'] [0.07176566123962402, 0.9793999791145325, 28.549360275268555]
这部分的loss比上面更低,更精准了,连mse都低了很多。