Compared with the original article, section numbering has been added to make the material easier to understand and remember.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
print(tf.version.VERSION)
# Load MNIST; keep only the first 1,000 examples to speed up the demonstration
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()

train_labels = train_labels[:1000]
test_labels = test_labels[:1000]

# Flatten the 28x28 images and scale pixel values to [0, 1]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
# Define a simple sequential model
def create_model():
    model = tf.keras.models.Sequential([
        keras.layers.Dense(512, activation='relu', input_shape=(784,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10, activation='softmax')
    ])

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
# Create a basic model instance
model = create_model()

# Display the model's architecture
model.summary()
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
# Train the model with the new callback
model.fit(train_images,
          train_labels,
          epochs=10,
          validation_data=(test_images, test_labels),
          callbacks=[cp_callback])  # Pass the callback to training
This may generate warnings related to saving the optimizer state. These warnings (and similar warnings throughout this notebook) are in place to discourage outdated usage and can be ignored.
#ls {checkpoint_dir}
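The `ls {checkpoint_dir}` line above is a notebook/Colab shell command. Outside a notebook, you can inspect the checkpoint directory with plain Python instead (a minimal sketch using only the standard library):
import os

# List the files written by the ModelCheckpoint callback
print(sorted(os.listdir(checkpoint_dir)))
# Typically: "checkpoint", "cp.ckpt.index", and one or more "cp.ckpt.data-*" shards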
# Create a basic model instance
model = create_model()

# Evaluate the untrained model
loss, acc = model.evaluate(test_images, test_labels)
print("Untrained model, accuracy: {:5.2f}%".format(100 * acc))
# Load the weights
model.load_weights(checkpoint_path)

# Re-evaluate the model
loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
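To see what the `{epoch:04d}` placeholder in `checkpoint_path` produces, format it with a concrete epoch number; the epoch is zero-padded to four digits:
print(checkpoint_path.format(epoch=5))  # -> training_2/cp-0005.ckpt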
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    period=5)
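Note: in more recent TensorFlow 2.x releases the `period` argument is deprecated in favor of `save_freq`, which counts batches rather than epochs. A rough equivalent would look like the sketch below (it assumes the default `fit` batch size of 32 and a newer TF version; it is not part of the original tutorial code):
import math

batch_size = 32  # assumed: the default batch size used by model.fit
steps_per_epoch = math.ceil(len(train_images) / batch_size)

cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    save_freq=5 * steps_per_epoch)  # save once every 5 epochs' worth of batches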
model = create_model()
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Train the model, saving a new checkpoint every 5 epochs
model.fit(train_images,
          train_labels,
          epochs=50,
          callbacks=[cp_callback],
          validation_data=(test_images, test_labels),
          verbose=0)
#ls {checkpoint_dir}
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
# Create a new model instance and load the latest checkpoint
model = create_model()
model.load_weights(latest)

loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
# Manually save the weights
model.save_weights('./checkpoints/my_checkpoint')

# Restore the weights into a new model instance
model = create_model()
model.load_weights('./checkpoints/my_checkpoint')

loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
# Create and train a new model, then save the entire model to an HDF5 file
model = create_model()
model.fit(train_images, train_labels, epochs=5)
model.save('my_model.h5')

# Recreate the exact same model, including its weights and optimizer, from the file
new_model = keras.models.load_model('my_model.h5')
new_model.summary()

loss, acc = new_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
# Create and train a fresh model
model = create_model()
model.fit(train_images, train_labels, epochs=5)

# Export the model to the SavedModel format, in a timestamped directory
import time
saved_model_path = "./saved_models/{}".format(int(time.time()))

tf.keras.experimental.export_saved_model(model, saved_model_path)
saved_model_path
#ls saved_models/
# Reload a fresh Keras model from the saved model
new_model = tf.keras.experimental.load_from_saved_model(saved_model_path)
new_model.summary()

# Check the prediction output shape
model.predict(test_images).shape
# The model has to be compiled before evaluating;
# this step is not required if the saved model is only being deployed
new_model.compile(optimizer=model.optimizer,  # keep the optimizer that was loaded
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

# Evaluate the restored model
loss, acc = new_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
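A final caveat: `tf.keras.experimental.export_saved_model` and `tf.keras.experimental.load_from_saved_model` were experimental APIs in early TensorFlow 2.0 and have since been removed. On later TF 2.x releases (where Keras still supports SavedModel directories), the same export/reload workflow uses the stable calls below; treat this as a sketch, not a drop-in replacement for the exact code above:
# Save the whole model in the SavedModel format (a directory, not a single file)
model.save(saved_model_path)

# Reload it as a Keras model; the training configuration is restored as well,
# so it can usually be evaluated directly
new_model = tf.keras.models.load_model(saved_model_path)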