# Save the weights
model.save_weights("./checkpoints/my_checkpoint")
# Load the weights
model = create_model()  # re-create a model with the same architecture first
model.load_weights("./checkpoints/my_checkpoint")
loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
A complete save/restore cycle as used in practice:
model.save_weights("./checkpoints/weights.ckpt")
print("Weights saved!")
# Delete the model
del model
# Re-create a model with the same architecture
model = Sequential([layers.Dense(256, activation="relu"),
                    layers.Dense(128, activation="relu"),
                    layers.Dense(64, activation="relu"),
                    layers.Dense(32, activation="relu"),
                    layers.Dense(10)])
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
# Load the saved weights
model.load_weights("./checkpoints/weights.ckpt")
# Validate the restored model (ds_val is the validation dataset)
loss, acc = model.evaluate(ds_val)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
Saving the entire model (architecture, weights, and optimizer state) means the network does not need to be re-created before loading:
model.save("model.h5")
print("保存模型与权值成功!")
del model
print(“加载模型与权值”)
model = tf.keras.models.load_model("model.h5")
model.evaluate(ds_val)
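Note that load_model rebuilds the architecture only from what Keras serialized itself; if the model contains custom layers or losses, they must be passed back in through the custom_objects argument. MyDense below is a hypothetical custom layer, not part of these notes:

# Hypothetical: reload a model that uses a custom layer class MyDense
model = tf.keras.models.load_model("model.h5",
                                   custom_objects={"MyDense": MyDense})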
A third option is the SavedModel format, TensorFlow's generic serialization format for deployment. The example below exports a model m that takes 28x28x3 image inputs:

tf.saved_model.save(m, "/tmp/saved_model/")
imported = tf.saved_model.load("/tmp/saved_model/")
f = imported.signatures["serving_default"]
print(f(x=tf.ones([1, 28, 28, 3])))
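tf.saved_model.save writes a language-neutral directory (graph plus weights) that TensorFlow Serving and other runtimes can consume, and serving_default is the default inference signature. Because a signature's input keyword depends on the model, it is safer to inspect it before calling; a minimal sketch with an illustrative export path:

export_dir = "./saved_model/mnist_dense/"  # illustrative path
tf.saved_model.save(model, export_dir)
imported = tf.saved_model.load(export_dir)
# List the available signatures (usually just "serving_default")
print(list(imported.signatures.keys()))
f = imported.signatures["serving_default"]
# Shows the expected input names, shapes, and dtypes before calling f
print(f.structured_input_signature)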
Putting it together, here is the full weight-checkpoint workflow on MNIST:

import tensorflow as tf
from tensorflow.keras import Sequential, layers, optimizers
import numpy as np

# List the physical GPUs
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    # Enable memory growth so TensorFlow allocates GPU memory on demand
    tf.config.experimental.set_memory_growth(gpu, True)
batch_size = 128
# Load the dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
# Preprocessing: scale to [0, 1], flatten, and one-hot encode the labels
def pre_process(x, y):
    x = tf.cast(x, tf.float32) / 255.0
    x = tf.reshape(x, (28 * 28,))
    y = tf.cast(y, tf.int32)
    y = tf.one_hot(y, depth=10)
    return x, y
# Map, shuffle, batch, and repeat
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(pre_process).shuffle(60000).batch(batch_size).repeat()
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(pre_process).batch(batch_size)
db_sample = iter(db_train)
sample = next(db_sample)
print("X:", sample[0].shape, "Y:", sample[1].shape)
print(np.max(sample[0]), np.min(sample[0]))
# Build the network
model = Sequential([layers.Dense(256, activation="relu"),
                    layers.Dense(128, activation="relu"),
                    layers.Dense(64, activation="relu"),
                    layers.Dense(32, activation="relu"),
                    layers.Dense(10)])
model.build(input_shape=(None, 28 * 28))
model.summary()
# Configure the optimizer, loss function, and evaluation metrics
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
# 循环训练
model.fit(db_train, # 指定数据集
epochs=5, # 循环次数
validation_data=db_test, # 测试数据集
validation_freq=2, # 验证频次
steps_per_epoch=x_train.shape[0] // batch_size) # 多余数据
# Evaluate on the test set
model.evaluate(db_test)
model.save_weights("./checkpoints/weights.ckpt")
print("Weights saved!")
# Delete the model
del model
# Re-create a model with the same architecture
model = Sequential([layers.Dense(256, activation="relu"),
                    layers.Dense(128, activation="relu"),
                    layers.Dense(64, activation="relu"),
                    layers.Dense(32, activation="relu"),
                    layers.Dense(10)])
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
# Load the saved weights
model.load_weights("./checkpoints/weights.ckpt")
# Validate the restored model
loss, acc = model.evaluate(db_test)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
The same pipeline again, this time saving the entire model to a single HDF5 file instead of a weight checkpoint:

import tensorflow as tf
from tensorflow.keras import Sequential, layers, optimizers
import numpy as np

# List the physical GPUs
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    # Enable memory growth so TensorFlow allocates GPU memory on demand
    tf.config.experimental.set_memory_growth(gpu, True)
batch_size = 128
# Load the dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
# Preprocessing: scale to [0, 1], flatten, and one-hot encode the labels
def pre_process(x, y):
    x = tf.cast(x, tf.float32) / 255.0
    x = tf.reshape(x, (28 * 28,))
    y = tf.cast(y, tf.int32)
    y = tf.one_hot(y, depth=10)
    return x, y
# Map, shuffle, batch, and repeat
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(pre_process).shuffle(60000).batch(batch_size).repeat()
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(pre_process).batch(batch_size)
db_sample = iter(db_train)
sample = next(db_sample)
print("X:", sample[0].shape, "Y:", sample[1].shape)
print(np.max(sample[0]), np.min(sample[0]))
# Build the network
model = Sequential([layers.Dense(256, activation="relu"),
                    layers.Dense(128, activation="relu"),
                    layers.Dense(64, activation="relu"),
                    layers.Dense(32, activation="relu"),
                    layers.Dense(10)])
model.build(input_shape=(None, 28 * 28))
model.summary()
# Configure the optimizer, loss function, and evaluation metrics
model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
              loss=tf.losses.CategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])
# 循环训练
model.fit(db_train, # 指定数据集
epochs=3, # 循环次数
validation_data=db_test, # 测试数据集
validation_freq=2, # 验证频次
steps_per_epoch=x_train.shape[0] // batch_size) # 多余数据
# Evaluate on the test set
model.evaluate(db_test)
save_path = "./model/model.h5"
model.save(save_path)
print("Model saved!")
# Delete the model
del model
# Load the model; no re-creation of the architecture is needed
model = tf.keras.models.load_model(save_path)
# Validate the restored model
loss, acc = model.evaluate(db_test)
print("Restored model, accuracy: {:5.2f}%".format(100 * acc))