The CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class; 50,000 images are used for training and 10,000 for testing.
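For reference, the integer labels returned by datasets.cifar10.load_data() follow the standard CIFAR-10 class ordering; the short lookup list below (an illustrative addition, separate from the script that follows) maps a label index to a human-readable class name.

# Standard CIFAR-10 label order (0-9)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
print(class_names[3])  # cat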
The following code uses TensorFlow (Keras) to train and validate a custom network on this dataset and saves the resulting parameters (weights) to disk.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
import os
# Suppress unnecessary TensorFlow C++ log messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def preprocess(x, y):
    # Scale pixel values from [0, 255] to [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1.
    y = tf.cast(y, dtype=tf.int32)
    return x, y
batchsz = 128
# x: [50k, 32, 32, 3], y: [50k, 1]; x_val: [10k, 32, 32, 3], y_val: [10k, 1]
(x, y), (x_val, y_val) = datasets.cifar10.load_data()
y = tf.squeeze(y)  # [50k]
y_val = tf.squeeze(y_val)
y = tf.one_hot(y, depth=10) # [50k,10]
y_val = tf.one_hot(y_val, depth=10) # [10k, 10]
print(x.shape, y.shape, x_val.shape, y_val.shape)
# Build the tf.data pipelines: preprocess, shuffle, and batch
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.map(preprocess).shuffle(10000).batch(batchsz)
test_db = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_db = test_db.map(preprocess).batch(batchsz)
# Sample one batch to verify the shapes are as expected
sample = next(iter(train_db))
print('batch:', sample[0].shape, sample[1].shape)
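# With batchsz = 128 this prints: batch: (128, 32, 32, 3) (128, 10)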
# Custom fully connected layer (subclass of layers.Layer),
# used in place of the standard layers.Dense()
class MyDense(layers.Layer):
    def __init__(self, inp_dim, outp_dim):
        super(MyDense, self).__init__()
        # add_weight registers the kernel as a trainable variable
        # (add_variable is deprecated)
        self.kernel = self.add_weight('w', [inp_dim, outp_dim])
        # self.bias = self.add_weight('b', [outp_dim])

    def call(self, inputs, training=None):
        # Linear transformation without a bias term
        x = inputs @ self.kernel
        return x
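# Quick sanity check (illustrative addition, not part of the original script):
# MyDense behaves like a bias-free Dense layer.
demo_out = MyDense(4, 2)(tf.ones([3, 4]))
print('MyDense demo output shape:', demo_out.shape)  # (3, 2)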
class MyNetwork(keras.Model):
    def __init__(self):
        super(MyNetwork, self).__init__()
        self.fc1 = MyDense(32 * 32 * 3, 256)
        self.fc2 = MyDense(256, 128)
        self.fc3 = MyDense(128, 64)
        self.fc4 = MyDense(64, 32)
        self.fc5 = MyDense(32, 10)

    # Forward pass; overriding call() (not __call__) lets Keras handle
    # building and training/inference plumbing
    def call(self, inputs, training=None):
        """
        :param inputs: [b, 32, 32, 3]
        :param training:
        :return: logits of shape [b, 10]
        """
        x = tf.reshape(inputs, [-1, 32 * 32 * 3])
        # [b, 32*32*3] => [b, 256]
        x = self.fc1(x)
        x = tf.nn.relu(x)
        # [b, 256] => [b, 128]
        x = self.fc2(x)
        x = tf.nn.relu(x)
        # [b, 128] => [b, 64]
        x = self.fc3(x)
        x = tf.nn.relu(x)
        # [b, 64] => [b, 32]
        x = self.fc4(x)
        x = tf.nn.relu(x)
        # [b, 32] => [b, 10]
        x = self.fc5(x)
        return x
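# (Illustrative addition, not in the original script) The architecture can be
# inspected before training by building the model on the expected input shape:
#   m = MyNetwork()
#   m.build(input_shape=(None, 32, 32, 3))
#   m.summary()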
network = MyNetwork()
network.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(train_db, epochs=10, validation_data=test_db, validation_freq=1)
# Evaluate the trained model, then save its weights to disk
network.evaluate(test_db)
network.save_weights('ckpt/weights.ckpt')
del network
print('saved to ckpt/weights.ckpt')

# Rebuild the model from scratch and restore the saved weights
network = MyNetwork()
network.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.load_weights('ckpt/weights.ckpt')
print('loaded weights from file.')
network.evaluate(test_db)
The results are as follows.
The accuracy is only around 50%; without a CNN this is expected, and a follow-up post will train this dataset with a convolutional network.
Loading the saved weights into a freshly built model also gives an accuracy of around 50%, which confirms the weights were restored correctly.
The location of the saved weights can be checked in the ckpt folder.
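To see exactly which files save_weights produced, the checkpoint directory can also be inspected in code; a minimal sketch, assuming the ckpt/ path used above:

import os
import tensorflow as tf

# Files written in the TensorFlow checkpoint format (index file plus data shards)
print(os.listdir('ckpt'))                  # e.g. ['checkpoint', 'weights.ckpt.index', 'weights.ckpt.data-00000-of-00001']
# Prefix of the most recent checkpoint recorded in the directory
print(tf.train.latest_checkpoint('ckpt'))  # e.g. ckpt/weights.ckpt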