在keras中创建模型时有两种模型:顺序模型(Sequential)和通用模型(Model)。
顺序模型是多个网络层的线性堆叠。
可以通过将网络层实例的列表传递给 Sequential 的构造器,来创建一个 Sequential 模型:
# Sequential model: pass a list of layer instances to the constructor.
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential([
Dense(32, input_shape=(784,)),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
也可以简单地使用 model.add() 方法将各层添加到模型中:
# Equivalent Sequential model built incrementally with model.add().
model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
通用模型可以设计比较复杂的网络
from tensorflow.keras import Model,Sequential,optimizers
from tensorflow.keras.layers import Activation,Dense,Conv2D,MaxPool2D,Input,Dropout,Flatten
# Functional API: start from an Input tensor and chain layer calls on it.
inputs=Input([32,32,3])
x=Conv2D(6,kernel_size=(5,5),activation='relu')(inputs)
x=Dropout(0.5)(x)
x=MaxPool2D(pool_size=(2,2))(x)
# NOTE(review): Dense is applied to a 4-D conv output without Flatten here;
# Keras maps it over the last axis — confirm that is intended for this example.
x=Dense(84)(x)
out=Dense(10,activation='softmax')(x)
# Instantiate the Model from its input and output tensors.
model=Model(inputs,out)
本次实验使用Model模型,所以在函数编写上会和Sequential不太一样。
# Functional-API style: a layer instance is *called* on a tensor to apply it.
x=Dense(64,activation='relu')(x)
# Fix: the original line constructed the Conv2D layer but never applied it to
# a tensor; the trailing call `(x)` is required for the layer to take effect.
x=Conv2D(64,kernel_size=(5,5),activation='relu',strides=2,padding='same')(x)
# Load the CIFAR-10 data
data=tf.keras.datasets.cifar10
(x_train,y_train),(x_test,y_test)=data.load_data()
# One-hot encode the training and test labels (10 classes)
y_train=tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test=tf.keras.utils.to_categorical(y_test, num_classes=10)
# Normalize pixel values to [0, 1] to reduce the computation
x_train=x_train/255.0
x_test=x_test/255.0
注:这里要对训练标签和测试标签进行one-hot编码,因为cifar10是个10分类的问题,我们要将标签数字[0-9]转化为10维的one-hot向量(标签形状由(N,1)变为(N,10))。
做个展示:
data=tf.keras.datasets.cifar10
(x_train,y_train),(x_test,y_test)=data.load_data()
# Labels start as integer class ids, shape (50000, 1)
print("y_train",y_train.shape)
y_train=tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test=tf.keras.utils.to_categorical(y_test, num_classes=10)
# After one-hot encoding the shape becomes (50000, 10)
print("y_train",y_train.shape)
#result
y_train (50000, 1)
y_train (50000, 10)
# Build the model: a LeNet-5-style CNN via the functional API
inputs=Input([32,32,3])
x=Conv2D(6,kernel_size=(5,5),activation='relu')(inputs)
x=Dropout(0.5)(x)
x=MaxPool2D(pool_size=(2,2))(x)
x=Conv2D(16,kernel_size=(5,5),activation='relu')(x)
x=Dropout(0.5)(x)
x=MaxPool2D(pool_size=(2,2))(x)
x=Conv2D(120,kernel_size=(5,5),activation='relu')(x)
# Flatten before the fully connected classifier head
x=Flatten()(x)
x=Dense(120)(x)
x=Dense(84)(x)
out=Dense(10,activation='softmax')(x)
model=Model(inputs,out)
编译需要设定优化器,损失函数,度量环节。
# Compile: choose the optimizer, the loss, and the metrics to track.
# Fix: with one-hot labels and categorical_crossentropy,
# 'sparse_categorical_accuracy' is wrong (it expects integer labels);
# use 'accuracy' instead, consistent with the final script below.
model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
如果想改变优化器的参数比如学习率等,可以自己设定优化器,不用按照系统默认参数,如下:
# An explicit optimizer instance lets you override the defaults
# (learning rate, momentum terms, ...).
# `lr`, `epsilon=None` and `decay` are deprecated spellings in tf.keras 2.x;
# use `learning_rate` and the default epsilon (1e-7) instead.
adam=optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False)
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
# Train for 5 epochs, holding out 25% of the training data for validation.
model.fit(x_train, y_train, batch_size=16,epochs=5,validation_split=0.25)
# Print the layer-by-layer summary.
model.summary()
"""
Author: Wang Qiang
Date:2020/12/11
"""
import tensorflow as tf
from tensorflow.keras import Model,Sequential,optimizers
from tensorflow.keras.layers import Activation,Dense,Conv2D,MaxPool2D,Input,Dropout,Flatten
import os
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
# ---- Data loading & preprocessing ----
# CIFAR-10: 10-class colour images of shape (32, 32, 3).
data = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = data.load_data()

# Rescale pixel intensities from [0, 255] to [0, 1].
x_train = x_train / 255.0
x_test = x_test / 255.0

# Turn integer class ids [0-9] into one-hot vectors so they match the
# categorical_crossentropy loss used when compiling the model.
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
# ---- Model definition: LeNet-5-style CNN via the functional API ----
img_in = Input([32, 32, 3])

# Conv block 1: 6 filters of 5x5, dropout, then 2x2 max-pooling.
h = Conv2D(6, kernel_size=(5, 5), activation='relu')(img_in)
h = Dropout(0.5)(h)
h = MaxPool2D(pool_size=(2, 2))(h)

# Conv block 2: 16 filters of 5x5, dropout, then 2x2 max-pooling.
h = Conv2D(16, kernel_size=(5, 5), activation='relu')(h)
h = Dropout(0.5)(h)
h = MaxPool2D(pool_size=(2, 2))(h)

# Third convolution, then the fully connected classifier head.
h = Conv2D(120, kernel_size=(5, 5), activation='relu')(h)
h = Flatten()(h)
h = Dense(120)(h)
h = Dense(84)(h)
predictions = Dense(10, activation='softmax')(h)

# Bind input and output tensors into a trainable Model.
model = Model(img_in, predictions)
# ---- Optimizer & compilation ----
# Explicit Adam instance so hyper-parameters can be tuned instead of relying
# on the 'adam' string default.
# Fix: `lr`, `epsilon=None` and `decay` are legacy/deprecated spellings in
# tf.keras 2.x — `learning_rate` replaces `lr`, the default epsilon (1e-7) is
# what `epsilon=None` resolved to, and `decay=0.0` was a no-op.
adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                       epsilon=1e-7, amsgrad=False)
# categorical_crossentropy matches the one-hot labels; track plain accuracy.
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
# ---- Checkpointing & training ----
# Weights are saved to a ckpt file; TF writes an index file alongside it, so
# the presence of the index tells us whether a previous run saved weights.
save_path = "./checkpoint/lenet5.ckpt"
if os.path.exists(save_path + '.index'):
    print('-------------load the model-----------------')
    # Resume from the previously saved weights.
    model.load_weights(save_path)

# During training, keep only the weights with the best validation loss.
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=save_path,
                                                   monitor='val_loss',
                                                   verbose=0,
                                                   save_best_only=True,
                                                   save_weights_only=True)

# Train for 5 epochs, holding out 25% of the training set for validation.
history = model.fit(x_train, y_train,
                    validation_split=0.25,
                    epochs=5,
                    batch_size=16,
                    callbacks=[checkpoint_cb])
model.summary()
# ---- Weight dump ----
# Write every trainable variable (name, shape, full value array) to a text
# file for offline inspection; np.set_printoptions(threshold=np.inf) at the
# top of the file keeps numpy from eliding array contents.
# Fix: use a context manager so the file is closed even if a write fails.
with open('./weights.txt', 'w') as weights_file:
    for v in model.trainable_variables:
        weights_file.write(str(v.name) + '\n')
        weights_file.write(str(v.shape) + '\n')
        weights_file.write(str(v.numpy()) + '\n')
# ---- Training curves ----
# One figure per metric: train & validation accuracy, then train & validation
# loss, both indexed by epoch.
for metric, title, ylabel in (('accuracy', 'Model accuracy', 'Accuracy'),
                              ('loss', 'Model loss', 'Loss')):
    plt.plot(history.history[metric])
    plt.plot(history.history['val_' + metric])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
Epoch 1/5
2344/2344 [==============================] - 17s 7ms/step - loss: 1.1881 - accuracy: 0.5783 - val_loss: 1.4333 - val_accuracy: 0.4962
Epoch 2/5
2344/2344 [==============================] - 16s 7ms/step - loss: 1.1655 - accuracy: 0.5863 - val_loss: 1.4670 - val_accuracy: 0.4942
Epoch 3/5
2344/2344 [==============================] - 15s 6ms/step - loss: 1.1525 - accuracy: 0.5939 - val_loss: 1.4527 - val_accuracy: 0.4910
Epoch 4/5
2344/2344 [==============================] - 15s 7ms/step - loss: 1.1376 - accuracy: 0.5993 - val_loss: 1.5371 - val_accuracy: 0.4561
Epoch 5/5
2344/2344 [==============================] - 15s 6ms/step - loss: 1.1290 - accuracy: 0.5981 - val_loss: 1.4024 - val_accuracy: 0.5110