My original laptop was not powerful enough to run this, so I switched to a desktop PC, where it runs fine. I will describe the desktop setup in a separate post on installing TensorFlow 2.4.0.
Straight to the code. The model is trained for 7 epochs in total.
from tensorflow.keras.datasets import cifar100
from tensorflow import keras


class CifarCNN(object):
    # Model: two conv layers + two pooling layers + fully connected layers
    model = keras.models.Sequential([
        # Input images: [None, 32, 32, 3]
        # Conv layer 1: 32 filters of 5*5*3, stride 1, 'same' padding
        # Output: [None, 32, 32, 32]
        keras.layers.Conv2D(32, kernel_size=5, strides=1, padding='same',
                            data_format='channels_last', activation='relu'),
        # Pooling layer 1: 2*2 window, stride 2
        # Output: [None, 16, 16, 32]
        keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same'),
        # Conv layer 2
        # Input: [None, 16, 16, 32], output: [None, 16, 16, 64]
        keras.layers.Conv2D(64, kernel_size=5, strides=1, padding='same',
                            data_format='channels_last', activation='relu'),
        # Pooling layer 2: input [None, 16, 16, 64], 2*2 window
        # Output: [None, 8, 8, 64]
        keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same'),
        # Flatten: [None, 8, 8, 64] -> [None, 8*8*64]
        keras.layers.Flatten(),
        # Fully connected layer, 1024 units, relu activation
        keras.layers.Dense(1024, activation='relu'),
        # 100 classes, so 100 output units with softmax activation
        keras.layers.Dense(100, activation='softmax')
    ])

    # Load the dataset
    def __init__(self):
        (self.x_train, self.y_train), (self.x_test, self.y_test) = cifar100.load_data()
        print(self.x_train.shape)
        print(self.x_test.shape)
        # Normalize pixel values to [0, 1]
        self.x_train = self.x_train / 255.0
        self.x_test = self.x_test / 255.0

    # Compile the model, using Adam for gradient descent
    def Cifa_compile(self):
        CifarCNN.model.compile(optimizer='Adam',
                               loss='sparse_categorical_crossentropy',
                               metrics=['accuracy'])
        return None

    # Train the model and return the per-epoch history
    def Cifa_fit(self):
        his = CifarCNN.model.fit(self.x_train, self.y_train, batch_size=16, epochs=7)
        return his.history

    # Evaluate on the test set
    def evalue(self):
        test_loss, test_acc = CifarCNN.model.evaluate(self.x_test, self.y_test)
        return test_loss, test_acc


if __name__ == '__main__':
    cifa = CifarCNN()
    cifa.Cifa_compile()
    his = cifa.Cifa_fit()
    print(type(his))
    print("history", his)
    # loss, acc = cifa.evalue()
    # print(loss, acc)
    print(cifa.model.summary())
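The test-set evaluation is commented out in the main block above; to get the hold-out accuracy after training, the evalue() method can be called like this (a small usage sketch of the code above, not part of the original run):

# evaluates CifarCNN.model on the 10,000 CIFAR-100 test images
loss, acc = cifa.evalue()
print('test loss: %.4f, test accuracy: %.4f' % (loss, acc))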
After 7 epochs of training, accuracy improves from 0.099 to 0.812:
(50000, 32, 32, 3)
(10000, 32, 32, 3)
Epoch 1/7
3125/3125 [==============================] - 18s 5ms/step - loss: 3.9635 - accuracy: 0.0997
Epoch 2/7
3125/3125 [==============================] - 16s 5ms/step - loss: 2.8395 - accuracy: 0.2975
Epoch 3/7
3125/3125 [==============================] - 16s 5ms/step - loss: 2.2998 - accuracy: 0.4071
Epoch 4/7
3125/3125 [==============================] - 16s 5ms/step - loss: 1.8054 - accuracy: 0.5158
Epoch 5/7
3125/3125 [==============================] - 16s 5ms/step - loss: 1.3521 - accuracy: 0.6278
Epoch 6/7
3125/3125 [==============================] - 16s 5ms/step - loss: 0.9408 - accuracy: 0.7327
Epoch 7/7
3125/3125 [==============================] - 16s 5ms/step - loss: 0.6392 - accuracy: 0.8126
history {'loss': [3.5707077980041504, 2.7866790294647217, 2.320335626602173, 1.8748992681503296, 1.4380824565887451, 1.041365146636963, 0.7357279062271118], 'accuracy': [0.16234000027179718, 0.3077999949455261, 0.40327998995780945, 0.4996599853038788, 0.6030799746513367, 0.7033799886703491, 0.7821199893951416]}
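Since Cifa_fit() returns his.history, the learning curve can be plotted directly from the dict printed above. A minimal sketch, assuming matplotlib is installed (not part of the original post):

import matplotlib.pyplot as plt

# his is the dict returned by Cifa_fit(), e.g. {'loss': [...], 'accuracy': [...]}
epochs = range(1, len(his['accuracy']) + 1)
plt.plot(epochs, his['accuracy'], marker='o', label='train accuracy')
plt.plot(epochs, his['loss'], marker='o', label='train loss')
plt.xlabel('epoch')
plt.legend()
plt.show()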
Layer-by-layer summary of the CNN model:
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (16, 32, 32, 32) 2432
_________________________________________________________________
max_pooling2d (MaxPooling2D) (16, 16, 16, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (16, 16, 16, 64) 51264
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (16, 8, 8, 64) 0
_________________________________________________________________
flatten (Flatten) (16, 4096) 0
_________________________________________________________________
dense (Dense) (16, 1024) 4195328
_________________________________________________________________
dense_1 (Dense) (16, 100) 102500
=================================================================
Total params: 4,351,524
Trainable params: 4,351,524
Non-trainable params: 0
_________________________________________________________________
None
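As a quick sanity check of the parameter counts in the summary (my own back-of-the-envelope arithmetic, not produced by the framework):

# Conv2D params = kernel_h * kernel_w * in_channels * filters + filters (biases)
# Dense  params = in_units * out_units + out_units (biases)
conv1 = 5 * 5 * 3 * 32 + 32         # 2,432
conv2 = 5 * 5 * 32 * 64 + 64        # 51,264
dense1 = 8 * 8 * 64 * 1024 + 1024   # 4,195,328
dense2 = 1024 * 100 + 100           # 102,500
print(conv1 + conv2 + dense1 + dense2)  # 4,351,524 -> matches "Total params"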
If the training set is too small, the images can be augmented with the ImageDataGenerator.flow() API, which applies operations such as rotation, brightness changes, and shifts to enlarge the training data.
The code is below; because the augmented training runs slowly, I commented that part out. To use it, just uncomment it.
from tensorflow.keras.datasets import cifar100
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator


class CifarCNN(object):
    # Model: two conv layers + two pooling layers + fully connected layers
    model = keras.models.Sequential([
        # Input images: [None, 32, 32, 3]
        # Conv layer 1: 32 filters of 5*5*3, stride 1, 'same' padding
        # Output: [None, 32, 32, 32]
        # If training accuracy is good but test accuracy is poor, the model is overfitting;
        # adding L2 regularization to the layer weights shrinks them and reduces the
        # effective complexity of the network. The regularized variants are commented out below.
        # keras.layers.Conv2D(64, kernel_size=5, strides=1, padding='same', data_format='channels_last',
        #                     activation='relu', kernel_regularizer=keras.regularizers.L2(0.01)),
        keras.layers.Conv2D(32, kernel_size=5, strides=1, padding='same', data_format='channels_last',
                            activation='relu'),
        # Pooling layer 1: 2*2 window, stride 2
        # Output: [None, 16, 16, 32]
        keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same'),
        # Conv layer 2
        # Input: [None, 16, 16, 32], output: [None, 16, 16, 64]
        keras.layers.Conv2D(64, kernel_size=5, strides=1, padding='same',
                            data_format='channels_last', activation='relu'),
        # Pooling layer 2: input [None, 16, 16, 64], 2*2 window
        # Output: [None, 8, 8, 64]
        keras.layers.MaxPool2D(pool_size=2, strides=2, padding='same'),
        # Flatten: [None, 8, 8, 64] -> [None, 8*8*64]
        keras.layers.Flatten(),
        # Fully connected layer, 1024 units, relu activation
        keras.layers.Dense(1024, activation='relu'),
        # keras.layers.Dense(1024, activation='relu', kernel_regularizer=keras.regularizers.L2(0.01)),
        # 100 classes, so 100 output units with softmax activation
        keras.layers.Dense(100, activation='softmax')
        # keras.layers.Dense(100, activation='softmax', kernel_regularizer=keras.regularizers.L2(0.01))
    ])

    # Load the dataset
    def __init__(self):
        (self.x_train, self.y_train), (self.x_test, self.y_test) = cifar100.load_data()
        print(self.x_train.shape)
        print(self.x_test.shape)
        # Normalize pixel values to [0, 1]
        self.x_train = self.x_train / 255.0
        self.x_test = self.x_test / 255.0

    # Compile the model, using Adam for gradient descent
    def Cifa_compile(self):
        CifarCNN.model.compile(optimizer='Adam',
                               loss='sparse_categorical_crossentropy',
                               metrics=['accuracy'])
        return None

    # Train the model
    def Cifa_fit(self):
        his = CifarCNN.model.fit(self.x_train, self.y_train, batch_size=32, epochs=5)
        print(his)
        return None

    # # Training with data augmentation: the input images are augmented before training.
    # # This is mainly useful when the training set is small; otherwise it is not worth it,
    # # because it is much slower.
    # # ImageDataGenerator augments images with rotation, brightness changes, shifts, etc.
    # def Cifa_fit(self):
    #     datagen = ImageDataGenerator(
    #         featurewise_center=True,
    #         featurewise_std_normalization=True,
    #         rotation_range=20,        # random rotation within 20 degrees
    #         width_shift_range=0.2,    # horizontal shift up to 20% of the width
    #         height_shift_range=0.2,
    #         horizontal_flip=True      # random horizontal flip
    #     )
    #     # featurewise_center / featurewise_std_normalization need the dataset statistics
    #     datagen.fit(self.x_train)
    #     # number of passes over the data
    #     epochs = 1
    #     for e in range(epochs):
    #         print('epoch: %d / %d' % (e + 1, epochs))
    #         # Note: datagen.flow() yields batches indefinitely; without a manual break
    #         # the inner loop never ends, which is why this version looked extremely slow.
    #         num = 0
    #         steps_per_epoch = len(self.x_train) // 64
    #         for x_batch, y_batch in datagen.flow(self.x_train, self.y_train, batch_size=64):
    #             his = CifarCNN.model.fit(x_batch, y_batch, batch_size=32)
    #             num += 1
    #             if num >= steps_per_epoch:
    #                 break
    #     return None

    # Evaluate on the test set
    def evalue(self):
        test_loss, test_acc = CifarCNN.model.evaluate(self.x_test, self.y_test)
        return test_loss, test_acc


if __name__ == '__main__':
    cifa = CifarCNN()
    cifa.Cifa_compile()
    cifa.Cifa_fit()
    # loss, acc = cifa.evalue()
    # print(loss, acc)
    print(cifa.model.summary())
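Instead of looping over datagen.flow() by hand, the generator can also be handed straight to model.fit() together with steps_per_epoch, which sidesteps the never-ending-generator pitfall noted in the comments above. A minimal sketch of that alternative (my own variant, not the code used for the results in this post):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

def fit_with_augmentation(model, x_train, y_train, batch_size=64, epochs=5):
    """Train on augmented batches by passing the generator directly to fit()."""
    datagen = ImageDataGenerator(
        rotation_range=20,       # random rotation within 20 degrees
        width_shift_range=0.2,   # horizontal shift up to 20% of the width
        height_shift_range=0.2,
        horizontal_flip=True     # random horizontal flip
    )
    return model.fit(
        datagen.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=len(x_train) // batch_size,  # flow() never stops on its own
        epochs=epochs
    )

# usage: fit_with_augmentation(CifarCNN.model, cifa.x_train, cifa.y_train)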