Deep Learning 2: Building a Convolutional Neural Network with Keras

Saving and plotting the training process; works with both the MNIST dataset and custom datasets.

1. Importing the libraries

from keras.models import Sequential
from keras.layers import Dense,Flatten,GlobalAveragePooling2D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.models import load_model

2. Building and loading the model

if True:  # set to False to load a previously saved model instead of building a new one
    base_num=16
    model = Sequential()
    model.add(Conv2D(base_num, (3, 3), input_shape=(28,28,1), padding='same', activation='relu'))
    model.add(Conv2D(base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(GlobalAveragePooling2D())  # alternatively: Flatten()
    model.add(Dense(80, activation='relu'))
    model.add(Dense(10, activation='softmax'))  # 10 classes; alternative activation: tf.nn.log_softmax
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    model.summary()  # print the model architecture
else:
    model=load_model('mnist.h5')
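The if True: switch is a manual toggle between building a new model and loading a previously saved one. A minimal sketch of an alternative, assuming the saved file is called mnist.h5 as in the rest of this post: detect the file on disk and only build a new model when it is missing (then write if build_new_model: instead of if True:).

import os
build_new_model = not os.path.exists('mnist.h5')  # True when no saved model is found on disk
print('building a new model' if build_new_model else 'loading mnist.h5')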

3. Loading and preprocessing the data

Convert the data into the input and output formats that the convolutional network expects:

Input: (batch_size, w, h, c). The original images are grayscale with no channel dimension, i.e. (batch_size, w, h), so they are reshaped.

Output: one-hot codes. The original labels are plain integer digits, so they are converted with to_categorical.

import tensorflow as tf
# load the MNIST handwritten digit dataset
train, test = tf.keras.datasets.mnist.load_data()
trainimg, trainlabel = train
testimg, testlabel = test
print(trainimg.shape,testimg.shape)
# data preprocessing: add the channel dimension and one-hot encode the labels
trainimg=trainimg.reshape(trainimg.shape[0],28,28,1)
testimg=testimg.reshape(testimg.shape[0],28,28,1)
trainlabel = tf.keras.utils.to_categorical(trainlabel, num_classes=10)
testlabel = tf.keras.utils.to_categorical(testlabel, num_classes=10)
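The snippet above only reshapes the images and one-hot encodes the labels. As an optional extra step (not in the original code), the pixel values can also be scaled from [0, 255] to [0, 1]; this mirrors the rescale=1.0/255 used with ImageDataGenerator in the custom-dataset code of section 7.

# optional: scale the grayscale pixel values to the [0, 1] range before training
trainimg = trainimg.astype('float32') / 255.0
testimg = testimg.astype('float32') / 255.0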

4. Training, evaluating, and saving the model

1. Model training

2. EarlyStopping: stop training early once the monitored metric stops improving

3. ModelCheckpoint: checkpoint callback that keeps the best model

4. History: record the training history

from keras.callbacks import History,ModelCheckpoint,EarlyStopping
modelCheckpoint=ModelCheckpoint('best_model.h5', monitor='acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
earlyStopping=EarlyStopping(monitor='val_loss', patience=2, verbose=2, mode='auto')
history = History()
# train the model
model.fit(trainimg,trainlabel,epochs=100,batch_size=32,
          callbacks=[modelCheckpoint,earlyStopping,history],
          validation_data=(testimg, testlabel ),
          verbose=2)
# evaluate the model on the test set
loss,acc=model.evaluate(testimg, testlabel)
print('loss:',loss,'acc:',acc)
# save the trained model
model.save('mnist.h5')
# save the training history to CSV
import pandas as pd
d=pd.DataFrame(data=history.history)
d.to_csv('history.csv')
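Because ModelCheckpoint is created with save_best_only=True, the weights that scored best on the monitored metric are kept in best_model.h5. A short sketch (an addition, not part of the original code) that reloads this file and checks it against the test set:

from keras.models import load_model
# reload the best checkpoint written by ModelCheckpoint and evaluate it
best_model = load_model('best_model.h5')
best_loss, best_acc = best_model.evaluate(testimg, testlabel, verbose=0)
print('best model - loss:', best_loss, 'acc:', best_acc)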

5. Plotting the training process

Plot the accuracy curves recorded by the History callback.

draw_history=True
if draw_history:
    from matplotlib import pyplot as plt
    plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['acc_train', 'acc_val'], loc='upper left')
    plt.savefig('train.png', dpi=300, pad_inches = 0)
    plt.show()
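The History callback also records the loss values, so the training and validation loss can be plotted the same way; the file name train_loss.png below is an assumption, not part of the original code.

from matplotlib import pyplot as plt
# plot the training/validation loss stored in history.history
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss_train', 'loss_val'], loc='upper right')
plt.savefig('train_loss.png', dpi=300, pad_inches=0)
plt.show()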

6. Full code

from keras.models import Sequential
from keras.layers import Dense,Flatten,GlobalAveragePooling2D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.models import load_model
if True:  # set to False to load a previously saved model instead of building a new one
    base_num=16
    model = Sequential()
    model.add(Conv2D(base_num, (3, 3), input_shape=(28,28,1), padding='same', activation='relu'))
    model.add(Conv2D(base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(GlobalAveragePooling2D())  # alternatively: Flatten()
    model.add(Dense(80, activation='relu'))
    model.add(Dense(10, activation='softmax'))  # 10 classes; alternative activation: tf.nn.log_softmax
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    model.summary()  # print the model architecture
else:
    model=load_model('mnist.h5')
import tensorflow as tf
# load the MNIST handwritten digit dataset
train, test = tf.keras.datasets.mnist.load_data()
trainimg, trainlabel = train
testimg, testlabel = test

# data preprocessing: add the channel dimension and one-hot encode the labels
trainimg=trainimg.reshape(trainimg.shape[0],28,28,1)
testimg=testimg.reshape(testimg.shape[0],28,28,1)
trainlabel = tf.keras.utils.to_categorical(trainlabel, num_classes=10)
testlabel = tf.keras.utils.to_categorical(testlabel, num_classes=10)

print(trainimg.shape, trainlabel.shape)
print(testimg.shape, testlabel.shape)

#---------------------------------------------------------------------------------
from keras.callbacks import History,ModelCheckpoint,EarlyStopping
modelCheckpoint=ModelCheckpoint('best_model.h5', monitor='acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
earlyStopping=EarlyStopping(monitor='val_loss', patience=2, verbose=2, mode='auto')
history = History()
# train the model
model.fit(trainimg,trainlabel,epochs=100,batch_size=32,
          callbacks=[modelCheckpoint,earlyStopping,history],
          validation_data=(testimg, testlabel ),
          verbose=2)
# evaluate the model on the test set
loss,acc=model.evaluate(testimg, testlabel)
print('loss:',loss,'acc:',acc)
# save the trained model
model.save('mnist.h5')

# save the training history to CSV
import pandas as pd
d=pd.DataFrame(data=history.history)
d.to_csv('history.csv')

# plot the training history
draw_history=True
if draw_history:
    from matplotlib import pyplot as plt
    plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['acc_train', 'acc_val'], loc='upper left')
    plt.savefig('train.png', dpi=300, pad_inches = 0)
    plt.show()
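Once mnist.h5 has been saved it can be reused for inference. A minimal sketch (an addition to the original post) that loads the model and predicts the digits of the first five test images:

import numpy as np
from keras.models import load_model
# load the saved model and run it on a few test images
model = load_model('mnist.h5')
pred = model.predict(testimg[:5])                    # softmax probabilities, shape (5, 10)
print('predicted digits:', np.argmax(pred, axis=1))
print('true digits:', np.argmax(testlabel[:5], axis=1))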

7. Code 2: a complete, easily tunable CNN that reads a custom image dataset

For how to split the dataset into the required folders, see https://blog.csdn.net/a486259/article/details/100511399; the code runs as-is once copied.


from keras.models import Sequential
from keras.layers import Dense,Flatten,GlobalAveragePooling2D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.models import load_model
data_path='目标数据集'  # root name of the dataset; the training and test folders are data_path+'-训练' and data_path+'-测试'
shape=(300,300)
batch_size=100
model_file='trained model.h5'
seed=12
#---------------------------------------------------------------------------------
# load the data with ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(rescale=1.0 / 255)  # alternatively: ImageDataGenerator() without rescaling
train_gen = datagen.flow_from_directory(data_path+'-训练',
                                                target_size=shape,
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                seed=seed,
                                                shuffle=True)
test_gen = datagen.flow_from_directory(data_path+'-测试',
                                                target_size=shape,
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                seed=seed,
                                                shuffle=False)
val_gen = datagen.flow_from_directory(data_path+'-训练',  # note: validation reuses the training directory, unshuffled
                                                target_size=shape,
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                seed=seed,
                                                shuffle=False)
#---------------------------------------------------------------------------------
# build the model
if True:  # set to False to load a previously saved model instead of building a new one
    base_num=16
    model = Sequential()
    model.add(Conv2D(base_num, (3, 3), input_shape=(*shape,3), padding='same', activation='relu'))
    model.add(Conv2D(base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(2*base_num, (3, 3),  padding='same', activation='relu'))
    model.add(GlobalAveragePooling2D())  # alternatively: Flatten()
    model.add(Dense(80, activation='relu'))
    model.add(Dense(11, activation='softmax'))  # 11 classes; alternative activation: tf.nn.log_softmax
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    model.summary()  # print the model architecture
else:
    model=load_model(model_file)
#---------------------------------------------------------------------------------
# training setup: callbacks
from keras.callbacks import History,ModelCheckpoint,EarlyStopping
modelCheckpoint=ModelCheckpoint('best %s'%model_file, monitor='acc', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
earlyStopping=EarlyStopping(monitor='val_loss', patience=2, verbose=2, mode='auto')
history = History()
callbacks=[modelCheckpoint,earlyStopping,history]
# train the model from the generators
model.fit_generator(generator=train_gen,
                    steps_per_epoch=len(train_gen),        # number of batches per epoch
                    validation_data=val_gen,
                    validation_steps=len(val_gen),
                    callbacks=callbacks,
                    epochs=30)

#---------------------------------------------------------------------------------
# training results
# evaluate the model on the test generator
loss,acc=model.evaluate_generator(test_gen)
print('loss:',loss,'acc:',acc)
# save the trained model
model.save(model_file)
 
# save the training history to CSV
import pandas as pd
d=pd.DataFrame(data=history.history)
d.to_csv('%s history.csv'%model_file)
 
# plot the training history
draw_history=True
if draw_history:
    from matplotlib import pyplot as plt
    plt.figure()
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['acc_train', 'acc_val'], loc='upper left')
    plt.savefig('%s train.png'%model_file, dpi=300, pad_inches = 0)
    plt.show()
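flow_from_directory assigns a numeric index to each class folder and stores the mapping in class_indices. A hedged sketch (not part of the original code) that turns the model's predictions on the unshuffled test generator back into folder names:

import numpy as np
# map predicted indices back to the class (folder) names
test_gen.reset()                                                      # start from the first test image
pred = model.predict_generator(test_gen, steps=len(test_gen))         # one probability row per image
idx_to_class = {v: k for k, v in train_gen.class_indices.items()}     # numeric index -> folder name
pred_classes = [idx_to_class[i] for i in np.argmax(pred, axis=1)]
print(list(zip(test_gen.filenames[:10], pred_classes[:10])))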

 
