1. The Basic Network
#-*- coding: UTF-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# Load the MNIST dataset
from keras.datasets import mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
# Reshape the dataset to the expected format (single-channel images)
img_rows, img_cols = X_train[0].shape[0], X_train[0].shape[1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Normalize the input data
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
# One-hot encode the labels
n_classes = len(set(y_train))
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Define the network architecture
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(Conv2D(128, kernel_size=(3,3), activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(n_classes, activation='softmax'))
opt = Adam()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# Set the hyperparameters and callbacks
batch_size = 128
n_epochs = 11
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
# Train the model
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          epochs=n_epochs,
          validation_split=0.2,
          callbacks=callbacks,
          verbose=1)
# Save the weights
model.save_weights('result.model', overwrite=True)
# Show the results on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Make predictions with the model
preds = model.predict(X_test)
n_examples = 10
for i in range(n_examples):
    ax = plt.subplot(2, n_examples, i+1)
    plt.imshow(X_test[i,:,:,0], cmap='gray')
    plt.title('Label: {}\nPredicted: {}'.format(np.argmax(y_test[i]), np.argmax(preds[i])))
    plt.axis('off')
plt.show()
# Plot 10 misclassified images with their labels
plt.figure(figsize=(15,15))
j = 0
for i in range(len(y_test)):
    if j == 10:
        break
    label = np.argmax(y_test[i])
    pred = np.argmax(preds[i])
    if label != pred:
        ax = plt.subplot(2, n_examples, j+1)
        plt.imshow(X_test[i,:,:,0], cmap='gray')
        plt.title('Label: {}\nPredicted: {}'.format(label, pred))
        plt.axis('off')
        j += 1
plt.show()
A popular CNN optimization technique is pooling. Pooling is a clever way to reduce the number of trainable parameters; the two most commonly used variants are average pooling and max pooling.
Pooling has two major benefits: it limits the complexity of the network, which helps prevent overfitting, and it greatly reduces both training time and inference time. For a concrete picture of what these operations compute, see the sketch below.
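Before the pooled version of the network, here is a minimal NumPy sketch (the 4x4 toy array is made up purely for illustration) of what 2x2 max and average pooling compute on a single feature map:
import numpy as np

x = np.array([[1, 3, 2, 4],
              [5, 6, 1, 2],
              [7, 2, 8, 1],
              [3, 4, 5, 6]], dtype='float32')

# Split the 4x4 map into non-overlapping 2x2 windows.
windows = x.reshape(2, 2, 2, 2).swapaxes(1, 2)   # shape: (2, 2, 2, 2)

max_pooled = windows.max(axis=(2, 3))            # [[6. 4.] [7. 8.]]
avg_pooled = windows.mean(axis=(2, 3))           # [[3.75 2.25] [4. 5.]]
Either way, each 2x2 window collapses to a single value, so the feature map shrinks from 4x4 to 2x2 and the layers that follow see four times fewer inputs.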
#-*- coding: UTF-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# Load the MNIST dataset
from keras.datasets import mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
# Reshape the dataset to the expected format (single-channel images)
img_rows, img_cols = X_train[0].shape[0], X_train[0].shape[1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Normalize the input data
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
# One-hot encode the labels
n_classes = len(set(y_train))
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Define the network architecture
model = Sequential()
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128, kernel_size=(3,3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(256, kernel_size=(3,3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
opt = Adam()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# Set the hyperparameters and callbacks
batch_size = 128
n_epochs = 10
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
# Train the model
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          epochs=n_epochs,
          validation_split=0.2,
          callbacks=callbacks,
          verbose=1)
# Save the weights
model.save_weights('result.model', overwrite=True)
# Show the results on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
This version adds pooling layers to the previously implemented CNN and also increases the number of filters in the convolutional layers.
Another well-known CNN optimization technique is batch normalization. By normalizing activations over each batch, it makes the network less sensitive to shifts in the input distribution, so the model generalizes better and trains faster.
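Conceptually, for every feature the layer standardizes the batch to zero mean and unit variance and then applies a learnable scale and shift. A minimal NumPy sketch of the training-time computation (the batch shape, eps, gamma, and beta values here are illustrative; Keras's BatchNormalization additionally tracks running statistics for use at inference time):
import numpy as np

x = np.random.randn(128, 64) * 3.0 + 5.0   # a batch of 128 samples, 64 features

mean = x.mean(axis=0)                      # per-feature batch mean
var = x.var(axis=0)                        # per-feature batch variance
eps = 1e-3                                 # numerical-stability constant
gamma, beta = 1.0, 0.0                     # learnable scale and shift

x_hat = (x - mean) / np.sqrt(var + eps)    # zero mean, unit variance
y = gamma * x_hat + beta                   # scale and shift back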
#-*- coding: UTF-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# Load the MNIST dataset
from keras.datasets import mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
# Reshape the dataset to the expected format (single-channel images)
img_rows, img_cols = X_train[0].shape[0], X_train[0].shape[1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Normalize the input data
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
# One-hot encode the labels
n_classes = len(set(y_train))
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Define the network architecture
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape, padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
opt = Adam()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# Set the hyperparameters and callbacks
batch_size = 128
n_epochs = 10
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
# Train the model
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          epochs=n_epochs,
          validation_split=0.2,
          callbacks=callbacks,
          verbose=1)
# Save the weights
model.save_weights('result.model', overwrite=True)
# Show the results on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Typically, after several epochs the model with batch normalization pulls ahead of the model without it in validation accuracy, and it also converges faster (requiring fewer training epochs).
When the dataset carries little fine-grained information at the pixel level, it is worth trying a larger stride. With a larger stride, a convolutional layer skips more input values along each axis, which speeds up convergence without much loss in performance.
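As a quick illustration before the full script (a standalone sketch, assuming the standard Keras shape rule that padding='same' gives an output size of ceil(input/stride) per axis), a stride of 2 halves each spatial dimension:
from keras.models import Sequential
from keras.layers import Conv2D

# Toy model just to inspect the output shape of a strided convolution.
demo = Sequential()
demo.add(Conv2D(32, kernel_size=(3,3), strides=(2,2), padding='same',
                input_shape=(28, 28, 1)))
demo.summary()   # conv output shape: (None, 14, 14, 32), i.e. 28/2 per axis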
#-*- coding: UTF-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# Load the MNIST dataset
from keras.datasets import mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
# Reshape the dataset to the expected format (single-channel images)
img_rows, img_cols = X_train[0].shape[0], X_train[0].shape[1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Normalize the input data
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
# One-hot encode the labels
n_classes = len(set(y_train))
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Define the network architecture
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape, padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
opt = Adam()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# Set the hyperparameters and callbacks
batch_size = 128
n_epochs = 10
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
# Train the model
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          epochs=n_epochs,
          validation_split=0.2,
          callbacks=callbacks,
          verbose=1)
# Save the weights
model.save_weights('result.model', overwrite=True)
# Show the results on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
For a CNN, the initialization of the weights and biases is very important; choosing the right initialization scheme can speed up the network's convergence.
#-*- coding: UTF-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# Load the MNIST dataset
from keras.datasets import mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
# Reshape the dataset to the expected format (single-channel images)
img_rows, img_cols = X_train[0].shape[0], X_train[0].shape[1]
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Normalize the input data
X_train = X_train.astype('float32')/255.0
X_test = X_test.astype('float32')/255.0
# One-hot encode the labels
n_classes = len(set(y_train))
y_train = to_categorical(y_train, n_classes)
y_test = to_categorical(y_test, n_classes)
# Define the network architecture
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape, padding='same', kernel_initializer='glorot_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2), kernel_initializer='glorot_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2), kernel_initializer='glorot_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', strides=(2,2), kernel_initializer='glorot_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
opt = Adam()
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
# Set the hyperparameters and callbacks
batch_size = 128
n_epochs = 10
callbacks = [EarlyStopping(monitor='val_acc', patience=5)]
# Train the model
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          epochs=n_epochs,
          validation_split=0.2,
          callbacks=callbacks,
          verbose=1)
# Save the weights
model.save_weights('result.model', overwrite=True)
# Show the results on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
For 2D convolutional layers, Glorot uniform initialization (also known as Xavier uniform initialization) is typically used as the default.
To explore initialization further, note that Keras accepts other schemes through the same kernel_initializer argument, as the sketch below shows.
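A short sketch of plugging other built-in initializers into kernel_initializer (the layer sizes below are arbitrary, chosen only for illustration):
from keras.models import Sequential
from keras.layers import Dense

# Glorot (Xavier) uniform samples from U(-limit, limit) with
# limit = sqrt(6 / (fan_in + fan_out)); the He and LeCun variants
# scale by fan_in instead, which often suits ReLU activations.
init_demo = Sequential()
init_demo.add(Dense(128, activation='relu', input_shape=(784,),
                    kernel_initializer='glorot_uniform'))  # Xavier uniform
init_demo.add(Dense(128, activation='relu',
                    kernel_initializer='he_normal'))       # He normal
init_demo.add(Dense(10, activation='softmax',
                    kernel_initializer='lecun_normal'))    # LeCun normal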