Key points
K.name_scope()
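A minimal sketch of what K.name_scope buys you (an assumption of this note: the TensorFlow backend, where the scope only prefixes op names in the underlying graph; Keras layer names are unaffected):

import keras.backend as K
from keras.layers import Input, Dense

inp = Input(shape=(8,))
with K.name_scope('block_1'):
    # Ops created by this layer get the 'block_1/' prefix in the TF graph,
    # so TensorBoard's Graph view draws them as one collapsible node.
    out = Dense(4, name='dense_a')(inp)

The grouped graph can then be inspected by running tensorboard --logdir logs after training.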
Note
fit_generator is a bit cumbersome; look it up yourself if you are curious, as it is not used here. K.name_scope does work, though: use it to organize the network structure, and at training time simply pass in the TensorBoard callback.

import keras.backend as K
from keras.datasets import cifar10
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import concatenate, BatchNormalization, Flatten, Dropout
from keras.regularizers import l2
from keras.utils import to_categorical
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
L2_RATE = 0.002
NUM_CLASS = 10
BATCH_SIZE = 128
EPOCH = 10
def inception(x, filter_size, layer_number):
    """Inception module built from 1x1, 3x3, and 5x5 convolutions plus max pooling.
    Args:
        x (Tensor): input tensor.
        filter_size: list of six filter counts, one per convolution.
        layer_number: index of this Inception module.
    Returns:
        Tensor produced by the Inception module.
    """
    layer_number = str(layer_number)
    with K.name_scope('Inception_' + layer_number):
        # 1x1 convolution
        with K.name_scope('conv_1x1'):
            conv_1x1 = Conv2D(filters=filter_size[0], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_1x1' + layer_number)(x)
        # 3x3 bottleneck (1x1 convolution) followed by the 3x3 convolution
        with K.name_scope('conv_3x3'):
            conv_3x3 = Conv2D(filters=filter_size[1], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3_bottleneck' + layer_number)(x)
            conv_3x3 = Conv2D(filters=filter_size[2], kernel_size=(3, 3),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_3x3' + layer_number)(conv_3x3)
        # 5x5 bottleneck (1x1 convolution) followed by the 5x5 convolution
        with K.name_scope('conv_5x5'):
            conv_5x5 = Conv2D(filters=filter_size[3], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5_bottleneck' + layer_number)(x)
            conv_5x5 = Conv2D(filters=filter_size[4], kernel_size=(5, 5),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='conv_5x5' + layer_number)(conv_5x5)
        # Max pooling followed by a 1x1 bottleneck convolution
        with K.name_scope('Max_Conv'):
            max_pool = MaxPooling2D(pool_size=3, strides=1, padding='same',
                                    name='maxpool' + layer_number)(x)
            max_pool = Conv2D(filters=filter_size[5], kernel_size=(1, 1),
                              strides=1, padding='same', activation='relu',
                              kernel_regularizer=l2(L2_RATE),
                              name='maxpool_conv1x1' + layer_number)(max_pool)
        with K.name_scope('concatenate'):
            # Height/width are the same on every branch, so concatenate along channels
            x = concatenate([conv_1x1, conv_3x3, conv_5x5, max_pool], axis=-1)
    return x
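As a quick sanity check (hypothetical usage, not part of the training script below): every branch preserves the spatial size, so the module's output depth is filter_size[0] + filter_size[2] + filter_size[4] + filter_size[5]. For the first call in googlenet_model that is 64 + 128 + 32 + 32 = 256 channels:

probe = Input(shape=(32, 32, 3))
out = inception(probe, filter_size=[64, 96, 128, 16, 32, 32], layer_number=99)
print(K.int_shape(out))  # (None, 32, 32, 256)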
def aux_classifier(x, filter_size, layer_number):
    """Auxiliary classifier that outputs a softmax prediction.
    Args:
        x: input tensor.
        filter_size: list of two sizes: the 1x1 conv filter count and the dense units.
        layer_number: index of this auxiliary classifier.
    Returns:
        Softmax output of the auxiliary classifier.
    """
    layer_number = str(layer_number)
    with K.name_scope('aux_classifier' + layer_number):
        # Average pooling
        x = AveragePooling2D(pool_size=3, strides=2, padding='same',
                             name='AveragePooling2D' + layer_number)(x)
        # (0) 1x1 convolution
        x = Conv2D(filters=filter_size[0], kernel_size=1, strides=1,
                   padding='valid', activation='relu',
                   kernel_regularizer=l2(L2_RATE),
                   name='aux_conv' + layer_number)(x)
        # Flatten
        x = Flatten()(x)
        # (1) fully connected layer
        x = Dense(units=filter_size[1], activation='relu',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_dense1_' + layer_number)(x)
        x = Dropout(0.7)(x)
        # (2) softmax output layer
        x = Dense(units=NUM_CLASS, activation='softmax',
                  kernel_regularizer=l2(L2_RATE),
                  name='aux_output' + layer_number)(x)
    return x
def front(x, filter_size):
    """Stem: the convolution/pooling layers that precede the first Inception module."""
    # (0) conv2d
    x = Conv2D(filters=filter_size[0], kernel_size=5, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = MaxPooling2D(pool_size=3, strides=2, padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    # (1) conv2d
    x = Conv2D(filters=filter_size[1], kernel_size=1, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    # (2) conv2d
    x = Conv2D(filters=filter_size[2], kernel_size=3, strides=1,
               padding='same', activation='relu',
               kernel_regularizer=l2(L2_RATE))(x)
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=3, strides=1, padding='same')(x)
    return x
def load():
    """Load CIFAR-10, one-hot encode the labels, and scale pixels to [0, 1]."""
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = to_categorical(y_train, NUM_CLASS)
    y_test = to_categorical(y_test, NUM_CLASS)
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    return x_train, y_train, x_test, y_test
def googlenet_model():
    # Assemble the model; input_shape is set in __main__ before this is called
    X_Input = Input(shape=input_shape, name='Input')
    X = front(X_Input, [64, 64, 192])
    # Inception_0
    X = inception(X, filter_size=[64, 96, 128, 16, 32, 32], layer_number=0)
    # Inception_1
    X = inception(X, [128, 128, 192, 32, 96, 64], layer_number=1)
    X = MaxPooling2D(pool_size=3, strides=2, padding='same')(X)
    # Inception_2
    X = inception(X, [192, 96, 208, 16, 48, 64], layer_number=2)
    # aux1
    aux_output_1 = aux_classifier(X, [128, 1024], layer_number=1)
    # Inception_3
    X = inception(X, [160, 112, 224, 24, 64, 64], layer_number=3)
    # Inception_4
    X = inception(X, [128, 128, 256, 24, 64, 64], layer_number=4)
    # Inception_5
    X = inception(X, [112, 144, 288, 32, 64, 64], layer_number=5)
    # aux2
    aux_output_2 = aux_classifier(X, [128, 1024], layer_number=2)
    # Inception_6
    X = inception(X, [256, 160, 320, 32, 128, 128], layer_number=6)
    X = MaxPooling2D(pool_size=3, strides=2, padding='same')(X)
    # Inception_7
    X = inception(X, [256, 160, 320, 32, 128, 128], layer_number=7)
    # Inception_8
    X = inception(X, [384, 192, 384, 48, 128, 128], layer_number=8)
    # Output head
    X = AveragePooling2D(pool_size=4, strides=1, padding='valid')(X)
    X = Flatten()(X)
    X = Dropout(0.4)(X)
    main_output = Dense(NUM_CLASS, activation='softmax',
                        kernel_regularizer=l2(L2_RATE))(X)
    # Define a single-input, three-output model
    model = Model(inputs=X_Input, outputs=[main_output, aux_output_1, aux_output_2])
    return model
if __name__ == '__main__':
    x_train, y_train, x_test, y_test = load()
    input_shape = x_train.shape[1:]
    # Build the model
    GoogleNet = googlenet_model()
    optimizer = Adam(epsilon=1e-08)
    GoogleNet.compile(optimizer=optimizer, loss='categorical_crossentropy',
                      metrics=['accuracy'], loss_weights=[1, 0.3, 0.3])
    GoogleNet.summary()
    tfck = TensorBoard(log_dir='logs/GoogleNet')
    # All three outputs train against the same labels; the auxiliary losses
    # are down-weighted to 0.3, as in the GoogLeNet paper.
    GoogleNet.fit(x=x_train, y=[y_train, y_train, y_train],
                  validation_data=(x_test, [y_test, y_test, y_test]),
                  epochs=EPOCH, callbacks=[tfck], batch_size=BATCH_SIZE)
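Tying back to the note on fit_generator above: the reason it is cumbersome here is that a generator for this model must yield all three targets with every batch. A minimal sketch under that assumption (ImageDataGenerator is standard Keras; the three_target_flow wrapper is hypothetical, not part of the original script):

from keras.preprocessing.image import ImageDataGenerator

def three_target_flow(x, y, batch_size):
    # Wrap an augmenting generator so each batch repeats the labels
    # once per model output: (x_batch, [y_batch, y_batch, y_batch]).
    datagen = ImageDataGenerator(horizontal_flip=True)
    for x_batch, y_batch in datagen.flow(x, y, batch_size=batch_size):
        yield x_batch, [y_batch, y_batch, y_batch]

# GoogleNet.fit_generator(three_target_flow(x_train, y_train, BATCH_SIZE),
#                         steps_per_epoch=len(x_train) // BATCH_SIZE, epochs=EPOCH)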