keras 预训练模型finetune,多模型ensemble,修改loss函数,调节学习率

  • 加载预训练模型并finetune
  • 修改loss函数
  • 两个网络做ensemble,进行网络训练,调节learning rate

加载预训练模型并finetune

这里使用的是keras库里的MobileNet模型,预训练权重也是官方自带的,最终finetune为自己需要的分类

from keras.layers import Activation, Convolution2D, Dropout, Conv2D, Average
from keras.layers import AveragePooling2D, BatchNormalization
from keras.layers import GlobalAveragePooling2D
from keras.models import Sequential
from keras.layers import Flatten
from keras.models import Model
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import SeparableConv2D
from keras import layers
from keras.regularizers import l2
from keras.applications.mobilenet import MobileNet
from keras import models 
from keras import layers


# Network input: 224x224 RGB images (MobileNet's default ImageNet input size).
input_shape = (224,224,3)
img_input = Input(input_shape)
# Number of target classes for the finetuned classifier head.
num_classes = 7
def mobilenet(img_input, num_classes):
    """Build a MobileNet classifier finetuned for `num_classes` outputs.

    Loads the ImageNet-pretrained backbone without its top, keeps every
    layer trainable, and adds a conv + global-average-pooling softmax head.
    """
    # ImageNet-pretrained backbone, headless, built on the shared input tensor.
    backbone = MobileNet(weights='imagenet', include_top=False,
                         input_tensor=img_input)
    # Keep all backbone layers trainable (per-layer freezing can be chosen here).
    for backbone_layer in backbone.layers:
        backbone_layer.trainable = True
    # Head: one feature map per class -> spatial average -> softmax.
    logits = Conv2D(num_classes, (3, 3), padding='same')(backbone.output)
    pooled = GlobalAveragePooling2D()(logits)
    probs = Activation('softmax', name='predictions1')(pooled)
    return Model(img_input, probs)

修改loss函数

from keras import backend as K
# Customize your own loss function here.
def my_object(y_true, y_pred):
    """Custom loss: categorical cross-entropy plus a squared penalty on
    probability mass assigned to wrong classes.

    y_true: one-hot ground-truth tensor.
    y_pred: predicted class probabilities (softmax output).
    Returns a scalar loss tensor.
    """
    eps = K.epsilon()
    # Clip into (eps, 1-eps) so K.log never sees 0 — the original produced
    # NaN gradients when a predicted probability saturated at 0 or 1.
    y_pred = K.clip(y_pred, eps, 1.0 - eps)
    # Standard cross-entropy term on the true classes.
    ce = -K.sum(y_true * K.log(y_pred))
    # Wrong-class probability mass.  Named `wrong` instead of the original
    # `l2`, which shadowed the `l2` regularizer imported from
    # keras.regularizers at the top of the file.
    wrong = (1 - y_true) * y_pred
    # Since wrong <= 1 - eps after clipping, log(1 - wrong) is finite.
    penalty = -K.sum(wrong * wrong * K.log(1 - wrong))
    return ce + penalty
#model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])
# Compile with the custom loss defined above.  NOTE(review): `model` must
# already exist here (e.g. model = mobilenet(img_input, num_classes)) —
# confirm against the full script.
model.compile(optimizer='adam', loss=my_object,metrics=['accuracy'])
model.summary()

两个网络做ensemble,进行网络训练,调节learning rate

这里我做的是最简单的对两个网络的输出进行平均值操作
定义另外一个网络

def CNN(img_input, num_classes):
    """Small plain CNN: two conv-BN-ReLU stages and a softmax pooling head."""
    net = img_input
    # Two identical conv -> batch-norm -> ReLU stages.
    for _ in range(2):
        net = Conv2D(8, (3, 3), strides=(1, 1), use_bias=False)(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
    # Head: one feature map per class, averaged spatially, then softmax.
    net = Conv2D(num_classes, (3, 3), padding='same')(net)
    net = GlobalAveragePooling2D()(net)
    probs = Activation('softmax', name='predictions')(net)
    return Model(img_input, probs)

还有一个就是上面提到的MobileNet,这里要注意一点,两个网络里面的结构命名不能重复,否则会报错

from keras.layers import Activation, Convolution2D, Dropout, Conv2D, Average
from keras.models import Model
def ensemble(models, img_input):
    """Average the output tensors of several models sharing `img_input`.

    models: list of keras Models built on the same input tensor.
    Returns a single Model whose output is the element-wise mean of the
    member models' first outputs.
    """
    averaged = Average()([member.outputs[0] for member in models])
    return Model(img_input, averaged, name='ensemble')

训练网络,并且调节学习速率

from keras.layers import Input
# Shared input tensor: both networks are built on the same 224x224x3 input
# so they can later be merged into a single ensemble graph.
input_shape = (224,224,3)
img_input = Input(input_shape)
num_classes = 7
model1 = CNN(img_input,num_classes)
model2 = mobilenet(img_input,num_classes)
models = [model1,model2]
# Fuse the two networks' outputs (element-wise average) into one model.
model = ensemble(models,img_input)
model.compile(optimizer='adam', loss=my_object,metrics=['accuracy'])
model.summary()
# Training loop with a step-decay learning-rate schedule: the rate is
# multiplied by DECAY_FACTOR every DECAY_EVERY batches.
TOTAL_STEPS = 45000
DECAY_EVERY = 15000
DECAY_FACTOR = 0.1
for step in range(TOTAL_STEPS):
    if step != 0 and step % DECAY_EVERY == 0:
        new_lr = K.get_value(model.optimizer.lr) * DECAY_FACTOR
        K.set_value(model.optimizer.lr, new_lr)
        # Report the rate actually in effect from now on (the original
        # printed the stale pre-decay value after already updating it).
        print("lr: ", new_lr)
    # Train on your own data here; x_train/y_train are one batch of
    # inputs and one-hot labels — must be defined by the caller.
    log = model.train_on_batch(x_train, y_train)
    print(step, log)

你可能感兴趣的:(keras)