Keras implementations of L1 regularization, L2 regularization, and dropout regularization

# L2 regularization
from keras import models, layers, regularizers

model = models.Sequential()
# l2(0.001) means every coefficient in the layer's weight matrix adds
# 0.001 * weight_coefficient_value ** 2 to the network's total loss.
# Because this penalty is only added at training time, the training loss
# of this network will be much higher than its test loss.
model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                       activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                       activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
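The penalty terms are collected on the model and added to the loss that is minimized during training. A minimal sketch of how to inspect them (the optimizer, loss, and metrics below are illustrative assumptions, not part of the original notes):

# Each regularized layer contributes one penalty tensor of the form
# 0.001 * sum(w ** 2); Keras keeps them in model.losses and adds them
# to the loss minimized during training.
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
print(model.losses)  # two penalty tensors, one per regularized Dense layer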

# L1 regularization
regularizers.l1(0.001)

# Simultaneous L1 and L2 regularization
regularizers.l1_l2(l1=0.001, l2=0.001)
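Either instance is used the same way as the l2 regularizer above, passed through the kernel_regularizer argument; a minimal sketch (the layer size here is an illustrative assumption):

layers.Dense(16,
             kernel_regularizer=regularizers.l1_l2(l1=0.001, l2=0.001),
             activation='relu')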


# Dropout regularization
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
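Conceptually, Dropout(0.5) randomly zeroes half of a layer's output units at training time and rescales the survivors so the expected output stays the same. A numpy sketch of the idea (layer_output is a hypothetical activation matrix, not part of the models above):

import numpy as np

layer_output = np.random.rand(32, 16)  # hypothetical (batch, features) activations
# Zero out half the units at random...
layer_output *= np.random.randint(0, high=2, size=layer_output.shape)
# ...then scale up by the dropout rate so the expected activation is unchanged.
layer_output /= 0.5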

 
