KerasClassifier
或者KerasRegressor
进行打包供scikit-learn使用,利用fit()
进行训练,例如:def create_model():
...
return model
model = KerasClassifier(build_fn=create_model)
在打包时,可以提供模型需要的参数,这些参数包括:
fit(self, x, y, batch_size=32, epochs=10, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0)
中所有的参数。注意,参数名一定要一致。例如 def create_model():
...
return model
model = KerasClassifier(build_fn=create_model, epochs=10)
def create_model(dropout_rate=0.0):
...
return model
model = KerasClassifier(build_fn=create_model, dropout_rate=0.2)
GridSearchCV
可以为我们实现Grid Search。GridSearchCV
的评分标准,可以通过scoring
参数设置param_grid
是一个字典,表示为 {参数名: 候选值},GridSearchCV 将会组合这些参数进行评估,找出最优组合。这些参数包括训练参数(epochs、batch_size 等)以及模型参数(kernel_size、pool_size、num_filters 等)。n_jobs
默认为1,表示将使用一个进程;将其设置为-1,表示将调用最大数量的进程(我在实验过程中,如果设置为-1,就会无限等待,所以以下代码中 n_jobs 的值均为1)。GridSearchCV
通过Cross validation来评估每个模型。#举个例子
param_grid = dict(epochs=[10,20,30])
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
GridSearchCV
进行调参。这些例子将会使用一个小型数据集 Pima Indians onset of diabetes classification dataset。这是一个二元分类问题(判断是否有糖尿病),数据集说明请看这里。可以使用 EarlyStopping 这样的回调函数来监控训练过程,因此 Epochs 参数的选择可能不是那么重要了。
import numpy as np
from sklearn.model_selection import GridSearchCV
from keras import models
from keras import layers
from keras import optimizers
from keras.wrappers import scikit_learn
# Model-builder function required by KerasClassifier
def create_model():
    """Build and compile a small fully-connected binary classifier.

    Architecture: 8 inputs -> Dense(12, relu) -> Dense(1, sigmoid).
    """
    net = models.Sequential([
        layers.Dense(12, activation='relu', input_shape=(8,)),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Wrap the Keras model so scikit-learn can drive training
model = scikit_learn.KerasClassifier(build_fn=create_model, verbose=0)
# Candidate values for batch size and number of epochs
param_grid = dict(batch_size=[8, 16], epochs=[10, 50])
# Grid-search over every combination using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7799479166666666 using {'batch_size': 8, 'epochs': 50}
0.763021 (0.041504) with: {'batch_size': 8, 'epochs': 10}
0.779948 (0.034104) with: {'batch_size': 8, 'epochs': 50}
0.744792 (0.030647) with: {'batch_size': 16, 'epochs': 10}
0.769531 (0.039836) with: {'batch_size': 16, 'epochs': 50}
# Model-builder function required by KerasClassifier
def create_model(optimizer='adam'):
    """Build and compile the classifier with the given optimizer name.

    `optimizer` is exposed so GridSearchCV can tune it by keyword.
    """
    net = models.Sequential([
        layers.Dense(12, activation='relu', input_shape=(8,)),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size here; only the optimizer is tuned below
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate optimizer names
param_grid = dict(optimizer=['sgd', 'rmsprop', 'adam', 'adagrad'])
# Grid-search over every candidate using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7682291666666666 using {'optimizer': 'rmsprop'}
0.765625 (0.037603) with: {'optimizer': 'sgd'}
0.768229 (0.025582) with: {'optimizer': 'rmsprop'}
0.764323 (0.031466) with: {'optimizer': 'adam'}
0.760417 (0.034104) with: {'optimizer': 'adagrad'}
# Model-builder function required by KerasClassifier
def create_model(learning_rate=0.01, momentum=0):
    """Build the classifier compiled with SGD at the given hyper-parameters."""
    net = models.Sequential([
        layers.Dense(12, activation='relu', input_shape=(8,)),
        layers.Dense(1, activation='sigmoid'),
    ])
    # NOTE: `lr` is the legacy Keras keyword for the learning rate
    sgd = optimizers.SGD(lr=learning_rate, momentum=momentum)
    net.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size; only learning_rate and momentum are tuned
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate SGD hyper-parameter values
param_grid = dict(learning_rate=[0.001, 0.01], momentum=[0.0, 0.2, 0.4])
# Grid-search over every combination using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7747395833333334 using {'learning_rate': 0.01, 'momentum': 0.0}
0.640625 (0.030425) with: {'learning_rate': 0.001, 'momentum': 0.0}
0.692708 (0.025780) with: {'learning_rate': 0.001, 'momentum': 0.2}
0.686198 (0.017566) with: {'learning_rate': 0.001, 'momentum': 0.4}
0.774740 (0.035132) with: {'learning_rate': 0.01, 'momentum': 0.0}
0.766927 (0.021710) with: {'learning_rate': 0.01, 'momentum': 0.2}
0.769531 (0.033299) with: {'learning_rate': 0.01, 'momentum': 0.4}
# Model-builder function required by KerasClassifier
def create_model(init_mode='random_uniform'):
    """Build the classifier with both Dense layers using `init_mode` weights."""
    net = models.Sequential([
        layers.Dense(12, activation='relu', kernel_initializer=init_mode,
                     input_shape=(8,)),
        layers.Dense(1, activation='sigmoid', kernel_initializer=init_mode),
    ])
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size; only the weight initializer is tuned
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate kernel initializers
param_grid = dict(init_mode=['he_normal', 'he_uniform', 'glorot_normal',
                             'glorot_uniform', 'lecun_normal'])
# Grid-search over every candidate using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7760416666666666 using {'init_mode': 'he_normal'}
0.776042 (0.024360) with: {'init_mode': 'he_normal'}
0.764323 (0.025976) with: {'init_mode': 'he_uniform'}
0.769531 (0.025315) with: {'init_mode': 'glorot_normal'}
0.761719 (0.035943) with: {'init_mode': 'glorot_uniform'}
0.763021 (0.038582) with: {'init_mode': 'lecun_normal'}
# Model-builder function required by KerasClassifier
def create_model(activation='relu'):
    """Build the classifier with a tunable hidden-layer activation."""
    net = models.Sequential([
        layers.Dense(12, activation=activation, input_shape=(8,)),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size; only the hidden activation is tuned
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate activation functions for the hidden layer
param_grid = dict(activation=['relu', 'tanh', 'softmax', 'linear',
                              'hard_sigmoid', 'softplus', 'selu'])
# Grid-search over every candidate using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7786458333333334 using {'activation': 'softplus'}
0.773438 (0.035516) with: {'activation': 'relu'}
0.766927 (0.024774) with: {'activation': 'tanh'}
0.760417 (0.017566) with: {'activation': 'softmax'}
0.774740 (0.032106) with: {'activation': 'linear'}
0.760417 (0.033502) with: {'activation': 'hard_sigmoid'}
0.778646 (0.022628) with: {'activation': 'softplus'}
0.770833 (0.025780) with: {'activation': 'selu'}
# Model-builder function required by KerasClassifier
def create_model(dropout=0.0):
    """Build the classifier with a tunable dropout rate after the hidden layer."""
    net = models.Sequential([
        layers.Dense(12, activation='relu', input_shape=(8,)),
        layers.Dropout(dropout),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size; only the dropout rate is tuned
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate dropout rates
param_grid = dict(dropout=[0.2, 0.5])
# Grid-search over every candidate using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7708333333333334 using {'dropout': 0.5}
0.769531 (0.029232) with: {'dropout': 0.2}
0.770833 (0.032264) with: {'dropout': 0.5}
# Model-builder function required by KerasClassifier
def create_model(num_neurons=1):
    """Build the classifier with a tunable hidden-layer width."""
    net = models.Sequential([
        layers.Dense(num_neurons, activation='relu', input_shape=(8,)),
        layers.Dropout(0.5),
        layers.Dense(1, activation='sigmoid'),
    ])
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
    return net
# Load the dataset (comma-separated numeric file)
dataset = np.loadtxt('pima-indians-diabetes.csv', delimiter=',')
# First 8 columns are the features, the last is the binary target
X, Y = dataset[:, :8], dataset[:, 8]
# Standardize features in place: zero mean, unit variance
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)
# Seed the RNG for reproducibility (not essential)
np.random.seed(7)
# Fix epochs/batch_size; only the hidden-layer width is tuned
model = scikit_learn.KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose=0)
# Candidate hidden-layer sizes
param_grid = dict(num_neurons=[1, 5, 10, 15, 20])
# Grid-search over every candidate using cross-validation
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
grid_result = grid.fit(X, Y)
# Report the best score/params and every evaluated combination
print('Best: {} using {}'.format(grid_result.best_score_, grid_result.best_params_))
cv = grid_result.cv_results_
for mean, std, param in zip(cv['mean_test_score'], cv['std_test_score'], cv['params']):
    print("%f (%f) with: %r" % (mean, std, param))
Best: 0.7708333333333334 using {'num_neurons': 10}
0.651042 (0.024774) with: {'num_neurons': 1}
0.757812 (0.019918) with: {'num_neurons': 5}
0.770833 (0.038450) with: {'num_neurons': 10}
0.769531 (0.027251) with: {'num_neurons': 15}
0.764323 (0.032734) with: {'num_neurons': 20}