Parameter description
Attribute description
Method description
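As a quick reference, here is a sketch of the constructor parameters, methods, and fitted attributes that the examples below rely on. All names come from scikit-learn's MLPClassifier API; the defaults quoted match the estimator repr shown in the first example and may differ in other releases.

from sklearn.neural_network import MLPClassifier

# Constructor parameters (most frequently tuned):
#   hidden_layer_sizes : tuple, size of each hidden layer, e.g. (100,) or (50, 50)
#   activation         : 'identity' | 'logistic' | 'tanh' | 'relu'  (default 'relu')
#   solver             : 'lbfgs' | 'sgd' | 'adam'                   (default 'adam')
#   alpha              : L2 regularization strength                 (default 1e-4)
#   learning_rate_init : initial learning rate for 'sgd' / 'adam'   (default 0.001)
#   max_iter           : maximum number of iterations / epochs      (default 200)
mlp = MLPClassifier(hidden_layer_sizes=(50,), solver='adam', alpha=1e-4, max_iter=200)

# Methods:
#   mlp.fit(X, y)           train on X of shape (n_samples, n_features) and 1-D labels y
#   mlp.predict(X)          predicted class labels
#   mlp.predict_proba(X)    per-class probability estimates
#   mlp.score(X, y)         mean accuracy on the given data
#
# Attributes available after fit():
#   mlp.coefs_              list of per-layer weight matrices
#   mlp.intercepts_         list of per-layer bias vectors
#   mlp.loss_               final value of the loss function
#   mlp.n_iter_             number of iterations actually run
#   mlp.n_layers_           total layer count (input + hidden + output)
#   mlp.out_activation_     output activation name, e.g. 'softmax'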
from sklearn.neural_network import MLPClassifier

# Minimal example: fit a tiny network on two training points
X = [[0., 0.], [1., 1.]]
y = [0, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                    hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
# The fitted estimator's repr shows every hyperparameter, defaults included:
# MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
#               beta_2=0.999, early_stopping=False, epsilon=1e-08,
#               hidden_layer_sizes=(5, 2), learning_rate='constant',
#               learning_rate_init=0.001, max_iter=200, momentum=0.9,
#               nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
#               solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
#               warm_start=False)
print(clf.predict([[2., 2.], [-1., -2.]]))
# [1 0]
print(clf.predict_proba([[2., 2.], [-1., -2.]]))
# [[1.96718015e-04  9.99803282e-01]
#  [1.00000000e+00  4.67017947e-144]]
Example 1: a three-class classifier for the iris dataset
from sklearn.neural_network import MLPClassifier
from BP_Net import normalized, load_csv  # custom helpers; a hypothetical sketch follows the output below
import numpy as np

if __name__ == '__main__':
    X, Y = load_csv()   # X: (4, 150), features as rows; Y: (1, 150), labels in {0, 0.5, 1}
    X = normalized(X)
    # Remap the labels {0, 0.5, 1} to the class indices {0, 1, 2}
    Y[np.where(Y == 1)] = 2
    Y[np.where(Y == 0.5)] = 1
    # Y = normalized(Y)
    """Training set: 90 samples (30 per class)"""
    train_x = np.hstack((X[:, 0:30], X[:, 50:80], X[:, 100:130]))
    train_x = train_x.T
    train_y = np.hstack((Y[:, 0:30], Y[:, 50:80], Y[:, 100:130]))
    """Test set: 60 samples (20 per class)"""
    test_x = np.hstack((X[:, 30:50], X[:, 80:100], X[:, 130:150]))
    test_x = test_x.T
    test_y = np.hstack((Y[:, 30:50], Y[:, 80:100], Y[:, 130:150]))
    # First, create a multi-class model object (much like instantiating a class in Java).
    # Omitted constructor arguments fall back to their defaults; typically you set the
    # hidden-layer structure, tune the regularization parameter alpha, and set max_iter.
    mlp = MLPClassifier(hidden_layer_sizes=(10,), alpha=0.01, max_iter=10000)
    # Training is done with fit(), as for every scikit-learn estimator.
    # print(train_x.shape)
    # print(train_y.ravel().shape)
    mlp.fit(train_x, train_y.ravel())  # note: y must be flattened back to a 1-D array
    # Once trained, the model exposes its fitted state through methods and attributes:
    # the accuracy on the held-out test set...
    print('Test-set accuracy:', mlp.score(test_x, test_y.ravel()))
    # ...the final value of the loss function...
    print('Final training loss:', mlp.loss_)
    # ...and the learned weight matrices
    print('Learned weights (coefs_):', mlp.coefs_)
Output
Test-set accuracy: 1.0
Final training loss: 0.05801992526160142
Learned weights (coefs_): [array([[ 1.31733465e+00, -2.93551147e-01, 1.03982794e+00,
-3.04550107e-04, -8.51421595e-01, 6.62205673e-29,
2.11835613e-15, 8.08139861e-01, -1.23505242e-06,
-5.51082075e-01],
[-5.79154193e-01, 1.36001217e+00, -1.12722611e+00,
1.33249760e-15, 9.80136514e-01, -2.05913410e-02,
-1.15404725e-02, -6.38918400e-01, 4.31038716e-05,
-2.21295744e-01],
[ 1.35044205e+00, -1.04249680e+00, 8.52670431e-01,
-8.03660618e-28, -1.06280029e+00, 6.88455552e-26,
-1.49624759e-06, 1.17238104e+00, 6.78174401e-05,
-1.43132703e-02],
[ 1.64364171e+00, -9.59131062e-01, 1.31785190e+00,
-3.96164651e-05, -9.56257701e-01, -1.44515932e-02,
3.95722198e-28, 6.42796159e-01, -1.20803943e-05,
-2.97102291e-04]], dtype=float32), array([[ 1.0823956e+00],
[-1.6924484e+00],
[ 1.4192194e+00],
[-3.2202732e-02],
[-1.4481077e+00],
[-1.8761203e-03],
[ 1.9695616e-04],
[ 1.6395217e+00],
[-4.0499594e-02],
[ 7.0304796e-02]], dtype=float32)]
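Example 1 imports load_csv and normalized from a local BP_Net module that is not shown. Purely for illustration, here is a hypothetical sketch of what those helpers could look like, assuming an iris.csv file of 150 numeric rows (4 feature columns plus a label column encoded 0 / 0.5 / 1, matching the remapping step above); the file name and column layout are assumptions, not the author's actual code.

import numpy as np

def load_csv(path='iris.csv'):
    # Hypothetical loader: returns X of shape (4, 150) (features as rows,
    # samples as columns) and Y of shape (1, 150) with labels in {0, 0.5, 1}.
    data = np.loadtxt(path, delimiter=',')  # assumes a purely numeric CSV, no header
    X = data[:, :4].T
    Y = data[:, 4].reshape(1, -1)
    return X, Y

def normalized(X):
    # Hypothetical min-max scaling of each feature (row) into [0, 1].
    x_min = X.min(axis=1, keepdims=True)
    x_max = X.max(axis=1, keepdims=True)
    return (X - x_min) / (x_max - x_min)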
Example 2: handwritten digit recognition (MNIST)
from sklearn.neural_network import MLPClassifier
# from sklearn.datasets import fetch_mldata
import numpy as np
import pickle
import gzip

# Load the data (the classic mnist.pkl.gz pickle with train/validation/test splits)
# mnist = fetch_mldata("MNIST original")
with gzip.open("mnist.pkl.gz") as fp:
    training_data, valid_data, test_data = pickle.load(fp, encoding='bytes')
x_training_data, y_training_data = training_data
x_valid_data, y_valid_data = valid_data
x_test_data, y_test_data = test_data
classes = np.unique(y_test_data)

# Merge the validation set into the training set
x_training_data_final = np.vstack((x_training_data, x_valid_data))
y_training_data_final = np.append(y_training_data, y_valid_data)

# Configure the network
# mlp = MLPClassifier(solver='lbfgs', activation='relu', alpha=1e-4, hidden_layer_sizes=(50, 50), random_state=1, max_iter=10, verbose=10, learning_rate_init=.1)
# With solver='lbfgs' the accuracy was 79%; lbfgs suits small data sets (fewer than a few
# thousand samples) and trains on the full set at once, which costs a lot of memory.
# mlp = MLPClassifier(solver='adam', activation='relu', alpha=1e-4, hidden_layer_sizes=(50, 50), random_state=1, max_iter=10, verbose=10, learning_rate_init=.1)
# With solver='adam' the accuracy was only 67%.
mlp = MLPClassifier(solver='sgd', activation='relu', alpha=1e-4, hidden_layer_sizes=(50, 50), random_state=1,
                    max_iter=100, verbose=True, learning_rate_init=.1)
# With solver='sgd' the accuracy reaches about 97%, and training runs in mini-batches,
# which keeps memory usage low.

# Train the model
mlp.fit(x_training_data_final, y_training_data_final)

# Inspect the fitted model
print(mlp.score(x_test_data, y_test_data))
print(mlp.n_layers_)
print(mlp.n_iter_)
print(mlp.loss_)
print(mlp.out_activation_)
Output
Iteration 1, loss = 0.31443422
Iteration 2, loss = 0.13076474
Iteration 3, loss = 0.09742518
Iteration 4, loss = 0.08100330
Iteration 5, loss = 0.06801912
Iteration 6, loss = 0.06218105
Iteration 7, loss = 0.05417376
Iteration 8, loss = 0.04865488
Iteration 9, loss = 0.04225277
Iteration 10, loss = 0.03999533
Iteration 11, loss = 0.03581450
Iteration 12, loss = 0.03553377
Iteration 13, loss = 0.02851309
Iteration 14, loss = 0.02561775
Iteration 15, loss = 0.02522932
Iteration 16, loss = 0.02467297
Iteration 17, loss = 0.02161946
Iteration 18, loss = 0.02143663
Iteration 19, loss = 0.02414556
Iteration 20, loss = 0.02093072
Iteration 21, loss = 0.02043619
Iteration 22, loss = 0.02022548
Iteration 23, loss = 0.01801227
Iteration 24, loss = 0.01937727
Iteration 25, loss = 0.02075462
Iteration 26, loss = 0.01892496
Iteration 27, loss = 0.01754461
Iteration 28, loss = 0.01478817
Iteration 29, loss = 0.01456456
Iteration 30, loss = 0.01663158
Iteration 31, loss = 0.01425532
Iteration 32, loss = 0.01702378
Iteration 33, loss = 0.01619255
Iteration 34, loss = 0.01835025
Iteration 35, loss = 0.01920801
Iteration 36, loss = 0.01692277
Iteration 37, loss = 0.01762001
Iteration 38, loss = 0.01061955
Iteration 39, loss = 0.01233185
Iteration 40, loss = 0.01695161
Iteration 41, loss = 0.01152016
Iteration 42, loss = 0.01516701
Iteration 43, loss = 0.02044881
Iteration 44, loss = 0.01657610
Iteration 45, loss = 0.01598418
Iteration 46, loss = 0.01809941
Iteration 47, loss = 0.02124500
Iteration 48, loss = 0.01304665
Iteration 49, loss = 0.00769560
Iteration 50, loss = 0.01211744
Iteration 51, loss = 0.01232973
Iteration 52, loss = 0.01403902
Iteration 53, loss = 0.01696003
Iteration 54, loss = 0.01544194
Iteration 55, loss = 0.01286083
Iteration 56, loss = 0.01341688
Iteration 57, loss = 0.00990281
Iteration 58, loss = 0.00681194
Iteration 59, loss = 0.01107953
Iteration 60, loss = 0.01781647
Iteration 61, loss = 0.01664884
Iteration 62, loss = 0.01788967
Iteration 63, loss = 0.02161008
Iteration 64, loss = 0.01799941
Iteration 65, loss = 0.02034798
Iteration 66, loss = 0.01451698
Iteration 67, loss = 0.01982461
Iteration 68, loss = 0.01926753
Iteration 69, loss = 0.01591123
Training loss did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.
0.972
4
69
0.0159112293736664
softmax
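The commented-out configurations above compare the three solvers by editing and re-running the script. Below is a minimal sketch that automates the same comparison in one loop; it assumes x_training_data_final, y_training_data_final, x_test_data, and y_test_data have already been built as above, and the scores it prints will vary with the scikit-learn version and random state rather than exactly reproducing the percentages quoted in the comments.

from sklearn.neural_network import MLPClassifier

# Same architecture and hyperparameters as the example; only the solver changes.
for solver in ('lbfgs', 'adam', 'sgd'):
    mlp = MLPClassifier(solver=solver, activation='relu', alpha=1e-4,
                        hidden_layer_sizes=(50, 50), random_state=1,
                        max_iter=100, learning_rate_init=.1)
    mlp.fit(x_training_data_final, y_training_data_final)
    print(solver, 'test accuracy:', mlp.score(x_test_data, y_test_data))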