Regression with MLPRegressor in sklearn

The dataset used here is a processed version of the test.txt file generated in the previous article.
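The original test.txt is not reproduced here. If you only want to run the script below end to end, the following stand-in writes a two-column file (x in the first column, a noisy nonlinear y in the second). The functional form and noise level are assumptions, not the original data.

import numpy as np

# Hypothetical stand-in for test.txt: column 0 is x, column 1 is a noisy function of x.
rng = np.random.RandomState(0)
x = np.linspace(-3, 3, 300)
y = np.sin(x) + 0.1 * rng.randn(x.shape[0])  # assumed shape of the data, not the real dataset
np.savetxt("./test.txt", np.column_stack((x, y)))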

# -*- coding: utf-8 -*-
#--------------------------
#from sklearn.neural_network import MLPClassifier  # for multi-class classification
#scikit-learn can build neural networks
#MLP is the multi-layer perceptron, a feed-forward neural network
#only the cross-entropy loss is supported; mlp.predict_proba() returns probability
#estimates p(y|x) while the cross-entropy is minimized
#multi-class classification is supported via softmax
#mlp = MLPClassifier(hidden_layer_sizes=(13, 13, 13), max_iter=500)
#mlp.fit(xtrain, ytrain)
#--------------------------
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
a = np.loadtxt("./test.txt")  # load the two-column data from the txt file
scaler = preprocessing.StandardScaler().fit(a)
a = scaler.transform(a)  # standardize both columns to zero mean and unit variance
#print(a.shape)
x = a[:,0]  # first column: input feature
y = a[:,1]  # second column: target
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
X_train = X_train.reshape(-1, 1)  # sklearn expects 2-D input of shape (n_samples, n_features)
X_test = X_test.reshape(-1, 1)
# alpha: L2 penalty parameter. MLP supports regularization (L2 by default); tune it for your data.
# hidden_layer_sizes=(5, 2): two hidden layers, 5 neurons in the first and 2 in the second,
# i.e. three layers of trainable weights in total.
#clf = MLPRegressor(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
# activation options:
#'identity': no-op activation, useful for linear bottlenecks, f(x) = x
#'logistic': the logistic sigmoid, f(x) = 1 / (1 + exp(-x))
#'tanh': the hyperbolic tangent, f(x) = tanh(x)
#'relu': the rectified linear unit, f(x) = max(0, x)
model_mlp = MLPRegressor(
    hidden_layer_sizes=(6,2),  activation='relu', solver='adam', alpha=0.0001, batch_size='auto',
    learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=5000, shuffle=True,
    random_state=1, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
    early_stopping=False,beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model_mlp.fit(X_train, y_train)
import time
startTime = time.time()
x1 = x.reshape(-1, 1)
mlp_score = model_mlp.score(x1, y)
print('sklearn MLP regressor score', mlp_score)  # R^2 coefficient of determination, not accuracy
result = model_mlp.predict(x1)
stopTime = time.time()
sumTime = stopTime - startTime
print('Total time for scoring and prediction:', sumTime)
# inp = [[ele] for ele in X_train]
# pre = clf.predict(inp)
# #print(pre)
plt.plot(X_train, y_train, 'bo')
plt.plot(x1, result, 'ro')
plt.show()
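Note that the held-out split (X_test, y_test) created above is never actually evaluated; the score printed by the script is computed on the full dataset, which includes the training points. A minimal addition that also reports performance on the unseen test split, reusing the fitted model_mlp:

from sklearn.metrics import mean_squared_error, r2_score

# Evaluate on the held-out 20% split only.
y_pred_test = model_mlp.predict(X_test)
print('Test R^2:', r2_score(y_test, y_pred_test))
print('Test MSE:', mean_squared_error(y_test, y_pred_test))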

Running the script produces the following output:

[Figure 1: scatter plot of the training samples (blue dots) and the MLP predictions over all x (red dots)]

sklearn MLP regressor score 0.9969340019776348
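The hidden layer sizes and alpha above were picked by hand. If you want to search them automatically, here is a sketch using GridSearchCV over a small grid; the grid values are assumptions, not tuned recommendations.

from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor

# Hypothetical search grid; widen or narrow it for your data.
param_grid = {
    'hidden_layer_sizes': [(6, 2), (10, 5), (20,)],
    'alpha': [1e-5, 1e-4, 1e-3],
}
grid = GridSearchCV(MLPRegressor(solver='adam', max_iter=5000, random_state=1),
                    param_grid, cv=5, scoring='r2')
grid.fit(X_train, y_train)
print('Best parameters:', grid.best_params_)
print('Best cross-validated R^2:', grid.best_score_)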
