Handling regression problems with sklearn
import numpy as np
import matplotlib.pyplot as plt
# Generate synthetic data
def gen_data(x1, x2):
    y = np.sin(x1) / 2 + np.cos(x2) / 2 + 0.1 * x1
    return y

def load_data():
    x1_train = np.linspace(0, 50, 500)
    x2_train = np.linspace(-10, 10, 500)
    # np.random.random() returns a scalar, so each row stays a flat [x1, x2, y] triple
    data_train = np.array([[x1, x2, gen_data(x1, x2) + np.random.random() - 0.5] for x1, x2 in zip(x1_train, x2_train)])
    x1_test = np.linspace(0, 50, 100) + np.random.random(100) * 0.5
    x2_test = np.linspace(-10, 10, 100) + 0.02 * np.random.random(100)
    data_test = np.array([[x1, x2, gen_data(x1, x2)] for x1, x2 in zip(x1_test, x2_test)])
    return data_train, data_test
train, test = load_data()
# The first two columns of train are x, the last column is y; this y carries random noise
x_train, y_train = train[:, :2], train[:, 2]
x_test, y_test = test[:, :2], test[:, 2]  # same as above, but this y is noise-free
# Regression part: fit a model, score it on the test set, and plot predictions
def try_different_method(model, method):
    model.fit(x_train, y_train)
    score = model.score(x_test, y_test)
    result = model.predict(x_test)
    plt.figure()
    plt.plot(np.arange(len(result)), y_test, "go-", label="True value")
    plt.plot(np.arange(len(result)), result, "ro-", label="Predict value")
    plt.title(f"method:{method}---score:{score:.4f}")
    plt.legend(loc="best")
    plt.show()
# Model selection
# 1. Decision tree regression
from sklearn import tree
model_decision_tree_regression = tree.DecisionTreeRegressor()
# 2. Linear regression
from sklearn.linear_model import LinearRegression
model_linear_regression = LinearRegression()
# 3. SVM regression
from sklearn import svm
model_svm = svm.SVR()
# 4. kNN regression
from sklearn import neighbors
model_k_neighbor = neighbors.KNeighborsRegressor()
# 5. Random forest regression
from sklearn import ensemble
model_random_forest_regressor = ensemble.RandomForestRegressor(n_estimators=20)  # use 20 decision trees
# 6. AdaBoost regression
model_adaboost_regressor = ensemble.AdaBoostRegressor(n_estimators=50)  # 50 weak learners (decision trees by default)
# 7. GBRT (gradient-boosted regression tree) regression
model_gradient_boosting_regressor = ensemble.GradientBoostingRegressor(n_estimators=100)  # 100 boosting stages
# 8. Bagging regression
model_bagging_regressor = ensemble.BaggingRegressor()
# 9. ExtraTree (extremely randomized tree) regression
from sklearn.tree import ExtraTreeRegressor
model_extra_tree_regressor = ExtraTreeRegressor()
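With all nine models constructed, a minimal driver loop can push each one through try_different_method. This is a sketch; the list and the display labels below are my own naming, not from the original.

# Run every model through the same fit/score/plot helper
models = [
    (model_decision_tree_regression, "decision tree"),
    (model_linear_regression, "linear regression"),
    (model_svm, "SVM"),
    (model_k_neighbor, "kNN"),
    (model_random_forest_regressor, "random forest"),
    (model_adaboost_regressor, "AdaBoost"),
    (model_gradient_boosting_regressor, "GBRT"),
    (model_bagging_regressor, "bagging"),
    (model_extra_tree_regressor, "extra tree"),
]
for model, name in models:
    try_different_method(model, name)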
Handling classification problems with sklearn
import numpy as np
import pandas
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, LassoCV, LassoLarsCV
from sklearn.svm import SVC
from sklearn.svm import LinearSVC  # support vector machine (linear)
from sklearn.naive_bayes import MultinomialNB  # naive Bayes
from sklearn.tree import DecisionTreeClassifier  # decision tree
from sklearn.ensemble import RandomForestClassifier  # random forest
from sklearn.ensemble import GradientBoostingClassifier  # GBDT
from xgboost import XGBClassifier  # xgboost
def modelReturn(model, name):
    # Fit the model passed in, then report exact-match accuracy on the test set
    model.fit(x_train, y_train)
    predict = model.predict(x_test)
    trueNum = 0
    for i in range(len(y_test)):
        if y_test[i] == predict[i]:
            trueNum += 1
    print(name, ":", trueNum / len(y_test))
# modelReturn(model, "random forest")
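The manual counting loop above is equivalent to sklearn's built-in exact-match accuracy metric. A shorter variant, as a sketch using sklearn.metrics.accuracy_score (the helper name model_accuracy is mine):

from sklearn.metrics import accuracy_score

def model_accuracy(model, name):
    # Same fit/predict flow, but let sklearn compute the exact-match accuracy
    model.fit(x_train, y_train)
    print(name, ":", accuracy_score(y_test, model.predict(x_test)))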
## Feature engineering
# Read the data
dataframe = pandas.read_csv("export.csv")
# Get the values out of the CSV
dataset = dataframe.values
# Columns 0-52 (53 columns, 0-indexed) are features; column index 53 is the label
X = dataset[:, 0:53].astype(float)
Y = dataset[:, 53]
x_train, x_test, y_train, y_test = train_test_split(X, Y)
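If export.csv is not at hand, a synthetic stand-in with the same shape (53 feature columns plus one label column) keeps the rest of the script runnable. This is a sketch in the document's own commented-out style; the make_classification parameters are chosen arbitrarily.

"""
# Hypothetical stand-in for export.csv: swap in for the read_csv block above
from sklearn.datasets import make_classification
X, Y = make_classification(n_samples=1000, n_features=53, n_informative=10, random_state=0)
X = np.abs(X)  # MultinomialNB below requires non-negative features
x_train, x_test, y_train, y_test = train_test_split(X, Y)
"""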
# 1. Lasso
model = Lasso(alpha=0.005)
# Tuning alpha controls the regularization strength and hence the degree of fitting.
# Note that Lasso is a regressor, so the exact-match accuracy in modelReturn is not
# a meaningful metric for it (see the tolerance-based check further below).
modelReturn(model, "Lasso")
# 2. Decision tree
model = DecisionTreeClassifier()
modelReturn(model, "decision tree")
# 3. Random forest
model = RandomForestClassifier()
modelReturn(model, "random forest")
# 4. Naive Bayes
model = MultinomialNB()
modelReturn(model, "naive Bayes")
# 5. Linear SVM
model = LinearSVC()
modelReturn(model, "linear SVM")
# 6. SVM
model = SVC()
modelReturn(model, "SVM")
# 7. KNN
model = KNeighborsClassifier(n_neighbors=11)
modelReturn(model, "KNN")
# 8. Logistic regression
model = LogisticRegression()
modelReturn(model, "logistic regression")
# Linear regression (standalone snippet; x, y, and test_vector below are small
# placeholder arrays so the example runs on its own)
from sklearn import linear_model
x = [[0.0], [1.0], [2.0]]  # placeholder training inputs
y = [0.0, 1.0, 2.0]        # placeholder training targets
test_vector = [[3.0]]      # placeholder query point
linear = linear_model.LinearRegression()
linear.fit(x, y)
print("linear's score: ", linear.score(x, y))
print("w:", linear.coef_)
print("b:", linear.intercept_)
print("predict: ", linear.predict(test_vector))
# 9. xgboost
model = XGBClassifier()
modelReturn(model, "xgboost")
# 10. GBDT
model = GradientBoostingClassifier()
modelReturn(model, "GBDT")
"""
model.fit(x_train,y_train);
predict =model.predict(x_test);
trueNum =0;
print(predict)
for i in range(len(y_test)):
if ((abs(y_test[i])-abs(predict[i])< 0.5)):
trueNum += 1;
print(trueNum/len(y_test));
"""
"""
#降到27个维度
pca = PCA(n_components=27);
xTrainPca = pca.fit_transform(x_train);
xTestPca = pca.fit_transform(x_test);
log =LogisticRegression();
log.fit(xTrainPca,y_train);
print("准确率:",log.score(xTestPca,y_test));
"""
"""
#降到50个维度
pca = PCA(n_components=50);
xTrainPca = pca.fit_transform(x_train);
xTestPca = pca.fit_transform(x_test);
knn = KNeighborsClassifier(n_neighbors=11);
knn.fit(xTrainPca,y_train);
print(knn.score(xTestPca,y_test))
"""