sklearn LinearRegression线性回归函数

LinearRegression线性回归函数

  • 属性和参数
  • 示例
  • LinearRegression
  • Lasso
  • Lasso + alpha
  • Ridge
  • Ridge + alpha

属性和参数

  • regr.coef_:权值(即各特征的回归系数)
  • regr.intercept_:偏置(即截距项)

示例

LinearRegression

import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model, model_selection

# Load the diabetes regression dataset and hold out 25% as a test set.
diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)

# Ordinary least-squares linear regression.
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
print('Coefficients:%s, intercept %.2f' % (regr.coef_, regr.intercept_))
# np.mean of squared residuals is the MEAN squared error; the previous label
# "Residual sum of squares" was inaccurate (RSS is the sum, not the mean).
print("Mean squared error: %.2f" % np.mean((regr.predict(X_test) - y_test) ** 2))
# R^2 coefficient of determination on the held-out test set.
print('Score: %.2f' % regr.score(X_test, y_test))

Lasso

# Same experiment with L1-regularised regression (Lasso, default alpha=1.0).
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)
regr = linear_model.Lasso()
regr.fit(X_train, y_train)
print('Coefficients:%s, intercept %.2f' % (regr.coef_, regr.intercept_))
# Mean of squared residuals == mean squared error; the old label
# "Residual sum of squares" was inaccurate (RSS is the sum, not the mean).
print("Mean squared error: %.2f" % np.mean((regr.predict(X_test) - y_test) ** 2))
print('Score: %.2f' % regr.score(X_test, y_test))

Lasso + alpha

# Sweep the Lasso regularisation strength alpha and record the test score
# obtained at each setting.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)

alphas = [0.000001, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
scores = []
for alpha in alphas:
    model = linear_model.Lasso(alpha=alpha)
    model.fit(X_train, y_train)
    scores.append(model.score(X_test, y_test))

# Plot score against alpha; a log-scaled x-axis spreads out the sweep range.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(alphas, scores)
ax.set_xlabel(r"$\alpha$")
ax.set_ylabel(r"score")
ax.set_xscale('log')
ax.set_title("Lasso")
plt.show()

sklearn LinearRegression线性回归函数_第1张图片

Ridge

# Same experiment with L2-regularised regression (Ridge, default alpha=1.0).
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)
regr = linear_model.Ridge()
regr.fit(X_train, y_train)
print('Coefficients:%s, intercept %.2f' % (regr.coef_, regr.intercept_))
# Renamed the opaque local `Li` to `mse`: the mean of squared residuals is the
# mean squared error (the old "Residual sum of squares" label was inaccurate —
# RSS is the sum, not the mean).
mse = np.mean((regr.predict(X_test) - y_test) ** 2)
print("Mean squared error: %.2f" % mse)
print('Score: %.2f' % regr.score(X_test, y_test))

Ridge + alpha

# Sweep the Ridge regularisation strength alpha and record the test score
# obtained at each setting.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)

alphas = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
scores = []
for alpha in alphas:
    model = linear_model.Ridge(alpha=alpha)
    model.fit(X_train, y_train)
    scores.append(model.score(X_test, y_test))

# Plot score against alpha; a log-scaled x-axis spreads out the sweep range.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(alphas, scores)
ax.set_xlabel(r"$\alpha$")
ax.set_ylabel(r"score")
ax.set_xscale('log')
ax.set_title("Ridge")
plt.show()

sklearn LinearRegression线性回归函数_第2张图片

你可能感兴趣的:(sklearn)