Linear Models for Regression: Code from 《python机器学习基础教程》 (Introduction to Machine Learning with Python)

Linear Regression (LinearRegression)

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import mglearn

# Extended Boston housing dataset (original features plus derived interaction features)
X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Ordinary least squares fit
lr = LinearRegression().fit(X_train, y_train)
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
print("Training set score: {}".format(lr.score(X_train, y_train)))
print("Test set score: {}".format(lr.score(X_test, y_test)))

Ridge Regression (Ridge)

from sklearn.linear_model import Ridge, LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import mglearn

X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Default regularization strength (alpha=1)
ridge = Ridge().fit(X_train, y_train)
print("Training set score (alpha=1): {:.2f}".format(ridge.score(X_train, y_train)))
print("Test set score (alpha=1): {:.2f}".format(ridge.score(X_test, y_test)))

# Stronger regularization: coefficients are pushed closer to zero
ridge10 = Ridge(alpha=10).fit(X_train, y_train)
print("Training set score (alpha=10): {:.2f}".format(ridge10.score(X_train, y_train)))
print("Test set score (alpha=10): {:.2f}".format(ridge10.score(X_test, y_test)))

# Weaker regularization: behaves more like plain linear regression
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
print("Training set score (alpha=0.1): {:.2f}".format(ridge01.score(X_train, y_train)))
print("Test set score (alpha=0.1): {:.2f}".format(ridge01.score(X_test, y_test)))

# Compare coefficient magnitudes across alpha values and against OLS
plt.plot(ridge.coef_, 's', label="Ridge alpha=1")
plt.plot(ridge10.coef_, '^', label="Ridge alpha=10")
plt.plot(ridge01.coef_, 'v', label="Ridge alpha=0.1")
lr = LinearRegression().fit(X_train, y_train)
plt.plot(lr.coef_, 'o', label="LinearRegression")
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.hlines(0, 0, len(lr.coef_))
plt.ylim(-25, 25)
plt.legend()
plt.show()
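
The best alpha depends on the dataset. Instead of trying values by hand, a minimal sketch (not from the book; the alpha grid is illustrative) that lets scikit-learn's RidgeCV pick alpha by cross-validation could look like this:

from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split
import mglearn
import numpy as np

X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Search a logarithmic grid of alpha values with built-in cross-validation
ridge_cv = RidgeCV(alphas=np.logspace(-3, 3, 13)).fit(X_train, y_train)
print("Best alpha: {}".format(ridge_cv.alpha_))
print("Test set score: {:.2f}".format(ridge_cv.score(X_test, y_test)))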

Lasso

from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import mglearn
import numpy as np

X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Default alpha=1: strong L1 penalty, most coefficients become exactly zero
lasso = Lasso().fit(X_train, y_train)
print("Training set score (alpha=1): {:.2f}".format(lasso.score(X_train, y_train)))
print("Test set score (alpha=1): {:.2f}".format(lasso.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso.coef_ != 0)))

# Smaller alpha needs more iterations to converge, so max_iter is increased
lasso001 = Lasso(alpha=0.01, max_iter=100000).fit(X_train, y_train)
print("Training set score (alpha=0.01): {:.2f}".format(lasso001.score(X_train, y_train)))
print("Test set score (alpha=0.01): {:.2f}".format(lasso001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso001.coef_ != 0)))

# Very small alpha: almost no regularization, similar to LinearRegression
lasso00001 = Lasso(alpha=0.0001, max_iter=100000).fit(X_train, y_train)
print("Training set score (alpha=0.0001): {:.2f}".format(lasso00001.score(X_train, y_train)))
print("Test set score (alpha=0.0001): {:.2f}".format(lasso00001.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(lasso00001.coef_ != 0)))

# Compare Lasso coefficients across alpha values and against Ridge
plt.plot(lasso.coef_, 's', label="Lasso alpha=1")
plt.plot(lasso001.coef_, '^', label="Lasso alpha=0.01")
plt.plot(lasso00001.coef_, 'v', label="Lasso alpha=0.0001")
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)
plt.plot(ridge01.coef_, 'o', label="Ridge alpha=0.1")
plt.legend(ncol=2, loc=(0, 1.05))
plt.ylim(-25, 25)
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.show()
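
Scikit-learn also provides ElasticNet, which blends the L1 penalty of Lasso with the L2 penalty of Ridge. A minimal sketch on the same data (the alpha and l1_ratio values here are illustrative, not tuned) might look like this:

from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split
import mglearn
import numpy as np

X, y = mglearn.datasets.load_extended_boston()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# l1_ratio blends the penalties: 1.0 is pure Lasso, 0.0 is pure Ridge
enet = ElasticNet(alpha=0.01, l1_ratio=0.5, max_iter=100000).fit(X_train, y_train)
print("Training set score: {:.2f}".format(enet.score(X_train, y_train)))
print("Test set score: {:.2f}".format(enet.score(X_test, y_test)))
print("Number of features used: {}".format(np.sum(enet.coef_ != 0)))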
