This is a basic regression problem on Kaggle. We mainly experiment with an XGBoost model. What follows is only a simple tuning pass; proper hyperparameter selection really calls for GridSearchCV or a similar search. Here we stick to plain cross-validation, since the goal is to compare different model-ensembling methods.
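As a hedged sketch of the GridSearchCV approach mentioned above (not part of the original notebook; the grid values and the function name gridSearchXGB are assumptions), a search over a few XGBRegressor hyperparameters could look like this:
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor

def gridSearchXGB(train_X, train_y):
    # Small assumed grid; a real search would cover more values.
    param_grid = {
        'max_depth': [3, 4, 6],
        'n_estimators': [200, 500],
        'learning_rate': [0.05, 0.1],
    }
    search = GridSearchCV(XGBRegressor(), param_grid,
                          cv=5, scoring='neg_mean_squared_error')
    search.fit(train_X, train_y)
    print(search.best_params_)
    return search.best_estimator_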
import numpy as np
import pandas as pd

# This problem really has just two kinds of features, categorical and numeric,
# so unlike Titanic we can process them in a uniform way.
# First handle features that are categorical in meaning but numeric in value:
# MSSubClass is really a category (the dwelling type), yet its values are integers,
# so we convert the column to a string type.
def processMSSubClass(df):
    df['MSSubClass'] = df['MSSubClass'].astype(str)
    return df

def processOverallQual(df):
    df['OverallQual'] = df['OverallQual'].astype(str)
    return df

def processOverallCond(df):
    df['OverallCond'] = df['OverallCond'].astype(str)
    return df
# There are many categorical features, so one-hot encoding generates a large
# number of derived columns. Here we one-hot encode all of the category features.
def processDummies(df):
    df = processMSSubClass(df)
    df = processOverallQual(df)
    df = processOverallCond(df)
    df = pd.get_dummies(df)
    return df
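A hedged toy example (not from the original notebook; the values are made up) of what get_dummies does once MSSubClass is a string:
# Toy illustration with assumed values: each distinct MSSubClass value becomes its own
# indicator column (MSSubClass_20, MSSubClass_60, ...), while numeric columns such as
# LotArea pass through unchanged.
toy = pd.DataFrame({'MSSubClass': ['20', '60', '20'],
                    'LotArea': [8450, 9600, 11250]})
print(pd.get_dummies(toy).columns.tolist())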
# Handle missing values in the numeric data.
# There are quite a few missing values; before filling them we should check what each
# missing value actually means. Here they carry no special meaning, so we simply fill
# them with the column means.
def processMissData(df):
    mean_cols = df.dropna().mean()
    df = df.fillna(mean_cols)
    # df.isnull().sum().sum() should now be 0
    return df
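A hedged sketch (the helper name inspectMissing is an assumption) of how the missing-value counts referred to above could be inspected before deciding on a fill strategy:
# Hedged sketch: list the columns with the most missing values before filling.
def inspectMissing(df):
    counts = df.isnull().sum()
    counts = counts[counts > 0].sort_values(ascending=False)
    print(counts.head(10))  # in this dataset, e.g. LotFrontage, GarageYrBlt, MasVnrArea
    return counts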
# Standardize the numeric data.
# numeric_cols = all_df.columns[all_df.dtypes != 'object'] picks out the numeric columns;
# df.iloc[:, :10].describe() is handy for inspecting the actual values.
def processDataScaled(df):
    numeric_cols = df.columns[df.dtypes != 'object']
    numeric_mean = df.loc[:, numeric_cols].mean()
    numeric_std = df.loc[:, numeric_cols].std()
    df.loc[:, numeric_cols] = (df.loc[:, numeric_cols] - numeric_mean) / numeric_std
    return df
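A quick hedged check (not in the original; the helper name checkScaling is an assumption) that the scaling worked, i.e. that the numeric columns now have roughly zero mean and unit standard deviation:
# Hedged sanity check: numeric columns should now be approximately standardized.
def checkScaling(df):
    numeric_cols = df.columns[df.dtypes != 'object']
    print(df.loc[:, numeric_cols].mean().abs().max())   # should be close to 0
    print(df.loc[:, numeric_cols].std().head())         # should be close to 1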
# Full preprocessing pipeline: one-hot encode, fill missing values, standardize.
def processData(df):
    df = processDummies(df)
    df = processMissData(df)
    df = processDataScaled(df)
    return df
Model ensembling
The simplest approach is Bagging:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
import dataprocess

def BaggingModel():
    input_df = pd.read_csv('train.csv', index_col=0)
    submit_df = pd.read_csv('test.csv', index_col=0)
    train_y = np.log1p(input_df.pop('SalePrice'))  # training labels
    df = pd.concat([input_df, submit_df])
    df = dataprocess.processData(df)
    input_df = df.loc[input_df.index]
    submit_df = df.loc[submit_df.index]
    train_X = input_df.values
    test_X = submit_df.values
    # Use the CV scores to test how the number of base estimators affects the final result.
    # Note: when setting up Bagging, pass the small base learner (ridge) as base_estimator.
    ridge = Ridge(15)  # ridge regression with alpha=15
    params = [1, 10, 15, 20, 25, 30, 40]
    test_scores = []
    for param in params:
        clf = BaggingRegressor(n_estimators=param, base_estimator=ridge)
        test_score = np.sqrt(-cross_val_score(clf, train_X, train_y, cv=10, scoring='neg_mean_squared_error'))
        test_scores.append(np.mean(test_score))
    plt.plot(params, test_scores)
    plt.title("n_estimator vs CV Error")
    plt.show()
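The function above only plots the CV error. As a hedged continuation (the chosen n_estimators value and the output file name are assumptions), the final Bagging model could be fit and a submission written at the end of BaggingModel like this:
    # Hedged continuation of BaggingModel: fit with an assumed best n_estimators and write a submission.
    clf = BaggingRegressor(n_estimators=25, base_estimator=Ridge(15))  # 25 is an assumed choice
    clf.fit(train_X, train_y)
    predictions = np.expm1(clf.predict(test_X))  # undo the log1p transform
    result = pd.DataFrame({"Id": submit_df.index, "SalePrice": predictions})
    result.to_csv('bagging_result.csv', index=False)  # assumed file name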
Next we test Boosting:
from sklearn.ensemble import AdaBoostRegressor

def BoostModel():
    input_df = pd.read_csv('train.csv', index_col=0)
    submit_df = pd.read_csv('test.csv', index_col=0)
    train_y = np.log1p(input_df.pop('SalePrice'))  # training labels
    df = pd.concat([input_df, submit_df])
    df = dataprocess.processData(df)
    input_df = df.loc[input_df.index]
    submit_df = df.loc[submit_df.index]
    train_X = input_df.values
    test_X = submit_df.values
    params = [10, 15, 20, 25, 30, 35, 40, 45, 50]
    ridge = Ridge(15)  # ridge regression with alpha=15
    test_scores = []
    for param in params:
        clf = AdaBoostRegressor(n_estimators=param, base_estimator=ridge)
        test_score = np.sqrt(-cross_val_score(clf, train_X, train_y, cv=10, scoring='neg_mean_squared_error'))
        test_scores.append(np.mean(test_score))  # average over the 10 folds before plotting
    plt.plot(params, test_scores)
    plt.show()
Then we use XGBoost for model selection:
from xgboost import XGBRegressor

def xgboostModel():
    input_df = pd.read_csv('train.csv', index_col=0)
    submit_df = pd.read_csv('test.csv', index_col=0)
    train_y = np.log1p(input_df.pop('SalePrice'))  # training labels
    df = pd.concat([input_df, submit_df])
    df = dataprocess.processData(df)
    input_df = df.loc[input_df.index]
    submit_df = df.loc[submit_df.index]
    train_X = input_df.values
    test_X = submit_df.values
    params = [1, 2, 3, 4, 5, 6]
    ridge = Ridge(15)  # ridge regression with alpha=15 (not used in this function)
    test_scores = []
    for param in params:
        clf = XGBRegressor(max_depth=param)
        test_score = np.sqrt(-cross_val_score(clf, train_X, train_y, cv=10, scoring='neg_mean_squared_error'))
        test_scores.append(np.mean(test_score))
    plt.plot(params, test_scores)
    plt.title("max_depth vs CV Error")
    plt.show()
    # Fit the final model with the chosen max_depth and write the submission.
    clf = XGBRegressor(max_depth=6)
    clf.fit(train_X, train_y)
    predictions = clf.predict(test_X).astype(np.float64)
    predictions = np.expm1(predictions)  # undo the log1p transform
    result = pd.DataFrame({"Id": submit_df.index, "SalePrice": predictions})
    result.to_csv('xgboost_result.csv', index=False)
Here we use the idea of Stacking to combine the strengths of two or more models.
First, we take the best parameters found above and build our final base models.
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression

# Stacking
def stackModel():
    input_df = pd.read_csv('train.csv', index_col=0)
    submit_df = pd.read_csv('test.csv', index_col=0)
    train_y = np.log1p(input_df.pop('SalePrice')).values  # training labels
    df = pd.concat([input_df, submit_df])
    df = dataprocess.processData(df)
    input_df = df.loc[input_df.index]
    submit_df = df.loc[submit_df.index]
    train_X = input_df.values
    test_X = submit_df.values
    clfs = [RandomForestRegressor(n_estimators=500, max_features=.3),
            XGBRegressor(max_depth=6, n_estimators=500),
            Ridge(15)]
    # Level-0 training: each base model's predictions become one feature column.
    dataset_stack_train = np.zeros((train_X.shape[0], len(clfs)))
    dataset_stack_test = np.zeros((test_X.shape[0], len(clfs)))
    for j, clf in enumerate(clfs):
        clf.fit(train_X, train_y)
        y_submission = clf.predict(test_X)
        y_train = clf.predict(train_X)
        dataset_stack_train[:, j] = y_train
        dataset_stack_test[:, j] = y_submission
    print("Starting stacking...")
    # Level-1 model trained on the base models' predictions.
    clf = RandomForestRegressor(n_estimators=1000, max_depth=8)
    clf.fit(dataset_stack_train, train_y)
    y_submission = clf.predict(dataset_stack_test)
    predictions = np.expm1(y_submission)
    result = pd.DataFrame({"Id": submit_df.index, "SalePrice": predictions})
    result.to_csv('stack_result.csv', index=False)
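Note that the level-0 features above are predictions on the same data the base models were trained on, which lets the level-1 model overfit to them. A more careful, hedged sketch (the fold count and the function name stackOutOfFold are assumptions) would build those features from out-of-fold predictions with KFold:
# Hedged sketch: out-of-fold stacking, so level-1 features are not in-sample predictions.
from sklearn.model_selection import KFold

def stackOutOfFold(clfs, train_X, train_y, test_X, n_folds=5):
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=0)
    stack_train = np.zeros((train_X.shape[0], len(clfs)))
    stack_test = np.zeros((test_X.shape[0], len(clfs)))
    for j, clf in enumerate(clfs):
        test_fold_preds = np.zeros((test_X.shape[0], n_folds))
        for i, (tr_idx, val_idx) in enumerate(kf.split(train_X)):
            clf.fit(train_X[tr_idx], train_y[tr_idx])
            stack_train[val_idx, j] = clf.predict(train_X[val_idx])  # out-of-fold predictions
            test_fold_preds[:, i] = clf.predict(test_X)
        stack_test[:, j] = test_fold_preds.mean(axis=1)  # average test predictions over folds
    return stack_train, stack_test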
The way the models are combined afterwards is the same. If this were a classification task, we could use LogisticRegression as the second-level model instead.
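As a hedged illustration of that last point (the stacked feature matrices and labels are placeholders reusing the names from stackModel, and in a classification setting train_y would be class labels), the level-1 step could look like this:
# Hedged sketch: for classification, the level-1 model is a LogisticRegression
# fit on the base models' stacked predictions.
meta = LogisticRegression()
meta.fit(dataset_stack_train, train_y)           # train_y would be class labels here
final_pred = meta.predict(dataset_stack_test)    # final class predictions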