House Prices: Advanced Regression Techniques(房价预测)

问题背景:基于描述住宅的 79 个特征变量,预测每套房子的最终销售价格。

分为以下几个步骤:

  1. 导入数据观察每个变量特征的意义以及对于房价的重要程度
  2. 筛选出主要影响房价的变量
  3. 清洗和转换变量
  4. 测试和输出数据

https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data 这个是官方的地址可以从中获得数据文件

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline

# Load the Kaggle "House Prices" competition data (train.csv / test.csv from
# the data page linked above).
data_train = pd.read_csv('train.csv')
data_test = pd.read_csv('test.csv')
# Peek at three random rows; only displays output inside a notebook cell.
data_train.sample(3)
def drop_low_colums(df):
    """Return names of numeric columns weakly correlated with SalePrice.

    Parameters
    ----------
    df : pandas.DataFrame
        Training frame; must contain a numeric ``SalePrice`` column.

    Returns
    -------
    list of str
        Numeric column names whose Pearson correlation with SalePrice
        is below 0.5 (candidates to drop).

    Notes
    -----
    The check is sign-sensitive, so strongly *negatively* correlated
    columns are also dropped -- NOTE(review): consider ``abs()`` if
    negative predictors should be retained.
    """
    # Restrict to numeric columns first: DataFrame.corr() raises on
    # object-dtype columns in pandas >= 2.0 (older pandas silently
    # skipped them, so this preserves the old behavior).
    data_corr = df.select_dtypes(include='number').corr()
    d_list = data_corr[data_corr.SalePrice < 0.5].index.tolist()
    return d_list

# Drop features judged redundant during EDA -- NOTE(review): presumably
# YearBuilt ~ YearRemodAdd and 1stFlrSF ~ TotalBsmtSF were found collinear;
# confirm against the analysis notebook.
data_train=data_train.drop(['YearBuilt','1stFlrSF'], axis=1)
data_test=data_test.drop(['YearBuilt','1stFlrSF'], axis=1)
# Drop columns that are mostly missing in this dataset.
data_drop_train=data_train.drop(['Alley','PoolQC','Fence','MiscFeature'], axis=1)
data_drop_test=data_test.drop(['Alley','PoolQC','Fence','MiscFeature'], axis=1)
# Remove numeric columns whose correlation with SalePrice is below 0.5;
# the same list (computed on train) is applied to test for consistency.
drop_list=drop_low_colums(data_drop_train)
data_drop_train1=data_drop_train.drop(drop_list, axis=1)
data_drop_test1=data_drop_test.drop(drop_list, axis=1)
# Fill every NaN with the *string* '0' -- NOTE(review): this coerces numeric
# columns that had missing values to object dtype, which is why several test
# columns need an explicit astype("int64") further down.
data_drop_train1=data_drop_train1.fillna('0')
data_drop_test1=data_drop_test1.fillna('0')
from sklearn import preprocessing
def encode_features(df_train, df_test):
    """Label-encode every object-dtype column of both frames, in place.

    A single encoder is fitted on the union of train and test values for
    each column, so both frames share one consistent integer mapping and
    categories that appear only in the test set cannot crash ``transform``.

    Parameters
    ----------
    df_train, df_test : pandas.DataFrame
        Frames mutated in place; object columns are replaced by int codes.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        The same two frames, for chaining.
    """
    # BUG FIX: derive the column list from the argument, not from the
    # module-level global (data_drop_train1) the original closed over.
    features = df_train.select_dtypes(include=object).columns

    for feature in features:
        le = preprocessing.LabelEncoder()
        # BUG FIX: fit once on the combined values -- two independent
        # fit_transform calls assign different codes to the same category
        # in train vs. test, silently corrupting the encoding.
        le.fit(pd.concat([df_train[feature], df_test[feature]]))
        df_train[feature] = le.transform(df_train[feature])
        df_test[feature] = le.transform(df_test[feature])
    return df_train, df_test
    
# Encode the categorical columns, then apply a second correlation filter now
# that every column is numeric.
data_train, data_test = encode_features(data_drop_train1, data_drop_test1)
data_drop_train2=data_train.corr()
drop_list2=list(data_drop_train2.query('SalePrice<0.5').index)
# NOTE(review): drop_list2 may include 'Id'; the submission step at the end
# reads data_test['Id'] -- verify that column actually survives these drops.
data_drop_train2=data_train.drop(drop_list2, axis=1)
data_drop_test2=data_test.drop(drop_list2, axis=1)
def change_colums(colums):
    """Return the number of bins to pass to ``pd.cut`` for a numeric Series.

    Uses a log2 rule: ``ceil(log2(max value))``.

    Parameters
    ----------
    colums : pandas.Series
        Numeric column; its maximum must be positive (log2 of a
        non-positive value is undefined).

    Returns
    -------
    int
        Bin count as a plain int -- BUG FIX: ``np.ceil`` alone returns a
        numpy float, which newer pandas rejects as a scalar ``bins``
        argument to ``pd.cut``.
    """
    fz = int(np.ceil(np.log2(colums.max())))
    return fz

# --- Bin the wide-range numeric features into log2-scaled interval buckets ---
# Training side: pd.cut replaces each column with equal-width intervals.
fz = change_colums(data_drop_train2.GarageArea)
data_drop_train2['GarageArea'] = pd.cut(data_drop_train2.GarageArea, fz)
fz = change_colums(data_drop_train2.GrLivArea)
data_drop_train2['GrLivArea'] = pd.cut(data_drop_train2.GrLivArea, fz)
fz = change_colums(data_drop_train2.TotalBsmtSF)
data_drop_train2['TotalBsmtSF'] = pd.cut(data_drop_train2.TotalBsmtSF, fz)
fz = change_colums(data_drop_train2.YearRemodAdd)
data_drop_train2['YearRemodAdd'] = pd.cut(data_drop_train2.YearRemodAdd, fz)
# Test side: the earlier fillna('0') left string '0' in numeric columns, so
# cast them back to integers before binning.
data_drop_test2["TotalBsmtSF"] = data_drop_test2["TotalBsmtSF"].astype("int64")
data_drop_test2["GarageArea"] = data_drop_test2["GarageArea"].astype("int64")
data_drop_test2["GrLivArea"] = data_drop_test2["GrLivArea"].astype("int64")
data_drop_test2["GarageCars"] = data_drop_test2["GarageCars"].astype("int64")
# Replace the '0' missing-value placeholder with the column mean.
# BUG FIX: the original `df.loc[mask] = value` assigned the scalar to EVERY
# column of the matching rows, clobbering all their other features; the
# assignment must be restricted to the YearRemodAdd column.
# NOTE(review): if YearRemodAdd is object dtype here (after fillna('0')),
# .mean() may fail on mixed str/int values -- confirm with real data.
mask = data_drop_test2['YearRemodAdd'] == '0'
data_drop_test2.loc[mask, 'YearRemodAdd'] = int(data_drop_test2["YearRemodAdd"].mean())
fz = change_colums(data_drop_test2.YearRemodAdd)
data_drop_test2['YearRemodAdd'] = pd.cut(data_drop_test2.YearRemodAdd, fz)
fz = change_colums(data_drop_test2.TotalBsmtSF)
data_drop_test2['TotalBsmtSF'] = pd.cut(data_drop_test2.TotalBsmtSF, fz)
fz = change_colums(data_drop_test2.GarageArea)
data_drop_test2['GarageArea'] = pd.cut(data_drop_test2.GarageArea, fz)
fz = change_colums(data_drop_test2.GrLivArea)
data_drop_test2['GrLivArea'] = pd.cut(data_drop_test2.GrLivArea, fz)
def encode_features1(df_train, df_test):
    """Label-encode the four binned interval columns with one shared mapping.

    The encoder for each column is fitted on the concatenation of train and
    test values, so both frames receive identical integer codes and test-only
    intervals cannot break the transform.

    Returns the two (mutated in place) frames for chaining.
    """
    binned_cols = ['YearRemodAdd', 'TotalBsmtSF', 'GrLivArea', 'GarageArea']
    pooled = pd.concat([df_train[binned_cols], df_test[binned_cols]])

    for col in binned_cols:
        encoder = preprocessing.LabelEncoder().fit(pooled[col])
        df_train[col] = encoder.transform(df_train[col])
        df_test[col] = encoder.transform(df_test[col])
    return df_train, df_test

data_train1, data_test1 = encode_features1(data_drop_train2, data_drop_test2)
from sklearn.model_selection import train_test_split
# The eight predictors that survived both correlation filters.
X=data_train1[['OverallQual', 'YearRemodAdd', 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageCars', 'GarageArea']]
y=data_train1['SalePrice']
# Randomly split into training and hold-out sets (80/20).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# 1. Decision-tree regression
from sklearn import tree
model_decision_tree_regression = tree.DecisionTreeRegressor()

# 2. Linear regression
from sklearn.linear_model import LinearRegression
model_linear_regression = LinearRegression()

# 3. SVM regression
from sklearn import svm
model_svm = svm.SVR()

# 4. kNN regression
from sklearn import neighbors
model_k_neighbor = neighbors.KNeighborsRegressor()

# 5. Random-forest regression
from sklearn import ensemble
model_random_forest_regressor = ensemble.RandomForestRegressor(n_estimators=20)  # 20 trees

# 6. AdaBoost regression
from sklearn import ensemble  # repeated notebook-cell import; harmless
model_adaboost_regressor = ensemble.AdaBoostRegressor(n_estimators=50)  # 50 estimators

# 7. GBRT (gradient-boosted) regression
from sklearn import ensemble
model_gradient_boosting_regressor = ensemble.GradientBoostingRegressor(n_estimators=100)  # 100 estimators

# 8. Bagging regression
from sklearn import ensemble
model_bagging_regressor = ensemble.BaggingRegressor()

# 9. ExtraTree (extremely randomized tree) regression
from sklearn.tree import ExtraTreeRegressor
model_extra_tree_regressor = ExtraTreeRegressor()
def try_different_method(model, method):
    """Fit *model*, plot its hold-out predictions against the truth.

    Parameters
    ----------
    model : estimator
        Any regressor exposing ``fit`` / ``score`` / ``predict``.
    method : str
        Label shown in the plot title.

    Returns
    -------
    float
        R^2 score on the hold-out set.  BUG FIX: the original computed
        the score but discarded it; returning it lets callers compare
        models programmatically (existing callers that ignore the return
        value are unaffected).

    Notes
    -----
    Relies on the module-level globals ``X_train``, ``X_test``,
    ``y_train``, ``y_test`` produced by ``train_test_split`` above.
    """
    model.fit(X_train, y_train)
    score = model.score(X_test, y_test)
    result = model.predict(X_test)
    plt.figure()
    plt.plot(np.arange(len(result)), y_test, "go-", label="True value")
    plt.plot(np.arange(len(result)), result, "ro-", label="Predict value")
    plt.title(f"method:{method}---score:{score}")
    plt.legend(loc="best")
    return score
# Compare the nine regressors on the same hold-out split.
try_different_method(model_decision_tree_regression,"model_decision_tree_regression")
try_different_method(model_linear_regression,"model_linear_regression")
try_different_method(model_svm,"model_svm")
try_different_method(model_k_neighbor,"model_k_neighbor")
try_different_method(model_random_forest_regressor,"model_random_forest_regressor")
try_different_method(model_adaboost_regressor,"model_adaboost_regressor")
try_different_method(model_gradient_boosting_regressor,"model_gradient_boosting_regressor")
try_different_method(model_bagging_regressor,"model_bagging_regressor")
try_different_method(model_extra_tree_regressor,"model_extra_tree_regressor")
# Retrain the best-scoring model (GBRT) and predict the real test set.
model_gradient_boosting_regressor.fit(X_train, y_train)
# BUG FIX: predict on exactly the eight feature columns used for training,
# in the same order -- passing the whole of data_test1 feeds sklearn extra
# or misordered columns.
model_gradient_boosting_regressor_result = model_gradient_boosting_regressor.predict(data_test1[X.columns])
# BUG FIX: 'Id' has likely been dropped from data_test by the correlation
# filter (it correlates weakly with SalePrice), which would make
# data_test['Id'] raise; re-read the ids from the original CSV, which is
# correct regardless.
submission = pd.DataFrame({'Id': pd.read_csv('test.csv')['Id'],
                           'SalePrice': model_gradient_boosting_regressor_result})
submission.to_csv('submission.csv', index=False)

最后提交该文件即可在排行榜上得到分数。本文只做了简单的特征工程,也没有对模型参数进行调优,后续仍有不少优化空间。

https://github.com/Timlcy/house_price/blob/master/housePrices.ipynb

你可能感兴趣的:(大数据,回归)