import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from sklearn.metrics import accuracy_score,roc_auc_score
from xgboost.sklearn import XGBClassifier
# Load the credit dataset (GBK-encoded CSV) and carve out features/target.
data_all = pd.read_csv('/home/infisa/wjht/project/DataWhale/data_all.csv', encoding='gbk')
# Every column except the binary target 'status' serves as a feature.
features = [col for col in data_all.columns if col != 'status']
X = data_all[features]
y = data_all['status']
# Hold out 30% for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=2018)
# Random-forest baseline: fit on the training split, keep the per-class
# probabilities (needed for the ROC curve later) and the test accuracy.
forest = RandomForestClassifier(n_estimators=100, random_state=2018)
forest_y_score = forest.fit(X_train, y_train).predict_proba(X_test)  # fit() returns self
# print(forest_y_score[:,1])
forest_score = forest.score(X_test, y_test)  # accuracy on the held-out split
# print('forest_score:',forest_score)
'ranfor_score:0.7820602662929222'
# GBDT (gradient boosting) baseline.
Gbdt = GradientBoostingClassifier(random_state=2018)  # GBDT (original comment had typo "CBDT")
Gbdt.fit(X_train, y_train)
# BUG FIX: the original called Gbdt.score(X_train, y_train), i.e. TRAINING
# accuracy, while every other model here is scored on the test split. That
# inflated GBDT's number (the recorded 0.8623...) and made the comparison
# unfair. Score on the held-out data like the rest.
Gbdt_score = Gbdt.score(X_test, y_test)  # test accuracy
# print('Gbdt_score:',Gbdt_score)
'Gbdt_score:0.8623384430417794'  # NOTE(review): recorded value is the old TRAIN accuracy
# XGBoost baseline: fit, predict hard labels, and measure test accuracy.
Xgbc = XGBClassifier(random_state=2018)
y_xgbc_pred = Xgbc.fit(X_train, y_train).predict(X_test)  # fit() returns self
Xgbc_score = accuracy_score(y_test, y_xgbc_pred)  # accuracy on the held-out split
# print('Xgbc_score:',Xgbc_score)
'Xgbc_score:0.7855641205325858'
# LightGBM baseline: fit, predict hard labels, and measure test accuracy.
gbm = lgb.LGBMClassifier(random_state=2018)
y_gbm_pred = gbm.fit(X_train, y_train).predict(X_test)  # fit() returns self
gbm_score = accuracy_score(y_test, y_gbm_pred)  # accuracy on the held-out split
# print('gbm_score:',gbm_score)
'gbm_score:0.7701471618780659'
y_test_hot = label_binarize(y_test,classes =(0, 1)) # binarize the test labels into a 0/1 matrix for roc_curve
Gbdt_y_score = Gbdt.decision_function(X_test) # GBDT decision scores (continuous, suitable for ROC)
forest_fpr,forest_tpr,forest_threasholds=metrics.roc_curve(y_test_hot.ravel(),forest_y_score[:,1].ravel()) # forest ROC from positive-class probabilities; forest_threasholds are the decision thresholds
Gbdt_fpr,Gbdt_tpr,Gbdt_threasholds=metrics.roc_curve(y_test_hot.ravel(),Gbdt_y_score.ravel()) # GBDT ROC from decision scores; Gbdt_threasholds are the decision thresholds
forest_auc=metrics.auc(forest_fpr,forest_tpr) # forest AUC (original comment wrongly said Gbdt_auc)
# print('forest_auc',forest_auc)
'forest_auc 0.7491366989035293'
Gbdt_auc=metrics.auc(Gbdt_fpr,Gbdt_tpr) # GBDT AUC
# print('Gbdt_auc:',Gbdt_auc)
'Gbdt_auc:0.7633094425839567'
# AUC for XGBoost and LightGBM.
# BUG FIX: the original passed hard 0/1 predictions (y_xgbc_pred / y_gbm_pred)
# to roc_auc_score. ROC-AUC must be computed from a continuous score — the
# forest and GBDT above correctly use predict_proba / decision_function — so
# use the positive-class probability here too. With hard labels the AUC is
# badly understated (the recorded 0.64 / 0.63 values below).
Xgbc_auc = roc_auc_score(y_test, Xgbc.predict_proba(X_test)[:, 1])  # XGBoost AUC
# print('Xgbc_auc:',Xgbc_auc)
'Xgbc_auc:0.6431606209508309'  # NOTE(review): recorded value from the old label-based AUC
gbm_auc = roc_auc_score(y_test, gbm.predict_proba(X_test)[:, 1])  # LightGBM AUC
# print('gbm_auc:',gbm_auc)
'gbm_auc:0.6310118097503468'  # NOTE(review): recorded value from the old label-based AUC
综合Forest、GBDT、XGBoost、LightGBM几种算法得出的准确率和auc值,GBDT的效果最好(score:0.8623384430417794,auc:0.7633094425839567)。但需注意:该score是在训练集上计算的,与其他模型的测试集准确率不可直接比较。
思考
对上面这四种模型理解还很肤浅,现在对随机森林和GBDT了解较多,LightGBM和XGBoost只是简单了解了一些,里面有很多参数还不清楚什么意思.
参考的文章
sklearn随机森林分类类RandomForestClassifier
lightGBM原理、改进简述
python机器学习案例系列教程——LightGBM算法
auc指标含义的理解
机器学习sklearn19.0——集成学习——bagging、随机森林算法
集成学习之Adaboost算法原理小结
Sklearn-GBDT(GradientBoostingDecisonTree)梯度提升树