# -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold, train_test_split
import pandas as pd
# Show all columns when printing DataFrames (pandas display option)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import lightgbm as lgb
# Configure pandas console display so wide/long frames print in full.
pd.set_option('display.max_columns', None)  # show all columns
pd.set_option('display.max_rows', None)     # show all rows
# BUG FIX: bare 'max_colwidth' is deprecated/removed in modern pandas;
# the fully-qualified option name is 'display.max_colwidth' (default 50).
pd.set_option('display.max_colwidth', 100)
def stacking(model, train_data, train_target, test_data, n_fold):
    """Produce out-of-fold stacking features for one base learner.

    :param model: estimator with sklearn-style fit/predict
    :param train_data: training features (DataFrame, target column excluded)
    :param train_target: target values to predict (DataFrame/Series)
    :param test_data: test-set features (DataFrame)
    :param n_fold: number of cross-validation folds
    :return: (test_pred, train_pred) — test_pred is a 1-D int array of
             per-fold test predictions averaged and rounded; train_pred is
             an (n_train, 1) int array of out-of-fold predictions.
    """
    # BUG FIX: shuffle=True is required for random_state to take effect;
    # recent scikit-learn raises ValueError when random_state is set
    # while shuffle is False.
    skf = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=1)
    # Out-of-fold predictions for the training set (filled fold by fold).
    train_pred = np.zeros((train_data.shape[0], 1), int)
    # Per-fold predictions on the test set, averaged after the loop.
    fold_test_preds = []
    for fold_idx, (train_index, val_index) in enumerate(skf.split(train_data, train_target)):
        print('第 ', fold_idx + 1, ' 折交叉验证开始... ')
        # Split this fold's train/validation partitions.
        x_train, x_val = train_data.iloc[train_index], train_data.iloc[val_index]
        y_train, y_val = train_target.iloc[train_index], train_target.iloc[val_index]
        # np.ravel: sklearn expects a 1-D target array, not a column vector.
        model.fit(X=x_train, y=np.ravel(y_train))
        val_pred = model.predict(x_val)
        accs = accuracy_score(y_val, val_pred)
        print('第 ', fold_idx + 1, ' 折交叉验证 : accuracy : ', accs)
        # Vectorized scatter of this fold's predictions into their rows
        # (replaces the original per-element Python loop).
        train_pred[val_index, 0] = val_pred
        fold_test_preds.append(model.predict(test_data))
    # BUG FIX: the original seeded test_pred with an all-zeros column and
    # included it in the mean, biasing every averaged prediction downward.
    # Average only the real per-fold predictions.
    test_pred_mean = np.mean(np.column_stack(fold_test_preds), axis=1)
    # Round the fractional averages back to integer class labels.
    return np.rint(test_pred_mean).astype(int), train_pred
if __name__ == '__main__':
    # Load the iris dataset and hold out 20% as the final test set.
    iris = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=1)
    x_train = pd.DataFrame(x_train)
    y_train = pd.DataFrame(y_train)
    x_test = pd.DataFrame(x_test)
    y_test = pd.DataFrame(y_test)

    # --- Level 1: three base learners produce out-of-fold predictions ---
    # Random forest base learner.
    rf = RandomForestClassifier(n_jobs=-1, max_depth=100, n_estimators=800)
    print('==============================随机森林模型==============================')
    rf_test_pred, rf_train_pred = stacking(model=rf, train_data=x_train, train_target=y_train, test_data=x_test, n_fold=5)
    rf_test_pred = pd.DataFrame(rf_test_pred)
    rf_train_pred = pd.DataFrame(rf_train_pred)
    # Decision tree base learner.
    dt = DecisionTreeClassifier(random_state=1)
    print('==============================决策树模型==============================')
    dt_test_pred, dt_train_pred = stacking(model=dt, train_data=x_train, train_target=y_train, test_data=x_test, n_fold=5)
    dt_test_pred = pd.DataFrame(dt_test_pred)
    dt_train_pred = pd.DataFrame(dt_train_pred)
    # K-nearest-neighbors base learner.
    knn = KNeighborsClassifier()
    print('==============================K近邻模型==============================')
    knn_test_pred, knn_train_pred = stacking(model=knn, train_data=x_train, train_target=y_train, test_data=x_test, n_fold=5)
    knn_test_pred = pd.DataFrame(knn_test_pred)
    knn_train_pred = pd.DataFrame(knn_train_pred)

    # --- Level 2: concatenate base-learner outputs into meta features ---
    # Out-of-fold predictions become the meta training set; averaged test
    # predictions become the meta test set.
    train_set = pd.concat([rf_train_pred, dt_train_pred, knn_train_pred], axis=1)
    test_set = pd.concat([rf_test_pred, dt_test_pred, knn_test_pred], axis=1)

    # LightGBM as the level-2 (meta) learner.
    # BUG FIX: the original assigned this instance to the name `lgb`,
    # shadowing the imported lightgbm module; use a distinct name.
    meta_model = lgb.LGBMClassifier(
        objective='multiclass',
        num_class=3,
        boosting_type='gbdt',
        n_estimators=2000,
        subsample=0.8,
        learning_rate=0.1,
        n_jobs=4,
        reg_alpha=0.1,
        reg_lambda=0.1,
        num_leaves=55
    )
    # np.ravel: sklearn-style fit expects a 1-D target array.
    meta_model.fit(train_set, np.ravel(y_train))
    test_pred = meta_model.predict(test_set)

    # Final evaluation on the held-out test set.
    test_acc = accuracy_score(y_test, test_pred)
    print('\ntest_accuracy : ', test_acc)
# Sample output (pasted console log — commented out so the file stays valid Python):
# ==============================随机森林模型==============================
# 第 1 折交叉验证开始...
# 第 1 折交叉验证 : accuracy : 0.96
# 第 2 折交叉验证开始...
# 第 2 折交叉验证 : accuracy : 0.96
# 第 3 折交叉验证开始...
# 第 3 折交叉验证 : accuracy : 1.0
# 第 4 折交叉验证开始...
# 第 4 折交叉验证 : accuracy : 0.9583333333333334
# 第 5 折交叉验证开始...
# 第 5 折交叉验证 : accuracy : 0.8181818181818182
# ==============================决策树模型==============================
# 第 1 折交叉验证开始...
# 第 1 折交叉验证 : accuracy : 0.96
# 第 2 折交叉验证开始...
# 第 2 折交叉验证 : accuracy : 0.96
# 第 3 折交叉验证开始...
# 第 3 折交叉验证 : accuracy : 1.0
# 第 4 折交叉验证开始...
# 第 4 折交叉验证 : accuracy : 0.9583333333333334
# 第 5 折交叉验证开始...
# 第 5 折交叉验证 : accuracy : 0.9545454545454546
# ==============================K近邻模型==============================
# 第 1 折交叉验证开始...
# 第 1 折交叉验证 : accuracy : 0.96
# 第 2 折交叉验证开始...
# 第 2 折交叉验证 : accuracy : 0.96
# 第 3 折交叉验证开始...
# 第 3 折交叉验证 : accuracy : 1.0
# 第 4 折交叉验证开始...
# 第 4 折交叉验证 : accuracy : 0.9583333333333334
# 第 5 折交叉验证开始...
# 第 5 折交叉验证 : accuracy : 0.8636363636363636
# test_accuracy : 0.9666666666666667