Calling the XGBoost library, and the differences between XGBoost and GBDT

import xgboost as xgb
# First XGBoost model for Pima Indians dataset
from numpy import loadtxt
from xgboost import XGBClassifier,XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix,classification_report


# load data
dataset = loadtxt(r'./pima-indians-diabetes.csv', delimiter=",")
# split data into X and y
X = dataset[:,0:8]
Y = dataset[:,8]
# split data into train and test sets
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# fit the model on the training data using the scikit-learn wrapper API
model = xgb.XGBClassifier()
model.fit(X_train, y_train)
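# Instead of relying on the defaults, the usual hyperparameters can be set
# explicitly; the values below are illustrative assumptions, not tuned
# settings for this dataset, so the sketch is left commented out.
# model = XGBClassifier(n_estimators=100, max_depth=3, learning_rate=0.1,
#                       subsample=0.8, colsample_bytree=0.8)
# model.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)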

# TODO: the same model with the native xgboost API
# xgb.DMatrix is the data container expected by the native xgboost library
# data_train = xgb.DMatrix(X_train, label=y_train)
# data_test = xgb.DMatrix(X_test, label=y_test)
# the Pima target is binary, so use 'binary:logistic' (or 'binary:logitraw' for raw scores)
# param = {'max_depth': 3, 'eta': 1, 'objective': 'binary:logistic'}
# model = xgb.train(param, data_train)
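# A minimal continuation of the native-API sketch above: Booster.predict
# returns probabilities for 'binary:logistic', so they must be thresholded
# by hand. The 0.5 cutoff is an assumption for illustration.
# prob = model.predict(data_test)
# native_pred = (prob > 0.5).astype(int)
# print("Native API accuracy: %.2f%%" % (accuracy_score(y_test, native_pred) * 100.0))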

# get feature importances (for inspection or plotting, see the sketch below)
feature_import = model.feature_importances_
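# One way to plot the importances mentioned above is xgboost's built-in
# plot_importance helper; it needs matplotlib, so it is left commented here.
# import matplotlib.pyplot as plt
# xgb.plot_importance(model)
# plt.show()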
model.save_model(r'XGB.model')
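# The saved model can be restored later with XGBClassifier.load_model; the
# filename mirrors save_model above (newer xgboost versions may expect a
# .json filename), so this sketch is left commented.
# loaded_model = XGBClassifier()
# loaded_model.load_model(r'XGB.model')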

y_pred = model.predict(X_test)

# XGBClassifier.predict already returns 0/1 class labels here, so rounding is effectively a no-op
predictions = [round(value) for value in y_pred]
# evaluate predictions

accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print("Precision: %.2f%%" % (precision * 100.0))
