Implementing the XGBoost machine learning algorithm in Python

  • Query data from the database and save it as a pandas DataFrame
import pandas as pd
from sqlalchemy import create_engine
# Initialize the database connection (uses the pymysql driver)
engine = create_engine('mysql+pymysql://username:password@host:3306/database_name')
# Query statement: select the desired columns from the table
sql = '''
      SELECT columns FROM table_name WHERE condition
      '''
# read_sql_query takes two arguments: the SQL statement and the database connection
dataset = pd.read_sql_query(sql, engine)

# Print the first 10 rows of the query result
print(dataset.head(10))
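
Before converting the DataFrame to a NumPy array in the next step, it is worth checking that the query returned only numeric columns, since XGBoost cannot consume string-typed features directly. The check below is a generic pandas sanity check, not tied to any particular table:

# Check the shape and column dtypes of the query result
print(dataset.shape)
print(dataset.dtypes)
# Any non-numeric column would need to be encoded or dropped before modeling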
  • Extract the feature and target values, then split them into training and test sets (0.2 test fraction as an example)
from sklearn.model_selection import train_test_split
# Split-out validation dataset
array = dataset.values
# Assume the first 5 columns are features and the last column is the target
X = array[:,:5]
y = array[:,5]
print(X)
print(y)
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=validation_size, random_state=seed)
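
Because scale_pos_weight is passed to the classifier in the next step, the classes are presumably imbalanced. An optional variation (not part of the original snippet) is to stratify the split on y so that the class ratio is the same in the training and validation sets:

# Optional: stratified version of the same split, keeping class proportions in both sets
X_train, X_validation, Y_train, Y_validation = train_test_split(
    X, y, test_size=validation_size, random_state=seed, stratify=y)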
  • Fit XGBoost, make predictions, and print precision, recall, F1, the confusion matrix, and accuracy
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from xgboost import XGBClassifier
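import numpy as np

# Sketch (assumption): scale_pos_weight below is the ratio of negative to positive samples,
# so count both classes in the training target first. This assumes a binary target encoded
# as 0/1; adjust the label values if your data uses a different encoding.
pos_count = int(np.sum(Y_train == 1))
neg_count = int(np.sum(Y_train == 0))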
# Fit the XGBoost model
lr = XGBClassifier(scale_pos_weight=neg_count/pos_count,  # ratio of negative to positive training samples
                   n_estimators=1000, max_depth=6, subsample=0.8, colsample_bytree=0.8)
# learning_rate=0.1,
# n_estimators=1000,         # number of trees -- build the model with 1000 trees
# max_depth=6,               # maximum depth of each tree
# min_child_weight=1,        # minimum sum of instance weight needed in a leaf node
# gamma=0.,                  # regularization coefficient on the number of leaf nodes
# subsample=0.8,             # randomly sample 80% of the rows to build each tree
# colsample_bytree=0.8,      # randomly sample 80% of the features to build each tree
# objective='multi:softmax', # specify the objective (loss) function
# scale_pos_weight=3.3,      # compensate for class imbalance
# random_state=27,           # random seed
lr.fit(X_train, Y_train)
predictions = lr.predict(X_validation)
# Print the classification accuracy score
print(accuracy_score(Y_validation, predictions))
# Print the confusion matrix
print(confusion_matrix(Y_validation, predictions))
# Print precision, recall, and F1
print(classification_report(Y_validation, predictions))
