Automatic LightGBM hyperparameter tuning with hyperopt

LightGBM is another engineering implementation of the GBDT algorithm. Compared with XGBoost, it trains faster while delivering comparable accuracy. However, it exposes many hyperparameters, and tuning them by hand is not only tedious but also unlikely to find the optimum.
hyperopt is a Python library for "distributed asynchronous algorithm configuration / hyperparameter optimization". Broadly speaking, a model with hyperparameters can be viewed as a generally non-convex function of those hyperparameters, so hyperopt can reliably find configurations at least as good as hand tuning.
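
Before diving in, here is a minimal, self-contained hyperopt sketch (a toy objective, separate from the tuning code below) showing the fmin / search-space / TPE workflow:

# Minimize (x - 3)^2 over x in [-5, 5] with TPE.
from hyperopt import hp, fmin, tpe

best = fmin(fn=lambda x: (x - 3) ** 2,     # objective to minimize
            space=hp.uniform('x', -5, 5),  # search space for x
            algo=tpe.suggest,              # Tree-structured Parzen Estimator
            max_evals=100)
print(best)  # e.g. {'x': 2.98}, close to the true minimum at x = 3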

0. Importing the required Python libraries

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder, RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from hyperopt import hp, fmin, tpe
from functools import partial
from pyspark.sql import SparkSession
from sklearn import metrics
import lightgbm as lgb
import pandas as pd
import numpy as np
import pickle
import base64
import sys

1. Data preparation

First, define a data-transformation class that follows the sklearn pipeline (fit/transform) convention:

class FeatureProcess(BaseEstimator, TransformerMixin):
    """
    Data-transformation class; used as follows:
    pipeline = FeatureProcess(categorical_cols, numerical_cols, label_cols)
    pipeline.fit(train_data)
    X = pipeline.transform(train_data)
    where train_data is a pandas DataFrame.
    """

    def __init__(self, categorical_cols=None, numerical_cols=None, label_cols=None):
        """
        :param categorical_cols: categorical feature columns
        :param numerical_cols: numerical feature columns
        :param label_cols: tag-style feature columns; each value is a space-separated string
        """
        self.categorical_cols = categorical_cols
        self.numerical_cols = numerical_cols
        self.label_cols = label_cols
        self.onehot = None
        self.scaler = None
        self.labels_pipe = {}
        self.stop_words = {'null', 'nan'}
        self.feature_names = None

    def _get_onehot_feature_names(self, categorical_cols):
        """
        Return the feature names after one-hot encoding, defined as
        <original column name>_<one-hot category value>.
        :param categorical_cols:
        :return:
        """
        feature_names = list()
        for col, values in zip(categorical_cols, self.onehot.categories_):
            for val in values:
                feature_names.append('%s_%s' % (col, val))
        return feature_names

    def fit(self, df, y=None):
        # Treat empty strings as missing values
        df = df.replace({'': np.NAN})
        # Type conversion
        df[self.categorical_cols + self.label_cols] = df[self.categorical_cols + self.label_cols].astype(str)
        df[self.numerical_cols] = df[self.numerical_cols].astype(float).replace(float('inf'), 0)
        # One-hot encode the categorical columns ('sparse' was renamed 'sparse_output' in scikit-learn >= 1.2)
        self.onehot = OneHotEncoder(handle_unknown='ignore', sparse=False).fit(df[self.categorical_cols])
        # Vectorize the tag columns with TF-IDF
        label_feature_names = []
        for label_col in self.label_cols:
            self.labels_pipe[label_col] = TfidfVectorizer(stop_words=self.stop_words, min_df=500).fit(df[label_col].values)
            label_feature_names.extend(sorted(self.labels_pipe[label_col].vocabulary_,
                                              key=lambda x: self.labels_pipe[label_col].vocabulary_[x]))
        # RobustScaler on the numerical columns; note that with with_centering=False and
        # with_scaling=False this is effectively an identity transform (set with_scaling=True
        # to actually scale by the 1st-99th percentile range)
        self.scaler = RobustScaler(with_centering=False, with_scaling=False, quantile_range=(1, 99)).fit(
            df[self.numerical_cols])
        # Assemble feature names in the same order as the transform output columns
        self.feature_names = self._get_onehot_feature_names(self.categorical_cols) + self.numerical_cols + label_feature_names
        return self

    def transform(self, df, y=None):
        # Treat empty strings as missing values
        df = df.replace({'': np.NAN})
        # Type conversion
        df[self.categorical_cols + self.label_cols] = df[self.categorical_cols + self.label_cols].astype(str)
        df[self.numerical_cols] = df[self.numerical_cols].astype(float).replace(float('inf'), 0)
        # Apply the fitted one-hot encoder and scaler
        onehot_data = self.onehot.transform(df[self.categorical_cols])
        scaler_data = self.scaler.transform(df[self.numerical_cols])
        # Vectorize the tag columns with TF-IDF
        label_data = np.concatenate([self.labels_pipe[label_col].transform(df[label_col].values).toarray() 
                                     for label_col in self.label_cols], axis=1)
        data = np.c_[onehot_data, scaler_data, label_data]
        return data

    def fit_transform(self, df, y=None):
        return self.fit(df, y).transform(df, y)
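
A minimal usage sketch of the class (the column names and toy data are invented for illustration; it assumes the scikit-learn API used in this post). It also shows how the hard-coded min_df=500 prunes tags that appear in fewer than 500 rows:

toy = pd.DataFrame({
    'city': ['beijing', 'shanghai'] * 300,
    'age': ['23', '35'] * 300,
    'tags': ['sports music', 'music'] * 300,  # 'sports' appears in only 300 rows
})
pipe = FeatureProcess(categorical_cols=['city'], numerical_cols=['age'], label_cols=['tags'])
X = pipe.fit_transform(toy)
print(pipe.feature_names)  # ['city_beijing', 'city_shanghai', 'age', 'music']
print(X.shape)             # (600, 4); 'sports' was pruned by min_df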

2. Defining the search space

hp.randint("label", n) samples an integer from [0, n), so the args_tranform function below maps each raw draw onto the real parameter range (a sampling example follows the function definition).

# Custom hyperopt search space
space = {"learning_rate": hp.randint("learning_rate", 7),
         "max_depth": hp.randint("max_depth", 10),
         "num_leaves": hp.randint("num_leaves", 10),
         "bagging_fraction": hp.randint("bagging_fraction", 5),
         "bagging_freq": hp.randint("bagging_freq", 9),
         "feature_fraction": hp.randint("feature_fraction", 5),
         "lambda_l1": hp.randint("lambda_l1", 6),
         "lambda_l2": hp.randint("lambda_l2", 8),
         "cat_smooth": hp.randint("cat_smooth", 20),
         "min_data_in_leaf": hp.randint("min_data_in_leaf", 20),
         }


def args_tranform(args_dict, is_print=False):
    params = dict()

    params["learning_rate"] = args_dict["learning_rate"] * 0.02 + 0.01
    params["max_depth"] = args_dict["max_depth"] + 3
    params["num_leaves"] = args_dict["num_leaves"] * 5 + 5
    params["bagging_fraction"] = args_dict["bagging_fraction"] * 0.1 + 0.2
    params["bagging_freq"] = args_dict["bagging_freq"] + 1
    params["feature_fraction"] = args_dict["feature_fraction"] * 0.1 + 0.5
    params["lambda_l1"] = args_dict["lambda_l1"] * 0.1 + 0.1
    params["lambda_l2"] = args_dict["lambda_l2"] * 5
    params["cat_smooth"] = args_dict["cat_smooth"] + 1
    params["min_data_in_leaf"] = args_dict["min_data_in_leaf"] * 20 + 50

    params["boosting_type"] = 'gbdt'
    params["objective"] = 'binary'
    params["metric"] = 'auc'
    params["verbosity"] = 0
    params["seed"] = 42
    params["num_threads"] = 16

    if is_print:
        print(params)

    return params
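
To sanity-check the space and the mapping, draw a random point with hyperopt's stochastic sampler and pass it through args_tranform:

from hyperopt.pyll.stochastic import sample

raw = sample(space)          # e.g. {'bagging_fraction': 2, 'learning_rate': 3, ...}
params = args_tranform(raw)  # e.g. learning_rate -> 3 * 0.02 + 0.01 = 0.07
print(params)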

3. Building the model factory and objective function

In the objective function I added a parameter that penalizes overfitting, which keeps the gap between train_auc and val_auc from growing too large (a worked example follows the code).

def model_fit(argsDict, rate=0.2):
    # dtrain and dval (and X_train/y_train, X_val/y_val used in get_score)
    # are module-level globals created in the training script below
    params = args_tranform(argsDict)

    model = lgb.train(params, dtrain, 500, dval, early_stopping_rounds=20, verbose_eval=False)

    return get_score(model, rate)


def get_score(model, rate):
    """
    :param model: trained LightGBM booster
    :param rate: overfitting-control weight; larger values penalize the train/val gap more harshly
    :return: score for fmin to minimize (lower is better)
    """
    y_val_pred = model.predict(X_val, num_iteration=model.best_iteration)
    y_train_pred = model.predict(X_train, num_iteration=model.best_iteration)
    train_auc = metrics.roc_auc_score(y_train, y_train_pred)
    val_auc = metrics.roc_auc_score(y_val, y_val_pred)

    return -val_auc + rate * abs(train_auc - val_auc)
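
For intuition: with rate = 0.2, a model with val_auc = 0.85 and train_auc = 0.95 scores -0.85 + 0.2 * 0.10 = -0.83, while a less overfit model with val_auc = 0.84 and train_auc = 0.86 scores -0.84 + 0.2 * 0.02 = -0.836, so fmin prefers the latter despite its slightly lower validation AUC.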

4. Model training

The training data is read via Spark; at the end, the model and the data-transformation pipeline are written together to a cluster file (the result could also be written to a database instead).

if __name__ == '__main__':
    # `sql`, `categorical_cols`, `numerical_cols`, `label_cols` and `model_path`
    # are assumed to be defined elsewhere in the script
    spark = SparkSession.builder.getOrCreate()
    df = spark.sql(sql).toPandas()
    # Hold out a stratified test set
    train_set, test_set = train_test_split(df, test_size=0.15, shuffle=True, stratify=df['target'],
                                           random_state=123)
    pipeline = FeatureProcess(categorical_cols, numerical_cols, label_cols)
    X = pipeline.fit_transform(train_set.drop('target', axis=1))
    y = train_set['target']

    X_test = pipeline.transform(test_set.drop('target', axis=1))
    y_test = test_set['target']
    feature_names = pipeline.feature_names
    # Split the remainder into train and validation sets
    X_train, X_val, y_train, y_val = train_test_split(X, y.values, test_size=0.15, shuffle=True, stratify=y, random_state=123)

    dtrain = lgb.Dataset(data=X_train, label=y_train)
    dval = lgb.Dataset(data=X_val, label=y_val, reference=dtrain)
    valid_sets = [dtrain, dval]
    # Hyperparameter search; n_startup_jobs=-1 disables TPE's initial random-search phase
    algo = partial(tpe.suggest, n_startup_jobs=-1)
    best = fmin(model_fit, space, algo=algo, max_evals=50, show_progressbar=True)
    lgb_params = args_tranform(best, is_print=True)
    best_model = lgb.train(lgb_params, dtrain, 1500, valid_sets, ['train', 'eval'], early_stopping_rounds=50,
                           verbose_eval=200)
    # Evaluation metrics
    metric_score = best_model.best_score
    metric_score['test'] = {}
    y_prob = best_model.predict(X_test, num_iteration=best_model.best_iteration)
    metric_score['test']['auc'] = metrics.roc_auc_score(y_test, y_prob)
    metric_score['test']['acc'] = metrics.accuracy_score(y_test, (y_prob >= 0.5).astype(int))

    # Persist the pipeline, model and metrics
    res_df = pd.DataFrame()
    res_df['model_name'] = ['model_v1.0']
    res_df['pipeline'] = base64.b64encode(pickle.dumps(pipeline)).strip().decode('utf8')
    res_df['model'] = base64.b64encode(pickle.dumps(best_model)).strip().decode('utf8')
    res_df['metric_score'] = str(metric_score)

    sparkDF = spark.createDataFrame(res_df)
    sparkDF.write.format('csv').save(model_path)
    # sparkDF.createOrReplaceTempView("model_result")
    # spark.sql(insert_sql)
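
To load the saved artifacts back, reverse the encoding (a sketch; `row` and `new_df` are hypothetical stand-ins for a result row read from the cluster file and for new data to score):

pipeline = pickle.loads(base64.b64decode(row['pipeline']))
model = pickle.loads(base64.b64decode(row['model']))
y_prob = model.predict(pipeline.transform(new_df), num_iteration=model.best_iteration)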
