Baidu AI Competition: Click Anti-Fraud Prediction with XGBoost 10-Fold Sub-Model Fusion

1: Registration Address

        https://aistudio.baidu.com/aistudio/competition/detail/52

        (Screenshot: competition registration page)

2: Leaderboard Score

        (Screenshots: leaderboard ranking and final score)

3: The Certificate

4: Model Source Code

        Without further ado, here is the source code.

import pandas as pd
import warnings
warnings.filterwarnings('ignore')

# Load the training and test data
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test1.csv')
# Drop the first column (not used as a feature)
test = test.iloc[:, 1:]
train = train.iloc[:, 1:]
train

# ['os', 'osv', 'lan', 'sid']
features = train.columns.tolist()
features.remove('label')
print(features)

# Number of distinct values per feature
for feature in features:
    print(feature, train[feature].nunique())
    
    
# Clean the osv (OS version) field: strip prefixes such as 'Android_' and
# normalise strings like '8.1.0' into a single float such as 8.10
def osv_trans(x):
    x = str(x).replace('Android_', '').replace('Android ', '').replace('W', '')
    if x.find('.') > 0:
        temp_index1 = x.find('.')       # position of the first dot
        if x.find(' ') > 0:
            temp_index2 = x.find(' ')   # cut off anything after a space
        else:
            temp_index2 = len(x)

        if x.find('-') > 0:
            temp_index2 = x.find('-')   # or after a dash

        # keep the major version, merge the remaining digits after the first dot
        result = x[0:temp_index1] + '.' + x[temp_index1+1:temp_index2].replace('.', '')
        try:
            return float(result)
        except ValueError:
            print(x + ' could not be parsed, falling back to 0')
            return 0
    try:
        return float(x)
    except ValueError:
        print(x + ' could not be parsed, falling back to 0')
        return 0
# (Alternative idea: LabelEncoder on train['osv']?)
# Fill missing osv values with the mode ('8.1.0'), then clean
train['osv'].fillna('8.1.0', inplace=True)
train['osv'] = train['osv'].apply(osv_trans)

# Same imputation and cleaning for the test set
test['osv'].fillna('8.1.0', inplace=True)
test['osv'] = test['osv'].apply(osv_trans)
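The mode value '8.1.0' above is hard-coded. A minimal hedged variant computes the imputation value from the data instead; it would replace the two fillna calls above (taking the training-set mode for both frames is an assumption), so the assignments are left commented out here.

# Sketch: derive the imputation value from the data instead of hard-coding it
osv_mode = train['osv'].mode()[0]             # most frequent osv value in the training set
# train['osv'].fillna(osv_mode, inplace=True)
# test['osv'].fillna(osv_mode, inplace=True)  # reuse the training-set mode for the test set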



# Inspect the language distribution
train['lan'].value_counts()
train['lan'].value_counts().index
# Map language strings to integer codes
lan_map = {'zh-CN': 1, 'zh_CN': 2, 'Zh-CN': 3, 'zh-cn': 4, 'zh_CN_#Hans': 5, 'zh': 6, 'ZH': 7, 'cn': 8, 'CN': 9, 'zh-HK': 10, 'tw': 11, 'TW': 12, 'zh-TW': 13, 'zh-MO': 14, 'en': 15, 'en-GB': 16, 'en-US': 17, 'ko': 18, 'ja': 19, 'it': 20, 'mi': 21}
train['lan'] = train['lan'].map(lan_map)
test['lan'] = test['lan'].map(lan_map)
test['lan'].value_counts()

# Languages missing from the map (and missing values) become 22
train['lan'].fillna(22, inplace=True)
test['lan'].fillna(22, inplace=True)
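The hand-written lan_map is explicit but tedious; as the LabelEncoder comment earlier hints, the codes can also be generated automatically. A minimal sketch using pandas.factorize on the raw lan column (concatenating train and test so both share one code table is an assumption; the assignments stay commented out because lan has already been mapped above).

# Sketch: automatic integer encoding of lan with pandas.factorize
all_lan = pd.concat([train['lan'], test['lan']]).astype(str)
codes, uniques = pd.factorize(all_lan)   # each distinct value gets an integer code
# train['lan'] = codes[:len(train)]
# test['lan'] = codes[len(train):]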
# Columns kept as model inputs (drop os, lan, sid)
remove_list = ['os', 'lan', 'sid']
col = features
for i in remove_list:
    col.remove(i)
col


from datetime import datetime

# Convert millisecond timestamps to datetime objects
train['timestamp'] = train['timestamp'].apply(lambda x: datetime.fromtimestamp(x/1000))
test['timestamp'] = test['timestamp'].apply(lambda x: datetime.fromtimestamp(x/1000))
test['timestamp']


# Normalise the version field to an integer
def version_trans(x):
    if x == 'V3':
        return 3
    if x == 'v1':
        return 1
    if x == 'P_Final_6':
        return 6
    if x == 'V6':
        return 6
    if x == 'GA3':
        return 3
    if x == 'GA2':
        return 2
    if x == 'V2':
        return 2
    if x == '50':
        return 5
    return int(x)

train['version'] = train['version'].apply(version_trans)
test['version'] = test['version'].apply(version_trans)
train['version'] = train['version'].astype('int')
test['version'] = test['version'].astype('int')
# Keep only the selected feature columns
features = train[col]
# Construct length features from the hash fields
features['fea_hash_len'] = features['fea_hash'].map(lambda x: len(str(x)))
features['fea1_hash_len'] = features['fea1_hash'].map(lambda x: len(str(x)))

# Why map very long fea_hash values to 0? Values longer than 16 digits look like
# hash identifiers rather than ordinary numbers, so their magnitude is dropped
# (set to 0) while fea_hash_len above preserves the length information.
features['fea_hash'] = features['fea_hash'].map(lambda x: 0 if len(str(x)) > 16 else int(x))
features['fea1_hash'] = features['fea1_hash'].map(lambda x: 0 if len(str(x)) > 16 else int(x))
features

test_features = test[col]
# Construct the same length features for the test set
test_features['fea_hash_len'] = test_features['fea_hash'].map(lambda x: len(str(x)))
test_features['fea1_hash_len'] = test_features['fea1_hash'].map(lambda x: len(str(x)))

# As above: very long hash-like values are mapped to 0, the rest kept as integers
test_features['fea_hash'] = test_features['fea_hash'].map(lambda x: 0 if len(str(x)) > 16 else int(x))
test_features['fea1_hash'] = test_features['fea1_hash'].map(lambda x: 0 if len(str(x)) > 16 else int(x))
test_features


# Expand the timestamp into calendar features
temp = pd.DatetimeIndex(features['timestamp'])
features['year'] = temp.year
features['month'] = temp.month
features['day'] = temp.day
features['week_day'] = temp.weekday  # day of the week
features['hour'] = temp.hour
features['minute'] = temp.minute

# Time elapsed since the earliest record, in fractional days
start_time = features['timestamp'].min()
features['time_diff'] = features['timestamp'] - start_time
features['time_diff'] = features['time_diff'].dt.days + features['time_diff'].dt.seconds/3600/24
features[['timestamp', 'year', 'month', 'day', 'week_day', 'hour', 'minute', 'time_diff']]

# Same calendar features for the test set
temp = pd.DatetimeIndex(test_features['timestamp'])
test_features['year'] = temp.year
test_features['month'] = temp.month
test_features['day'] = temp.day
test_features['week_day'] = temp.weekday  # day of the week
test_features['hour'] = temp.hour
test_features['minute'] = temp.minute

# Time elapsed since the earliest *training* record, so train and test share the same origin
test_features['time_diff'] = test_features['timestamp'] - start_time
test_features['time_diff'] = test_features['time_diff'].dt.days + test_features['time_diff'].dt.seconds/3600/24
test_features['time_diff']

features['dev_height'].value_counts()
features['dev_width'].value_counts()
# Screen area feature
features['dev_area'] = features['dev_height'] * features['dev_width']
test_features['dev_area'] = test_features['dev_height'] * test_features['dev_width']

# Aspect-ratio feature (height divided by width)
features['dev_rato'] = features['dev_height'] / features['dev_width']
test_features['dev_rato'] = test_features['dev_height'] / test_features['dev_width']


#features['ntt'].value_counts()
features['carrier'].value_counts()
features['package'].value_counts()
# version_osv: gap between the OS version (osv) and the app version
features['osv'].value_counts()
features['version_osv'] = features['osv'] - features['version']
test_features['version_osv'] = test_features['osv'] - test_features['version']


# Define the 10-fold sub-model routine: train on each fold, report validation
# accuracy, and average the predicted test probabilities across the 10 folds
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score

def xgb_model(clf, train_x, train_y, test):
    # shuffle=True is required for random_state to take effect
    sk = StratifiedKFold(n_splits=10, shuffle=True, random_state=2021)

    prob = []
    mean_acc = 0
    for k, (train_index, val_index) in enumerate(sk.split(train_x, train_y)):
        train_x_real = train_x.iloc[train_index]
        train_y_real = train_y.iloc[train_index]
        val_x = train_x.iloc[val_index]
        val_y = train_y.iloc[val_index]
        # Train the sub-model on this fold
        clf = clf.fit(train_x_real, train_y_real)

        val_y_pred = clf.predict(val_x)
        acc_val = accuracy_score(val_y, val_y_pred)
        print('Sub-model {} accuracy: {}'.format(k+1, acc_val))
        mean_acc += acc_val / 10

        test_y_pred = clf.predict_proba(test)
        prob.append(test_y_pred)
    print('Mean accuracy:', mean_acc)

    # Average the class probabilities over the 10 sub-models
    mean_prob = sum(prob) / 10
    return mean_prob


import xgboost as xgb
model_xgb2 = xgb.XGBClassifier(
            max_depth=15, learning_rate=0.005, n_estimators=5300,
            objective='binary:logistic', tree_method='auto',
            subsample=0.7, colsample_bytree=0.7,
            min_child_weight=3,  # XGBoost parameter name (min_child_samples is the LightGBM equivalent)
            eval_metric='auc', reg_lambda=0.5
        )
result_xgb2 = xgb_model(model_xgb2, features.drop(['timestamp'], axis=1), train['label'], test_features.drop(['timestamp'], axis=1))


# Take the positive-class probability from each averaged prediction,
# then threshold at 0.5 to get the final labels
result_xgb3 = [x[1] for x in result_xgb2]
result_xgb3 = [1 if x >= 0.5 else 0 for x in result_xgb3]
print(result_xgb3)

# Build the submission file
res = pd.DataFrame(test['sid'])
res['label'] = result_xgb3
res.to_csv('./baseline_.89.116.csv', index=False)
res
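To see which engineered features actually carry weight (relevant to the feature-selection tip in the next section), the classifier's importances can be inspected. A minimal sketch; note that model_xgb2 here holds the fit from the last of the 10 folds, so this is only an approximation.

# Sketch: rank features by importance from the last fold's fitted model
feat_cols = features.drop(['timestamp'], axis=1).columns
importance = pd.Series(model_xgb2.feature_importances_, index=feat_cols)
print(importance.sort_values(ascending=False).head(20))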

5: Keys to Improving the Score

1: Data cleaning

        Remove or normalise unreasonable values, e.g. the cleaning of the osv field.

2: Missing-value imputation

3: Feature selection and construction

        fea_hash, fea1_hash (plus their length features fea_hash_len / fea1_hash_len)

        dev_area: device screen area

        version_osv: gap between the OS version and the app version

4: Creating a timestamp index (year, month, day, weekday, hour, minute, time_diff)

5: 5-fold or 10-fold sub-model fusion

6: Tuning the "hand-me-down" parameters (a tuning sketch follows below)
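As referenced in tip 6, here is a hedged sketch of how the parameters could be searched with scikit-learn's RandomizedSearchCV. The grid, the reduced n_estimators and the 3-fold CV are illustrative choices to keep the search affordable, not the settings behind the leaderboard score.

from sklearn.model_selection import RandomizedSearchCV
import xgboost as xgb

# Sketch: random search over a small illustrative parameter grid
param_dist = {
    'max_depth': [8, 10, 12, 15],
    'learning_rate': [0.005, 0.01, 0.05],
    'subsample': [0.6, 0.7, 0.8],
    'colsample_bytree': [0.6, 0.7, 0.8],
    'reg_lambda': [0.1, 0.5, 1.0],
}
base = xgb.XGBClassifier(n_estimators=500, objective='binary:logistic', eval_metric='auc')
search = RandomizedSearchCV(base, param_dist, n_iter=10, scoring='accuracy', cv=3, random_state=2021)
search.fit(features.drop(['timestamp'], axis=1), train['label'])
print(search.best_params_, search.best_score_)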

6: Notes

1: It is recommended to do feature selection and tune the "hand-me-down" parameters first, so that a single model scores as high as possible, and only then do model fusion (a simple blending sketch follows below).

2: There are many players far better at this; please go easy on the criticism.
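On model fusion beyond the 10-fold averaging inside xgb_model: probabilities from differently configured models can also be blended. A minimal sketch; the second configuration model_xgb_b and its hyperparameters are hypothetical, not part of the original solution.

# Sketch: average the 10-fold probabilities of two different XGBoost configurations
model_xgb_b = xgb.XGBClassifier(
            max_depth=12, learning_rate=0.01, n_estimators=3000,
            objective='binary:logistic', subsample=0.8,
            colsample_bytree=0.8, eval_metric='auc', random_state=7
        )
result_b = xgb_model(model_xgb_b, features.drop(['timestamp'], axis=1),
                     train['label'], test_features.drop(['timestamp'], axis=1))
blend_prob = (result_xgb2 + result_b) / 2                    # element-wise average of class probabilities
blend_label = [1 if p[1] >= 0.5 else 0 for p in blend_prob]  # threshold at 0.5 as before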

        

        
