I just finished reading through this algorithm implementation on Kaggle (my notes are in the # comments). If you see anything differently, please comment or share some suggestions!
import pandas as pd
import numpy as np
import lightgbm as lgb
#import xgboost as xgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz  # scipy.sparse provides sparse-matrix utilities
from sklearn.preprocessing import LabelEncoder, OneHotEncoder  # one-hot encoding: an N-bit register for N states, with exactly one bit set at any time
from sklearn.model_selection import StratifiedKFold  # stratified K-fold cross-validation: every fold preserves the overall class ratio
#from sklearn.metrics import roc_auc_score
import gc
gc.enable()
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
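# Why the explicit dtypes above matter: pandas defaults to int64/float64/object,
# while int8/float16/category store the same columns in a fraction of the space,
# which is what lets this dataset fit in kernel memory.
# A tiny illustration (hypothetical toy data, not part of the pipeline):
_s64 = pd.Series([0, 1] * 10**6)   # default dtype int64: 8 bytes per value
_s8 = _s64.astype('int8')          # int8: 1 byte per value
print(_s64.memory_usage(index=False) // _s8.memory_usage(index=False))  # -> 8
del _s64, _s8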
print('Download Train and Test Data.\n')
train = pd.read_csv('../input/train.csv', dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv('../input/test.csv', dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
gc.collect()
print('Transform all features to category.\n')
for usecol in train.columns.tolist()[1:-1]:
    train[usecol] = train[usecol].astype('str')  # cast to string so train and test share one value space
    test[usecol] = test[usecol].astype('str')

    # Fit LabelEncoder on the union of train and test values:
    # every distinct value gets an integer label 0, 1, 2, 3, ...
    le = LabelEncoder().fit(
        np.unique(train[usecol].unique().tolist() +
                  test[usecol].unique().tolist()))

    # Shift by +1, because 0 is reserved below for dropped/NaN values
    train[usecol] = le.transform(train[usecol]) + 1
    test[usecol] = le.transform(test[usecol]) + 1

    agg_tr = (train
              .groupby([usecol])                              # group train by the encoded value
              .aggregate({'MachineIdentifier': 'count'})       # count occurrences of each value
              .reset_index()                                   # flatten the index so the merge below works cleanly
              .rename({'MachineIdentifier': 'Train'}, axis=1)) # rename the count column to 'Train'
    agg_te = (test
              .groupby([usecol])
              .aggregate({'MachineIdentifier': 'count'})
              .reset_index()
              .rename({'MachineIdentifier': 'Test'}, axis=1))

    # Outer-join the two count tables; values missing on one side get count 0
    agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
    # Select values with more than 1000 observations; rare values carry little signal
    agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
    agg['Total'] = agg['Train'] + agg['Test']  # total occurrences across both sets
    # Drop unbalanced values: keep only values reasonably represented in both
    # train and test, so the encoding does not skew the evaluation
    agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
    agg[usecol+'Copy'] = agg[usecol]  # duplicate column, used as the merge payload

    # Left-join the surviving values back onto train/test; anything filtered out
    # above becomes NaN and is replaced by the reserved label 0
    train[usecol] = (pd.merge(train[[usecol]],
                              agg[[usecol, usecol+'Copy']],
                              on=usecol, how='left')[usecol+'Copy']
                     .replace(np.nan, 0).astype('int').astype('category'))
    test[usecol] = (pd.merge(test[[usecol]],
                             agg[[usecol, usecol+'Copy']],
                             on=usecol, how='left')[usecol+'Copy']
                    .replace(np.nan, 0).astype('int').astype('category'))

del le, agg_tr, agg_te, agg, usecol
gc.collect()  # reclaim memory from the loop temporaries
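# A quick illustration of the encode-then-filter trick above
# (toy values, hypothetical; not part of the pipeline):
_le = LabelEncoder().fit(['win10', 'win7', 'win8'])
print(_le.transform(['win7', 'win8', 'win10']) + 1)  # -> [2 3 1]; label 0 stays reserved
# Values dropped by the frequency/balance filters fall out of the left merge as NaN
# and get mapped to that reserved 0, so all rare/unbalanced values share one bucket.
del _le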
y_train = np.array(train['HasDetections'])  # the target: whether the machine had malware detections
train_ids = train.index  # after preprocessing the index is fixed, so it can be used to address rows directly
test_ids = test.index
del train['HasDetections'], train['MachineIdentifier'], test['MachineIdentifier']
gc.collect()
print("If you don't want use Sparse Matrix choose Kernel Version 2 to get simple solution.\n")
print('--------------------------------------------------------------------------------------------------------')
print('Transform Data to Sparse Matrix.')
print('A Sparse Matrix can be used to fit a lot of models, e.g. XGBoost, LightGBM, Random Forest, K-Means, etc.')
print('To concatenate Sparse Matrices by column use hstack()')
print('Read more about Sparse Matrix https://docs.scipy.org/doc/scipy/reference/sparse.html')
print('Good Luck!')
print('--------------------------------------------------------------------------------------------------------')
#Fit OneHotEncoder
ohe = OneHotEncoder(categories='auto', sparse=True, dtype='uint8').fit(train)
#Transform data in small chunks to reduce memory usage: encode 100000 rows at a
#time rather than the whole frame at once, which would be far too expensive in RAM
m = 100000
train = vstack([ohe.transform(train[i*m:(i+1)*m]) for i in range(train.shape[0] // m + 1)])  # encode each chunk, then stack the sparse blocks vertically
test = vstack([ohe.transform(test[i*m:(i+1)*m]) for i in range(test.shape[0] // m + 1)])
save_npz('train.npz', train, compressed=True)  # save in compressed .npz format
save_npz('test.npz', test, compressed=True)
del ohe, train, test
gc.collect()
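# Sketch of what the chunked one-hot step produces (toy frame, hypothetical):
_toy = pd.DataFrame({'a': [1, 2, 1, 3]})
_enc = OneHotEncoder(categories='auto', sparse=True, dtype='uint8').fit(_toy)
_stacked = vstack([_enc.transform(_toy[i*2:(i+1)*2]) for i in range(2)])  # two 2-row chunks
print(_stacked.shape, _stacked.nnz)  # (4, 3) with only 4 stored entries: sparsity pays off
del _toy, _enc, _stacked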
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
skf.get_n_splits(train_ids, y_train)
lgb_test_result = np.zeros(test_ids.shape[0])  # accumulator for the test predictions from each fold
#lgb_train_result = np.zeros(train_ids.shape[0])
#xgb_test_result = np.zeros(test_ids.shape[0])
#xgb_train_result = np.zeros(train_ids.shape[0])
counter = 0  # tracks which fold we are on
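# StratifiedKFold keeps the class ratio identical in every fold, so the per-fold
# validation AUCs are comparable. A tiny check (toy labels, hypothetical):
_y = np.array([0]*8 + [1]*2)
for _tr, _va in StratifiedKFold(n_splits=2, shuffle=True, random_state=42).split(np.zeros(10), _y):
    print(_y[_va].mean())  # 0.2 in both folds, matching the overall positive rate
del _y, _tr, _va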
print('\nLightGBM\n')
for train_index, test_index in skf.split(train_ids, y_train):
    print('Fold {}\n'.format(counter + 1))  # report the current fold
    train = load_npz('train.npz')
    # Select the fold's rows in 100000-index chunks, mirroring the chunked
    # stacking above, so no single fancy-indexing step gets too large
    X_fit = vstack([train[train_index[i*m:(i+1)*m]] for i in range(train_index.shape[0] // m + 1)])
    X_val = vstack([train[test_index[i*m:(i+1)*m]] for i in range(test_index.shape[0] // m + 1)])
    X_fit, X_val = csr_matrix(X_fit, dtype='float32'), csr_matrix(X_val, dtype='float32')
    y_fit, y_val = y_train[train_index], y_train[test_index]
    del train
    gc.collect()

    lgb_model = lgb.LGBMClassifier(max_depth=-1,
                                   n_estimators=30000,
                                   learning_rate=0.05,
                                   num_leaves=2**12-1,
                                   colsample_bytree=0.28,
                                   objective='binary',
                                   n_jobs=-1)
    #xgb_model = xgb.XGBClassifier(max_depth=6,
    #                              n_estimators=30000,
    #                              colsample_bytree=0.2,
    #                              learning_rate=0.1,
    #                              objective='binary:logistic',
    #                              n_jobs=-1)

    lgb_model.fit(X_fit, y_fit, eval_metric='auc',
                  eval_set=[(X_val, y_val)],
                  verbose=100, early_stopping_rounds=100)
    #xgb_model.fit(X_fit, y_fit, eval_metric='auc',
    #              eval_set=[(X_val, y_val)],
    #              verbose=1000, early_stopping_rounds=300)

    #lgb_train_result[test_index] += lgb_model.predict_proba(X_val)[:,1]
    #xgb_train_result[test_index] += xgb_model.predict_proba(X_val)[:,1]
    del X_fit, X_val, y_fit, y_val, train_index, test_index
    gc.collect()

    test = load_npz('test.npz')
    test = csr_matrix(test, dtype='float32')
    lgb_test_result += lgb_model.predict_proba(test)[:,1]  # accumulate this fold's test predictions
    #xgb_test_result += xgb_model.predict_proba(test)[:,1]
    counter += 1
    del test
    gc.collect()

    #Stop fitting early to prevent a kernel time limit error
    #if counter == 3: break
#print('\nLigthGBM VAL AUC Score: {}'.format(roc_auc_score(y_train, lgb_train_result)))
#print('\nXGBoost VAL AUC Score: {}'.format(roc_auc_score(y_train, xgb_train_result)))
submission = pd.read_csv('../input/sample_submission.csv')
submission['HasDetections'] = lgb_test_result / counter  # average the accumulated fold predictions
submission.to_csv('lgb_submission.csv', index=False)
#submission['HasDetections'] = xgb_test_result / counter
#submission.to_csv('xgb_submission.csv', index=False)
#submission['HasDetections'] = 0.5 * lgb_test_result / counter + 0.5 * xgb_test_result / counter
#submission.to_csv('lgb_xgb_submission.csv', index=False)
print('\nDone.')
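One suggestion: if you want a local validation score before submitting, the commented-out lgb_train_result lines already sketch how. Collect the out-of-fold predictions inside the loop, then score them once at the end. A minimal sketch, assuming those lines are uncommented:

from sklearn.metrics import roc_auc_score
print('LightGBM OOF AUC: {}'.format(roc_auc_score(y_train, lgb_train_result)))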