# Similar to pandas DataFrame[col].value_counts()
# Get the number of unique entries in each column with categorical data.
object_nunique = [X_train[col].nunique() for col in object_cols]
d = dict(zip(object_cols, object_nunique))
# Print the number of unique entries by column, in ascending order.
# (The original called sorted() without using the result, so nothing was printed.)
print(sorted(d.items(), key=lambda x: x[1]))
# Dummy-variable handling: specify which features to encode and the column prefix.
# One-hot (dummy) encode every feature column in a single pass.
# pd.get_dummies accepts a list of columns, and `prefix` defaults to each
# column's own name — equivalent to the per-column loop, but without copying
# the whole frame once per feature.
df = pd.get_dummies(df, columns=feature_cols, prefix=feature_cols)
# Avoid the following pattern where possible: it can cause bugs when joining
# (e.g. column-name collisions with existing columns).
# get_dummies() expands the series into one indicator column per distinct
# value (one column == one possible value), similar to one-hot encoding.
embark_dummies = pd.get_dummies(train_data['Embarked'])
# Attach the indicator columns, then discard the original column.
train_data = train_data.join(embark_dummies).drop(columns=['Embarked'])
embark_dummies = train_data[['S', 'C', 'Q']]
embark_dummies.head()
# The `categorical_features` argument selected which features to encode;
# the default 'all' encoded every column. Features could also be chosen by
# index or by a boolean mask — see the example below.
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer

# `categorical_features` was deprecated in scikit-learn 0.20 and removed in
# 0.22. The modern equivalent selects columns with a ColumnTransformer:
# encode columns 0 and 2 (equivalent to the mask [True, False, True]) and
# pass column 1 through untouched. Encoded columns come first in the output,
# matching the old categorical_features behaviour.
enc = ColumnTransformer(
    [('onehot', OneHotEncoder(), [0, 2])],
    remainder='passthrough',
    sparse_threshold=0,  # force a dense ndarray, so no .toarray() is needed
)
enc.fit([[0, 0, 3],
         [1, 1, 0],
         [0, 2, 1],
         [1, 0, 2]])
ans = enc.transform([[0, 2, 3]])
# Equal-frequency binning of a variable, with WOE/IV statistics.
def bin_frequency(x, y, n=10):
    """Equal-frequency (quantile) binning with per-bin WOE and overall IV.

    Parameters
    ----------
    x : pd.Series
        Variable to be binned.
    y : pd.Series
        Binary target; 1 = bad sample, 0 = good sample.
    n : int, default 10
        Number of quantile bins (duplicate edges are dropped).

    Returns
    -------
    IV : float
        Information Value of the variable.
    cut : list
        Bin boundaries: [-inf, left edge of each bin..., +inf].
    WOEi : list of float
        WOE of each bin, rounded to 3 decimals.
    d4 : pd.DataFrame
        Per-bin summary table, sorted ascending by the bin's left edge.
    """
    total = y.count()                      # total number of samples
    bad = y.sum()                          # number of bad samples (y == 1)
    good = total - bad                     # number of good samples
    if x.value_counts().shape[0] == 2:
        # Binary variable: just split into its two values.
        d1 = pd.DataFrame({'x': x, 'y': y, 'bucket': pd.cut(x, 2)})
    else:
        # Equal-frequency binning via quantiles (pd.qcut); duplicate edges dropped.
        d1 = pd.DataFrame({'x': x, 'y': y,
                           'bucket': pd.qcut(x, n, duplicates='drop')})
    d2 = d1.groupby('bucket', as_index=True)   # aggregate per bin
    # Original built d3 via pd.DataFrame(d2.x.min(), columns=['min_bin']),
    # whose mismatched `columns=` yields an empty frame; build it cleanly.
    d3 = pd.DataFrame()
    d3['min_bin'] = d2.x.min()                 # left edge of each bin
    d3['max_bin'] = d2.x.max()                 # right edge of each bin
    d3['bad'] = d2.y.sum()                     # bad samples per bin
    d3['total'] = d2.y.count()                 # total samples per bin
    d3['bad_rate'] = d3['bad'] / d3['total']   # bad rate within the bin
    d3['badattr'] = d3['bad'] / bad            # bin's share of all bad samples
    d3['goodattr'] = (d3['total'] - d3['bad']) / good  # bin's share of all good samples
    d3['WOEi'] = np.log(d3['badattr'] / d3['goodattr'])        # WOE per bin
    d3['IVi'] = (d3['badattr'] - d3['goodattr']) * d3['WOEi']  # IV contribution per bin
    IV = d3['IVi'].sum()                       # total IV of the variable
    # Sort bins ascending by their left edge.
    d4 = d3.sort_values(by='min_bin').reset_index(drop=True)
    # Cut points: -inf, each bin's left edge, +inf.
    cut = [float('-inf'), *d4.min_bin, float('inf')]
    WOEi = list(d4['WOEi'].round(3))
    return IV, cut, WOEi, d4
# Candidate columns whose IV we want to inspect.
columns_iv = [
    '7天内申请人在多个平台申请借款',
    '1个月内申请人在多个平台申请借款',
    '3个月内申请人在多个平台申请借款',
    '7天内关联P2P网贷平台数',
    '1个月内关联P2P网贷平台数',
    '3个月内关联P2P网贷平台数',
    'X3个月内申请人手机号作为第二联系人手机号出现的次数',
    'X3个月内申请人手机号作为前三联系人手机号出现的次数',
]
ivs = []
# Compute and print the IV and binning table for each column.
for col in columns_iv:
    print(col)
    iv_value, _cut, _woe, bin_table = bin_frequency(df[col], df['flag'])
    print('IV=', iv_value)
    ivs.append(iv_value)
    print(bin_table)
# Binning of a variable at user-specified cut points, with WOE/IV statistics.
def bin_cut(x, y, cut):
    """Bin a variable at explicit cut points and compute per-bin WOE and IV.

    Parameters
    ----------
    x : pd.Series
        Variable to be binned.
    y : pd.Series
        Binary target; 1 = bad sample, 0 = good sample.
    cut : list
        Cut points passed to pd.cut (ignored when x is binary).

    Returns
    -------
    IV : float
        Information Value of the variable.
    bounds : list
        Bin boundaries: [-inf, left edge of each bin..., +inf].
    WOEi : list of float
        WOE of each bin, rounded to 3 decimals.
    d4 : pd.DataFrame
        Per-bin summary table, sorted ascending by the bin's left edge.
    """
    total = y.count()                      # total number of samples
    bad = y.sum()                          # number of bad samples (y == 1)
    good = total - bad                     # number of good samples
    if x.value_counts().shape[0] == 2:
        # Binary variable: just split into its two values.
        d1 = pd.DataFrame({'x': x, 'y': y, 'bucket': pd.cut(x, 2)})
    else:
        # Bin at the caller-supplied cut points.
        d1 = pd.DataFrame({'x': x, 'y': y, 'bucket': pd.cut(x, cut)})
    d2 = d1.groupby('bucket', as_index=True)   # aggregate per bin
    # Original built d3 via pd.DataFrame(d2.x.min(), columns=['min_bin']),
    # whose mismatched `columns=` yields an empty frame; build it cleanly.
    d3 = pd.DataFrame()
    d3['min_bin'] = d2.x.min()                 # left edge of each bin
    d3['max_bin'] = d2.x.max()                 # right edge of each bin
    d3['bad'] = d2.y.sum()                     # bad samples per bin
    d3['total'] = d2.y.count()                 # total samples per bin
    d3['bad_rate'] = d3['bad'] / d3['total']   # bad rate within the bin
    d3['badattr'] = d3['bad'] / bad            # bin's share of all bad samples
    d3['goodattr'] = (d3['total'] - d3['bad']) / good  # bin's share of all good samples
    d3['WOEi'] = np.log(d3['badattr'] / d3['goodattr'])        # WOE per bin
    d3['IVi'] = (d3['badattr'] - d3['goodattr']) * d3['WOEi']  # IV contribution per bin
    IV = d3['IVi'].sum()                       # total IV of the variable
    # Sort bins ascending by their left edge.
    d4 = d3.sort_values(by='min_bin').reset_index(drop=True)
    # Boundaries returned to the caller; a new name so the `cut` parameter
    # is not shadowed (original also had a dead `bucket = cut` local).
    bounds = [float('-inf'), *d4.min_bin, float('inf')]
    WOEi = list(d4['WOEi'].round(3))
    return IV, bounds, WOEi, d4
# Example: custom-cut binning with edges [-1, 0, 1, 4, 40].
# NOTE(review): the target here is df['y'], but the loop above uses
# df['flag'] — confirm which column is the intended target.
IV,cut,WOEi,d4 = bin_cut(df['3个月内关联P2P网贷平台数'], df['y'],[-1,0,1,4,40])
# Reference: https://blog.csdn.net/qq_32532663/article/details/104583491?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.add_param_isCf