import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.preprocessing import Normalizer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA
#Add graphviz to the PATH environment variable
import os
#os.environ["Path"]+=os.pathsep+"F:\graphviz\bin"
os.environ["PATH"]+=os.pathsep+"F:/graphviz/bin"
import pydotplus
#sl:satisfaction_level — False: MinMaxScaler normalization, True: StandardScaler standardization
#le:last_evaluation — False: MinMaxScaler normalization, True: StandardScaler standardization
#npr:number_project — False: MinMaxScaler normalization, True: StandardScaler standardization
#amh:average_monthly_hours — False: MinMaxScaler normalization, True: StandardScaler standardization
#tsc:time_spend_company — False: MinMaxScaler normalization, True: StandardScaler standardization
#wa:Work_accident — False: MinMaxScaler normalization, True: StandardScaler standardization
#pl5:promotion_last_5years — False: MinMaxScaler normalization, True: StandardScaler standardization
#dp:department — False: LabelEncoding, True: OneHotEncoding
#slr:salary — False: LabelEncoding, True: OneHotEncoding
#lower_d: whether to reduce dimensionality — False: keep all features, True: reduce
#ld_n: target number of dimensions when lower_d is True
def hr_preprocessing(sl=False,le=False,npr=False,amh=False,tsc=False,wa=False,pl5=False,dp=False,slr=False,lower_d=False,ld_n=1):
    """Load HR.csv, clean it, and return (features, label).

    Each boolean flag selects the treatment applied to one column:
      sl, le, npr, amh, tsc, wa, pl5 -- False: MinMaxScaler normalization,
                                        True: StandardScaler standardization
      slr (salary), dp (department)  -- False: label encoding, True: one-hot
      lower_d -- if True, reduce the feature matrix to ld_n dimensions via PCA
      ld_n    -- target dimensionality when lower_d is True

    Returns:
        (df, label) where df is the processed feature DataFrame (or an
        ndarray when lower_d is True) and label is the "left" column.
    """
    df = pd.read_csv("HR.csv")  # load the data
    # 1. Clean: drop rows missing key fields, then filter out anomalies.
    df = df.dropna(subset=["satisfaction_level", "last_evaluation"])
    # BUGFIX: the original chained indexing df[m1][m2] applies the second
    # mask to an already-filtered frame, which misaligns the boolean index
    # and is deprecated pandas behavior. Use one combined mask instead.
    df = df[(df["satisfaction_level"] <= 1) & (df["salary"] != "nme")]
    # 2. Label: the "left" column is the prediction target.
    label = df["left"]
    df = df.drop("left", axis=1)  # axis=1 drops a column
    # 3. Feature selection (none performed for now).
    # 4. Feature scaling: normalization or standardization per flag.
    scaler_lst = [sl, le, npr, amh, tsc, wa, pl5]
    column_lst = ["satisfaction_level", "last_evaluation", "number_project",
                  "average_monthly_hours", "time_spend_company", "Work_accident",
                  "promotion_last_5years"]
    for i in range(len(scaler_lst)):
        if not scaler_lst[i]:
            # reshape(-1,1) makes a column vector for the scaler;
            # reshape(1,-1)[0] flattens the result back to 1-D.
            df[column_lst[i]] = \
                MinMaxScaler().fit_transform(df[column_lst[i]].values.reshape(-1, 1)).reshape(1, -1)[0]
        else:
            df[column_lst[i]] = \
                StandardScaler().fit_transform(df[column_lst[i]].values.reshape(-1, 1)).reshape(1, -1)[0]
    scaler_lst = [slr, dp]
    column_lst = ["salary", "department"]
    for i in range(len(scaler_lst)):
        if not scaler_lst[i]:
            if column_lst[i] == "salary":
                # custom mapping keeps the low < medium < high ordering
                df[column_lst[i]] = [map_salary(s) for s in df["salary"].values]
            else:
                df[column_lst[i]] = LabelEncoder().fit_transform(df[column_lst[i]])
            # re-normalize the encoded values (optional step)
            df[column_lst[i]] = MinMaxScaler().fit_transform(df[column_lst[i]].values.reshape(-1, 1)).reshape(1, -1)[0]
        else:
            df = pd.get_dummies(df, columns=[column_lst[i]])  # pandas one-hot encoding
    if lower_d:
        # LDA's n_components is capped by the number of label classes,
        # so the unrestricted PCA is used for dimensionality reduction.
        return PCA(n_components=ld_n).fit_transform(df.values), label
    return df, label
# LabelEncoder sorts categories alphabetically by default; to force the
# ordering low=0, medium=1, high=2 we map salary levels by hand.
d = {"low": 0, "medium": 1, "high": 2}
def map_salary(s):
    """Map a salary label to its ordinal code; unknown labels default to 0 (low)."""
    return d.get(s, 0)
def hr_modeling(features, label):
    """Train several classifiers on the HR features and print metrics.

    Splits the data into train/validation/test (60%/20%/20%), fits each
    enabled model on the training set, and prints accuracy, recall and
    F1 score on all three splits (printed index: 0=train, 1=validation,
    2=test).
    """
    # Split off the validation set (20%), then split the rest 75/25
    # into train/test, giving an overall 60/20/20 partition.
    from sklearn.model_selection import train_test_split
    f_v = features.values              # DataFrame -> ndarray of feature values
    f_names = features.columns.values  # feature names (used by tree export below)
    l_v = label.values
    x_tt, x_validation, y_tt, y_validation = train_test_split(f_v, l_v, test_size=0.2)
    x_train, x_test, y_train, y_test = train_test_split(x_tt, y_tt, test_size=0.25)
    # Evaluation metrics
    from sklearn.metrics import accuracy_score, recall_score, f1_score
    # Classifiers
    from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
    from sklearn.naive_bayes import GaussianNB, BernoulliNB  # Gaussian NB; Bernoulli NB (suited to discrete features)
    from sklearn.tree import DecisionTreeClassifier, export_graphviz
    # BUGFIX: removed `from sklearn.externals.six import StringIO` --
    # sklearn.externals.six was removed in scikit-learn 0.23 and raises
    # ImportError on modern versions; StringIO was unused anyway (the
    # graphviz export below is commented out). Use io.StringIO if needed.
    from sklearn.svm import SVC  # SVC classifier from the SVM module
    from sklearn.ensemble import RandomForestClassifier  # random forest, a bagging ensemble
    from sklearn.ensemble import AdaBoostClassifier      # AdaBoost, a boosting ensemble
    models = []
    #models.append(("KNN",KNeighborsClassifier(n_neighbors=3)))
    #models.append(("GaussianNB",GaussianNB()))
    #models.append(("BernoulliNB",BernoulliNB()))
    models.append(("DecisionTreeGini", DecisionTreeClassifier()))  # Gini impurity by default
    #models.append(("DecisionTreeGini",DecisionTreeClassifier(min_impurity_split=0.1))) # parameters can be set to prune the tree
    #models.append(("DecisionTreeEntropy",DecisionTreeClassifier(criterion="entropy"))) # information gain
    models.append(("SVM Classifier", SVC(C=1000)))  # larger C demands higher training accuracy
    models.append(("RandomForest", RandomForestClassifier()))  # parameters can be tuned
    models.append(("Adaboost", AdaBoostClassifier()))
    for clf_name, clf in models:
        clf.fit(x_train, y_train)
        xy_lst = [(x_train, y_train), (x_validation, y_validation), (x_test, y_test)]
        for i in range(len(xy_lst)):
            x_part = xy_lst[i][0]  # features of split i
            y_part = xy_lst[i][1]  # labels of split i
            y_pred = clf.predict(x_part)
            print(i)  # split index: 0=train, 1=validation, 2=test
            print(clf_name, "ACC:", accuracy_score(y_part, y_pred))
            print(clf_name, "REC:", recall_score(y_part, y_pred))
            print(clf_name, "F-score:", f1_score(y_part, y_pred))
    # To render a fitted decision tree as a PDF via pydotplus:
    #dot_data=export_graphviz(clf,out_file=None,feature_names=f_names,
    #                         class_names=["NL","L"],filled=True,rounded=True,special_characters=True)
    # out_file=None returns the dot source directly; feature/class names and styling are set above
    #graph=pydotplus.graph_from_dot_data(dot_data)
    #graph.write_pdf("dt_tree.pdf")
#Entry point
def main():
    """Preprocess the HR data, then train and evaluate the models."""
    # All preprocessing flags default to False; set any to True as needed.
    features, label = hr_preprocessing()
    hr_modeling(features, label)

if __name__ == "__main__":
    main()