2019-02-04

# Core data-analysis imports (original had missing spaces: "import pandasas pd"
# is a SyntaxError — fixed here).
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Load the Kaggle credit-card fraud dataset.
# Columns: Time, V1..V28 (PCA features), Amount, Class (0 = normal, 1 = fraud).
data = pd.read_csv("creditcard.csv")

# Per-class sample counts.
# NOTE: the module-level pd.value_counts(...) was deprecated and removed in
# pandas 2.0 — call .value_counts() on the Series instead.
a = data["Class"].value_counts()

# Same counts, re-ordered by class label (0 first, then 1) for display/plotting.
count_classes = data['Class'].value_counts(sort=True).sort_index()

# Fixed missing space: "from sklearn.preprocessingimport ..." is a SyntaxError.
from sklearn.preprocessing import StandardScaler

# z-score (StandardScaler) the raw Amount column: mean 0, unit variance.
# reshape(-1, 1) because fit_transform expects a 2-D (n_samples, 1) array.
data['normAmount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))

# Drop the columns the model will not use: Time, and the raw (un-scaled) Amount.
data = data.drop(['Time', 'Amount'], axis=1)

# Split into features X and label y.
# NOTE: the .ix indexer was removed in pandas 1.0 — use .loc with a boolean
# mask over the columns instead.
X = data.loc[:, data.columns != 'Class']
y = data.loc[:, data.columns == 'Class']

# ---------------------------------------------------------------------------
# Random undersampling: build a balanced dataset with as many normal samples
# as there are fraud samples.
# ---------------------------------------------------------------------------

# Number of fraud (minority-class) records.
number_records_fraud = len(data[data.Class == 1])

# Index labels of the fraud rows, as a numpy array.
fraud_indices = np.array(data[data.Class == 1].index)

# Index labels of all normal (Class == 0) rows.
normal_indices = data[data.Class == 0].index

# Randomly pick exactly number_records_fraud normal rows, WITHOUT replacement,
# so both classes end up with the same count.
random_normal_indices = np.random.choice(normal_indices, number_records_fraud, replace=False)
random_normal_indices = np.array(random_normal_indices)

# Concatenate the fraud and sampled-normal index labels into one array.
under_sample_indices = np.concatenate([fraud_indices, random_normal_indices])

# Select the undersampled rows.
# FIX: these are index *labels* (taken from data.index), not integer positions,
# so .loc is the correct indexer. The original used .iloc, which only happened
# to work because the default RangeIndex makes labels equal positions.
under_sample_data = data.loc[under_sample_indices, :]

# Split the undersampled data into features and labels.
# (.ix was removed in pandas 1.0 — use .loc with a boolean column mask.)
X_undersample = under_sample_data.loc[:, under_sample_data.columns != 'Class']
y_undersample = under_sample_data.loc[:, under_sample_data.columns == 'Class']

# Show the class ratio — each class should now be 50% of the resampled set.
print("Percentage of normal transactions: ", len(under_sample_data[under_sample_data.Class == 0]) / len(under_sample_data))
print("Percentage of fraud transactions: ", len(under_sample_data[under_sample_data.Class == 1]) / len(under_sample_data))
print("Total number of transactions in resampled data: ", len(under_sample_data))

# Fixed missing space: "from sklearn.model_selectionimport ..." is a SyntaxError.
from sklearn.model_selection import train_test_split

# ---------------------------------------------------------------------------
# Train/test splits.
# Both the full dataset and the undersampled dataset are split 70/30 with the
# same random_state so the two experiments are comparable.
# ---------------------------------------------------------------------------

# Whole (imbalanced) dataset.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

print("Number transactions train dataset: ", len(X_train))
print("Number transactions test dataset: ", len(X_test))
print("Total number of transactions: ", len(X_train) + len(X_test))

# Undersampled (balanced) dataset.
X_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = train_test_split(
    X_undersample, y_undersample, test_size=0.3, random_state=0)

print("")
print("Number transactions train dataset: ", len(X_train_undersample))
print("Number transactions test dataset: ", len(X_test_undersample))
print("Total number of transactions: ", len(X_train_undersample) + len(X_test_undersample))

You might also be interested in: (2019-02-04)