Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
Type "copyright", "credits" or "license" for more information.
IPython 7.6.1 -- An enhanced Interactive Python.
When the data (x) is first centered on its minimum and then scaled by the range (maximum - minimum), the values are shifted by the minimum and compressed into [0, 1]. This process is called data normalization (Min-Max scaling).
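In formula form (the same computation is reproduced by hand with NumPy later in this section):
x_scaled = (x - min(x)) / (max(x) - min(x))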
preprocessing.MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
data
Out[1]: [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
import pandas as pd
pd.DataFrame(data)
Out[2]:
0 1
0 -1.0 2
1 -0.5 6
2 0.0 10
3 1.0 18
scaler = MinMaxScaler() #instantiate
scaler = scaler.fit(data) #fit: here it essentially computes min(x) and max(x)
result = scaler.transform(data) #export the result through the transform interface
result
Out[3]:
array([[0. , 0. ],
[0.25, 0.25],
[0.5 , 0.5 ],
[1. , 1. ]])
#fit and export the result in one step
result_ = scaler.fit_transform(data)
#invert the normalized result back to the original scale
scaler.inverse_transform(result)
Out[4]:
array([[-1. , 2. ],
[-0.5, 6. ],
[ 0. , 10. ],
[ 1. , 18. ]])
#use MinMaxScaler's feature_range parameter to normalize the data into a range other than [0,1]
data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
scaler = MinMaxScaler(feature_range=[5,10]) #instantiate again, this time with a custom range
result = scaler.fit_transform(data) #fit_transform exports the result in one step
result
Out[5]:
array([[ 5. , 5. ],
[ 6.25, 6.25],
[ 7.5 , 7.5 ],
[10. , 10. ]])
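For reference, with feature_range = (a, b) the scaler first rescales each column to [0, 1] (call this X_std) and then maps it into [a, b]: X_scaled = X_std * (b - a) + a. Checking against the output above for the value -0.5 in the first column: X_std = 0.25, so X_scaled = 0.25 * (10 - 5) + 5 = 6.25.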
import numpy as np
X = np.array([[-1, 2], [-0.5, 6], [0, 10], [1, 18]])
#the same normalization implemented by hand with NumPy
X_nor = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_nor
Out[6]:
array([[0. , 0. ],
[0.25, 0.25],
[0.5 , 0.5 ],
[1. , 1. ]])
#invert the normalization by hand
X_returned = X_nor * (X.max(axis=0) - X.min(axis=0)) + X.min(axis=0)
X_returned
Out[7]:
array([[-1. , 2. ],
[-0.5, 6. ],
[ 0. , 10. ],
[ 1. , 18. ]])
When the data (x) is centered on its mean (μ) and then scaled by its standard deviation (σ), the result has mean 0 and variance 1. This process is called data standardization (Z-score standardization). Note that standardization only rescales the data; it does not by itself make the data normally distributed.
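In formula form:
x_std = (x - μ) / σ
where μ is the column mean and σ is the column standard deviation.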
preprocessing.StandardScaler
from sklearn.preprocessing import StandardScaler
data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
data
Out[8]: [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
scaler = StandardScaler() #instantiate
scaler.fit(data) #fit: essentially computes the mean and the variance
Out[10]: StandardScaler(copy=True, with_mean=True, with_std=True)
scaler.mean_ #check the mean via the mean_ attribute
Out[11]: array([-0.125, 9. ])
scaler.var_ #check the variance via the var_ attribute
Out[12]: array([ 0.546875, 35. ])
x_std = scaler.transform(data) #export the result through the transform interface
x_std
Out[13]:
array([[-1.18321596, -1.18321596],
[-0.50709255, -0.50709255],
[ 0.16903085, 0.16903085],
[ 1.52127766, 1.52127766]])
x_std.mean() #the exported result is an array; use mean() to check its mean
Out[14]: 0.0
x_std.std() #use std() to check the standard deviation
Out[15]: 1.0
scaler.fit_transform(data) #fit_transform(data) does both steps at once
Out[16]:
array([[-1.18321596, -1.18321596],
[-0.50709255, -0.50709255],
[ 0.16903085, 0.16903085],
[ 1.52127766, 1.52127766]])
scaler.inverse_transform(x_std) #use inverse_transform to undo the standardization
Out[17]:
array([[-1. , 2. ],
[-0.5, 6. ],
[ 0. , 10. ],
[ 1. , 18. ]])
################# Missing values ###################
impute.SimpleImputer
import pandas as pd
data = pd.read_csv(r"H:\程志伟\python\菜菜的机器学习skleaen课堂\数据预处理和特征工程 - 数据\Narrativedata.csv",index_col=0)
data.head()
Out[18]:
Age Sex Embarked Survived
0 22.0 male S No
1 38.0 female C Yes
2 26.0 female S Yes
3 35.0 female S Yes
4 35.0 male S No
data.info()
Int64Index: 891 entries, 0 to 890
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 714 non-null float64
1 Sex 891 non-null object
2 Embarked 889 non-null object
3 Survived 891 non-null object
dtypes: float64(1), object(3)
memory usage: 34.8+ KB
#fill the missing Age values
#feature matrices in sklearn must be two-dimensional; reshape(-1,1) lifts the 1-D data to 2-D
Age = data.loc[:,"Age"].values.reshape(-1,1)
Age[:10]
Out[20]:
array([[22.],
[38.],
[26.],
[35.],
[35.],
[nan],
[54.],
[ 2.],
[27.],
[14.]])
from sklearn.impute import SimpleImputer
imp_mean = SimpleImputer() #instantiate; fills with the mean by default
imp_median = SimpleImputer(strategy="median") #fill with the median
imp_0 = SimpleImputer(strategy="constant",fill_value=0) #fill with the constant 0
imp_mean = imp_mean.fit_transform(Age) #fit_transform fits and returns the result in one step
imp_median = imp_median.fit_transform(Age)
imp_0 = imp_0.fit_transform(Age)
imp_mean[:10]
Out[23]:
array([[22. ],
[38. ],
[26. ],
[35. ],
[35. ],
[29.69911765],
[54. ],
[ 2. ],
[27. ],
[14. ]])
#here we use the median to fill Age
imp_median[:10]
Out[24]:
array([[22.],
[38.],
[26.],
[35.],
[35.],
[28.],
[54.],
[ 2.],
[27.],
[14.]])
imp_0[:10]
Out[25]:
array([[22.],
[38.],
[26.],
[35.],
[35.],
[ 0.],
[54.],
[ 2.],
[27.],
[14.]])
#write the median-imputed values back into the Age column
data.loc[:,"Age"] = imp_median
data.info()
Int64Index: 891 entries, 0 to 890
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 891 non-null float64
1 Sex 891 non-null object
2 Embarked 889 non-null object
3 Survived 891 non-null object
dtypes: float64(1), object(3)
memory usage: 34.8+ KB
#fill Embarked with its mode (most frequent value)
Embarked = data.loc[:,"Embarked"].values.reshape(-1,1)
imp_mode = SimpleImputer(strategy = "most_frequent")
data.loc[:,"Embarked"] = imp_mode.fit_transform(Embarked)
data.info()
Int64Index: 891 entries, 0 to 890
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 891 non-null float64
1 Sex 891 non-null object
2 Embarked 891 non-null object
3 Survived 891 non-null object
dtypes: float64(1), object(3)
memory usage: 34.8+ KB
BONUS: imputing with Pandas and NumPy is actually even simpler
import pandas as pd
data_ = pd.read_csv(r"H:\程志伟\python\菜菜的机器学习skleaen课堂\数据预处理和特征工程 - 数据\Narrativedata.csv",index_col=0)
data_.head()
data_.loc[:,"Age"] = data_.loc[:,"Age"].fillna(data_.loc[:,"Age"].median())
#.fillna fills missing values directly inside the DataFrame
data_.dropna(axis=0,inplace=True)
#.dropna(axis=0) drops every row that contains a missing value; .dropna(axis=1) drops every column that contains one
#the inplace parameter: True modifies the original dataset, False returns a modified copy and leaves the original untouched (the default is False)
data_.info()
Int64Index: 889 entries, 0 to 890
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 889 non-null float64
1 Sex 889 non-null object
2 Embarked 889 non-null object
3 Survived 889 non-null object
dtypes: float64(1), object(3)
memory usage: 34.7+ KB
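As a side note (a sketch, not part of the original session): instead of dropping the rows with missing Embarked, pandas could also fill them with the column's mode, mirroring SimpleImputer(strategy="most_frequent"):
#fill the missing Embarked values with the most frequent category
data_.loc[:,"Embarked"] = data_.loc[:,"Embarked"].fillna(data_.loc[:,"Embarked"].mode()[0])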
########## Handling categorical features: encoding and dummy variables ###############
#preprocessing.LabelEncoder: for labels only; converts categories into integer codes
from sklearn.preprocessing import LabelEncoder
y = data.iloc[:,-1] #the input is the label, not a feature matrix, so a 1-D array is allowed
le = LabelEncoder() #instantiate
le = le.fit(y) #fit on the labels
label = le.transform(y) #get the result via the transform interface
le.classes_ #the classes_ attribute shows which classes appear in the label
Out[34]: array(['No', 'Unknown', 'Yes'], dtype=object)
#label #inspect the encoded result
#le.fit_transform(y) #or fit and transform in one step
#le.inverse_transform(label) #inverse_transform recovers the original labels
data.iloc[:,-1] = label #write the encoded labels back into the data
data.head()
Out[35]:
Age Sex Embarked Survived
0 22.0 male S 0
1 38.0 female C 2
2 26.0 female S 2
3 35.0 female S 2
4 35.0 male S 0
#this can also be written in one line:
from sklearn.preprocessing import LabelEncoder
data.iloc[:,-1] = LabelEncoder().fit_transform(data.iloc[:,-1])
#preprocessing.OrdinalEncoder: for features only; converts categorical features into integer codes
from sklearn.preprocessing import OrdinalEncoder
#the categories_ attribute corresponds to LabelEncoder's classes_ and serves exactly the same purpose
data_ = data.copy()
data_.head()
Out[37]:
Age Sex Embarked Survived
0 22.0 male S 0
1 38.0 female C 2
2 26.0 female S 2
3 35.0 female S 2
4 35.0 male S 0
OrdinalEncoder().fit(data_.iloc[:,1:-1]).categories_
Out[38]: [array(['female', 'male'], dtype=object), array(['C', 'Q', 'S'], dtype=object)]
data_.iloc[:,1:-1] = OrdinalEncoder().fit_transform(data_.iloc[:,1:-1])
data_.head()
Out[39]:
Age Sex Embarked Survived
0 22.0 1.0 2.0 0
1 38.0 0.0 0.0 2
2 26.0 0.0 2.0 2
3 35.0 0.0 2.0 2
4 35.0 1.0 2.0 0
#preprocessing.OneHotEncoder: one-hot encoding, creates dummy variables
data.head()
from sklearn.preprocessing import OneHotEncoder
X = data.iloc[:,1:-1] #take the two categorical feature columns, Sex and Embarked
enc = OneHotEncoder(categories='auto').fit(X)
result = enc.transform(X).toarray() #toarray() converts the sparse output to a dense array
result
Out[40]:
array([[0., 1., 0., 0., 1.],
[1., 0., 1., 0., 0.],
[1., 0., 0., 0., 1.],
...,
[1., 0., 0., 0., 1.],
[0., 1., 1., 0., 0.],
[0., 1., 0., 1., 0.]])
OneHotEncoder(categories='auto').fit_transform(X).toarray() #the same result with fit_transform in one step
Out[41]:
array([[0., 1., 0., 0., 1.],
[1., 0., 1., 0., 0.],
[1., 0., 0., 0., 1.],
...,
[1., 0., 0., 0., 1.],
[0., 1., 1., 0., 0.],
[0., 1., 0., 1., 0.]])
pd.DataFrame(enc.inverse_transform(result)) #inverse_transform recovers the original categories from the dummies
enc.get_feature_names() #lists the names of the generated dummy columns
result
Out[42]:
array([[0., 1., 0., 0., 1.],
[1., 0., 1., 0., 0.],
[1., 0., 0., 0., 1.],
...,
[1., 0., 0., 0., 1.],
[0., 1., 1., 0., 0.],
[0., 1., 0., 1., 0.]])
result.shape #5 columns: 2 dummies for Sex plus 3 for Embarked
Out[43]: (891, 5)
#axis=1 concatenates across columns, i.e. joins the two tables side by side; axis=0 would stack them vertically
newdata = pd.concat([data,pd.DataFrame(result)],axis=1)
newdata.head()
Out[44]:
Age Sex Embarked Survived 0 1 2 3 4
0 22.0 male S 0 0.0 1.0 0.0 0.0 1.0
1 38.0 female C 2 1.0 0.0 1.0 0.0 0.0
2 26.0 female S 2 1.0 0.0 0.0 0.0 1.0
3 35.0 female S 2 1.0 0.0 0.0 0.0 1.0
4 35.0 male S 0 0.0 1.0 0.0 0.0 1.0
newdata.drop(["Sex","Embarked"],axis=1,inplace=True)
newdata.columns =["Age","Survived","Female","Male","Embarked_C","Embarked_Q","Embarked_S"]
newdata.head()
Out[45]:
Age Survived Female Male Embarked_C Embarked_Q Embarked_S
0 22.0 0 0.0 1.0 0.0 0.0 1.0
1 38.0 2 1.0 0.0 1.0 0.0 0.0
2 26.0 2 1.0 0.0 0.0 0.0 1.0
3 35.0 2 1.0 0.0 0.0 0.0 1.0
4 35.0 0 0.0 1.0 0.0 0.0 1.0
#################### Handling continuous features: binarization and binning ###############
data_2 = data.copy()
from sklearn.preprocessing import Binarizer
X = data_2.iloc[:,0].values.reshape(-1,1) #this class works on features only, so a 1-D array cannot be used
transformer = Binarizer(threshold=30).fit_transform(X) #ages above 30 become 1, ages of 30 or below become 0
data_2.iloc[:,0] = transformer
data_2.head()
Out[48]:
Age Sex Embarked Survived
0 0.0 male S 0
1 1.0 female C 2
2 0.0 female S 2
3 1.0 female S 2
4 1.0 male S 0
from sklearn.preprocessing import KBinsDiscretizer
X = data.iloc[:,0].values.reshape(-1,1)
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform') #3 equal-width bins, each encoded as an integer in a single column
est.fit_transform(X)
#check the bins after transformation: the column has been split into three bins
set(est.fit_transform(X).ravel())
Out[49]: {0.0, 1.0, 2.0}
est = KBinsDiscretizer(n_bins=3, encode='onehot', strategy='uniform')
#check the bins after transformation: they have become dummy variables
est.fit_transform(X).toarray()
Out[50]:
array([[1., 0., 0.],
[0., 1., 0.],
[1., 0., 0.],
...,
[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
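As an aside (a sketch, not from the original session): besides strategy='uniform' (equal-width bins), KBinsDiscretizer also accepts strategy='quantile', which puts roughly the same number of samples in each bin:
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='quantile')
est.fit_transform(X) #equal-frequency binning of Age into 3 bins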