sklearn normalization, standardization, and inverse_transform(): a detailed example with clear comments

from sklearn import preprocessing

import numpy as np

X = np.array([[ 1., -1., 2.],[ 2., 0., 0.],[ 0., 1., -1.]])

print(X)

# The MinMaxScaler transformation is given by:
#   X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
#   X_scaled = X_std * (max - min) + min
# where min, max = feature_range.
# Equivalently, it can be calculated as:
#   X_scaled = scale * X + min - X.min(axis=0) * scale
# where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))

# Normalization (to an arbitrary range)

scaler1 = preprocessing.MinMaxScaler(feature_range=(-1, 1)).fit(X)  # fit to (-1, 1); scaler1 stores the per-feature min/max so the original values can be recovered later

print(scaler1)

X_scaled1 = scaler1.transform(X)  # normalize to (-1, 1) using the statistics stored in scaler1; the same call works for later batches of data

print("normalized X_scaled1:{}".format(X_scaled1))

X1 = scaler1.inverse_transform(X_scaled1)  # map back to the original values via scaler1

print(X1)
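To make the formulas above concrete, here is a minimal verification sketch (continuing the script above, so it assumes X, scaler1, and X_scaled1 are already defined) that recomputes the min-max scaling by hand with NumPy and checks it against transform():

feat_min, feat_max = -1, 1  # the feature_range passed to MinMaxScaler
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_manual = X_std * (feat_max - feat_min) + feat_min
print(np.allclose(X_manual, X_scaled1))  # True: the hand-rolled formula matches transform()
print(np.allclose(X, scaler1.inverse_transform(X_scaled1)))  # True: inverse_transform recovers X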

print("fuck"*30)

# Standardization (zero mean, unit variance)

scaler2 = preprocessing.StandardScaler().fit(X)  # scaler2 stores the per-feature mean and variance

print(scaler2)

X_scaled2 = scaler2.transform(X)  # standardize using the statistics stored in scaler2 (the result has zero mean and unit variance per feature)

print("standardized X_scaled2:{}".format(X_scaled2))

X2 = scaler2.inverse_transform(X_scaled2)  # map back to the original values via scaler2

print(X2)

print("*"*100)

print("scaler2.var_:{}".format(scaler2.var_))

print("scaler2.mean_:{}".format(scaler2.mean_))

print("&"*100)

print("scaler1.data_min_:{}".format(scaler1.data_min_))

print("scaler1.data_max_:{}".format(scaler1.data_max_))

Output: (screenshots of the console output from the original post are omitted here)

Original post: https://blog.csdn.net/john_ashley/article/details/106913774
