一些常用的pytorch代码段
class PCA:
    """Principal component analysis via SVD (torch implementation).

    input shape:  [data_nums, data_dimension]
    output shape: [data_nums, output_dim]
    """

    def __init__(self, output_dim) -> None:
        # Number of principal components to keep.
        self.output_dim = output_dim

    def fit(self, X_data):
        """Learn the training mean and the top `output_dim` principal directions."""
        # Store the column means so transform()/inverse_transform() can center
        # consistently. (The original centered only inside fit(), so transform()
        # projected *uncentered* data and inverse_transform() lost the mean.)
        self.mean = X_data.mean(dim=0, keepdim=True)
        # Mean subtraction is equivalent to multiplying by the centering matrix
        # H = I - 11^T/N, but O(N*D) instead of building an N x N matrix.
        X_centered = X_data - self.mean
        # torch.svd is deprecated; torch.linalg.svd returns V transposed (Vh).
        _, _, vh = torch.linalg.svd(X_centered, full_matrices=False)
        # base: [data_dimension, output_dim]
        self.base = vh[:self.output_dim].T

    def fit_transform(self, X_data):
        """Fit on X_data, then return its projection onto the learned basis."""
        self.fit(X_data)
        return self.transform(X_data)

    def transform(self, X_data):
        """Project X_data onto the basis, centering with the *training* mean."""
        return torch.matmul(X_data - self.mean, self.base)

    def inverse_transform(self, X_data):
        """Map projected data back to the original space (training mean added back)."""
        return torch.matmul(X_data, self.base.T) + self.mean
# The test set must be reduced with the basis learned on the training set.
# (The original called pca.fit(X_train) on the raw, unconverted data *before*
# the tensor conversion/reshape, then refit via fit_transform anyway.)
X_train = torch.tensor(np.array(X_train))  # convert to Tensor first
# Flatten to [N, H*W], e.g. [2000, 784]
# NOTE(review): uses shape[2]*shape[3], so input is assumed 4-D — confirm against caller.
X_train = torch.reshape(X_train, [X_train.shape[0], X_train.shape[2] * X_train.shape[3]])
pca = PCA(4)  # target dimensionality
X_train = pca.fit_transform(X_train)  # fit on, and reduce, the training data only
X_test = pca.transform(X_test)  # reuse the training basis — never refit on the test set
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def load_mnist():
    """Load the raw MNIST arrays from a local mnist.npz archive.

    Returns:
        (x_train, y_train), (x_test, y_test) as numpy arrays.
    """
    # Directory where mnist.npz lives (raw string; forward slashes also work on Windows).
    path = r'.//dataset//mnist.npz'
    # Context manager closes the archive even if an array lookup raises,
    # unlike the original explicit f.close() which a KeyError would skip.
    with np.load(path) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
    return (x_train, y_train), (x_test, y_test)
def filter_36(x, y):
    """Restrict (x, y) to the samples labelled 3 or 6.

    Returns the filtered features together with boolean labels:
    True where the digit is 3, False where it is 6.
    """
    mask = np.isin(y, (3, 6))
    return x[mask], y[mask] == 3
def get_mnist():
    """Build the binary (3 vs 6) MNIST task: flatten, standardize, PCA, arctan squash.

    Returns:
        x_train, y_train, x_test, y_test
        (labels are ints: 3 -> 1, 6 -> 0)
    """
    (x_train, y_train), (x_test, y_test) = load_mnist()
    x_train, y_train = filter_36(x_train, y_train)
    x_test, y_test = filter_36(x_test, y_test)
    # Flatten 28x28 images into 784-dim vectors.
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1] * x_train.shape[2]))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1] * x_test.shape[2]))
    # Standardize with statistics of the TRAINING set only. The original fit a
    # second StandardScaler on the test set, leaking test statistics and making
    # the two splits incomparable.
    sc = StandardScaler().fit(x_train)  # mean/variance from training data
    X_std_train = sc.transform(x_train)
    X_std_test = sc.transform(x_test)
    # Likewise: fit PCA on the training data and reuse the same projection for
    # the test data (the original refit PCA on the test set).
    pca = PCA(6).fit(X_std_train)
    pca_x_train = pca.transform(X_std_train)
    pca_x_test = pca.transform(X_std_test)
    # Squash features into (-pi/2, pi/2) with arctan.
    x_train = np.arctan(pca_x_train)
    x_test = np.arctan(pca_x_test)
    # Convert boolean labels (True=3, False=6) to ints 1/0.
    y_train = y_train + 0
    y_test = y_test + 0
    # The original return was truncated ("..., x_test,") and dropped y_test.
    return x_train, y_train, x_test, y_test
(1) BCELoss()
输出维度为1,一般在网络的最后一层需要Sigmoid()
target的取值为{0,1},训练数据y维度为[batch,1],类型为float,如y_train = torch.tensor(np.array(y_train)).float()
criterion = nn.BCELoss()
#预测值取值范围[0,1]
preds = outputs >= 0.5
batch_corrects = torch.sum(preds == labels.data).item()
(2) BCEWithLogitsLoss()
自动在网络输出(logits)后面加上Sigmoid(),然后计算binary cross entropy,数值上比Sigmoid()+BCELoss()更稳定
criterion = nn.BCEWithLogitsLoss()
#网络输出为logits,预测值可由 preds = torch.sigmoid(outputs) >= 0.5 求得(等价于 outputs >= 0)
(3) CrossEntropyLoss()
输出维度与待分类类别有关,分n类输出维度为n,自动在输出作用softmax()函数。训练数据y维度为[batch,],类型为Long,如y_train = torch.tensor(np.array(y_train)).long()
criterion = nn.CrossEntropyLoss()
_, preds = torch.max(outputs, 1)
batch_corrects = torch.sum(preds == labels.data).item()