nn.Module 类的 __call__ 函数会调用 forward 函数来完成前向计算;autograd 会自动生成反向传播所需的 backward 函数,并计算导数、将梯度反向传播给网络的参数。
# GPU configuration — two alternatives
## Option 1: set the process-wide environment variable CUDA_VISIBLE_DEVICES via os.environ;
## several cards may be listed, and the program will only see the listed GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2'
## Option 2: create a torch.device and later move tensors/models with .to(device);
## "cuda:1" refers to the second visible card
### torch.cuda.is_available() returns True when a CUDA-capable GPU is present
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# Other hyperparameters: batch_size, num_workers, learning rate, total epochs
batch_size = 256
num_workers = 4 # on Windows this should be 0, otherwise multi-process loading errors occur
lr = 1e-4
epochs = 20
PyTorch数据读入是通过Dataset+DataLoader的方式完成的,Dataset定义好数据的格式和数据变换形式,DataLoader用iterative的方式不断读入批次数据。
还需要data_transform对数据进行必要的变换,比如说需要将图片统一为一致的大小,以便后续能够输入网络训练;需要将数据格式转为Tensor类,等等,这些变换可以很方便地借助torchvision的transforms包来完成。
# First define the data transforms
from torchvision import transforms
image_size = 28
# data_transform can apply arbitrary image transforms (flip, crop, ...) — customize as needed
data_transform = transforms.Compose([
# this step depends on how the data is read later; unnecessary when using built-in datasets
transforms.ToPILImage(),
transforms.Resize(image_size),
transforms.ToTensor()
])
##读入csv格式的数据,自行构建Dataset类
#csv是一个表格,一行是一个图片。第一列是lable,类别,后面的列为图片像素(0-255),从左上到右下的顺序
#csv数据下载链接:https://www.kaggle.com/zalando-research/fashionmnist
#继承dataset子类,创建自己的数据集
class FMDataset(Dataset):
    """Fashion-MNIST dataset backed by a pandas DataFrame.

    Column 0 of the frame holds the class label; the remaining 784 columns
    hold the 28x28 gray-scale pixel values (0-255), left-to-right, top-to-bottom.
    """

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform
        # split the frame once into a uint8 pixel matrix and a label vector
        self.images = df.iloc[:, 1:].values.astype(np.uint8)
        self.labels = df.iloc[:, 0].values

    def __len__(self):
        # one DataFrame row == one sample
        return len(self.images)

    def __getitem__(self, idx):
        # restore the flat pixel row to HxWxC (28, 28, 1) for the transforms
        pixels = self.images[idx].reshape(28, 28, 1)
        target = int(self.labels[idx])
        if self.transform is None:
            # no transform: scale to [0, 1] floats manually
            pixels = torch.tensor(pixels / 255., dtype=torch.float)
        else:
            pixels = self.transform(pixels)
        return pixels, torch.tensor(target, dtype=torch.long)
# load the Kaggle Fashion-MNIST csv files (first column = label, rest = 784 pixels)
train_df = pd.read_csv("./FashionMNIST/fashion-mnist_train.csv")
test_df = pd.read_csv("./FashionMNIST/fashion-mnist_test.csv")
# instantiate the custom Dataset class
train_data = FMDataset(train_df, data_transform)
test_data = FMDataset(test_df, data_transform)
import torch
from torchvision import datasets
# ImageFolder expects one sub-directory per class under the given path
# NOTE(review): train_path / val_path are assumed to be defined elsewhere — confirm
train_data = datasets.ImageFolder(train_path, transform=data_transform)
val_data = datasets.ImageFolder(val_path, transform=data_transform)
这里使用了PyTorch自带的ImageFolder类的用于读取按一定结构存储的图片数据(path对应图片存放的目录,目录下包含若干子目录,每个子目录对应属于同一个类的图片)。
#其中图片存放在一个文件夹,另外有一个csv文件给出了图片名称对应的标签,txt文件的路径包含训练/验证集的图像名称
class MyDataset(Dataset):
    def __init__(self, data_dir, info_csv, image_list, transform=None):
        """Dataset for images stored in one folder with labels in a csv file.

        Args:
            data_dir: path to the image directory.
            info_csv: path to a csv file mapping image index to label.
            image_list: path to a txt file listing the train/val image names.
            transform: optional transform applied to each sample.
        """
        label_info = pd.read_csv(info_csv)
        image_file = open(image_list).readlines()
        self.data_dir = data_dir
        self.image_file = image_file
        self.label_info = label_info
        self.transform = transform
    def __getitem__(self, index):
        """Return the image at *index* and its label.

        Args:
            index: sample index.
        Returns:
            (image, label) tuple.
        """
        image_name = self.image_file[index].strip('\n')
        raw_label = self.label_info.loc[self.label_info['Image_index'] == image_name]
        # NOTE(review): iloc[:, 0] yields a pandas Series (the first column of the
        # matching rows), not a scalar — confirm downstream code expects this
        label = raw_label.iloc[:,0]
        image_name = os.path.join(self.data_dir, image_name)
        image = Image.open(image_name).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, label
    def __len__(self):
        return len(self.image_file)
# bugfix: the class defined above is MyDataset — "MyDset" would raise NameError
# NOTE: using the same csv/list for train and test is tutorial shorthand
train_data = MyDataset(data_dir, info_csv, image_list, transform=data_transform)
test_data = MyDataset(data_dir, info_csv, image_list, transform=data_transform)
# train_data is a Dataset; wrap it in a DataLoader for batched iteration
# the last training batch may be under-filled, which can hurt training, hence drop_last=True
train_loader = DataLoader(train_data, batch_size=batch_size,shuffle=True, num_workers=num_workers, drop_last=True)
test_loader = DataLoader(test_data, batch_size=batch_size,shuffle=False, num_workers=num_workers)
读入后,我们可以做一些数据可视化操作,主要是验证我们读入的数据是否正确
import matplotlib.pyplot as plt
# grab one batch to sanity-check tensor shapes and visualize the first image
image, label = next(iter(train_loader))
print(image.shape, label.shape)
plt.imshow(image[0][0], cmap="gray")
torch.Size([256, 1, 28, 28])
torch.Size([256])
<matplotlib.image.AxesImage at 0x7f19a043cc10>
from torch.utils.data import DataLoader
# drop the ragged last batch only for training; keep every sample when validating
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, num_workers=4, shuffle=False)
这里可以看一下我们的加载的数据。PyTorch中的DataLoader的读取可以使用next和iter来完成
import matplotlib.pyplot as plt
# grab one validation batch and display the first image
images, labels = next(iter(val_loader))
print(images.shape)
# bugfix: Tensor.transpose() takes exactly two dims — use permute to move
# channels last (CHW -> HWC) as matplotlib expects
plt.imshow(images[0].permute(1, 2, 0))
plt.show()
下载并使用PyTorch提供的内置数据集。只适用于常见的数据集,如MNIST、CIFAR10等,PyTorch官方提供了数据下载。这种方式往往适用于快速测试(比如测试某个idea在MNIST数据集上是否有效)。
# First define the data transform
from torchvision import transforms
image_size = 28
data_transform = transforms.Compose([
    # bugfix: the built-in datasets already return PIL images, so ToPILImage()
    # would fail here — and without ToTensor() the DataLoader cannot collate
    # the samples into batched tensors
    transforms.ToTensor()
])
# use the torchvision built-in dataset; the download may take a while
from torchvision import datasets
train_data = datasets.FashionMNIST(root='./', train=True, download=True, transform=data_transform)
test_data = datasets.FashionMNIST(root='./', train=False, download=True, transform=data_transform)
/data1/ljq/anaconda3/envs/smp/lib/python3.8/site-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /opt/conda/conda-bld/pytorch_1623448234945/work/torch/csrc/utils/tensor_numpy.cpp:180.)
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
# train_data is a Dataset; wrap it in a DataLoader for batched iteration
# the last training batch may be under-filled, which can hurt training, hence drop_last=True
train_loader = DataLoader(train_data, batch_size=batch_size,shuffle=True, num_workers=num_workers, drop_last=True)
test_loader = DataLoader(test_data, batch_size=batch_size,shuffle=False, num_workers=num_workers)
读入后,我们可以做一些数据可视化操作,主要是验证我们读入的数据是否正确
import matplotlib.pyplot as plt
# inspect one batch: the printed shapes confirm (batch, 1, 28, 28) images and (batch,) labels
image, label = next(iter(train_loader))
print(image.shape, label.shape)
plt.imshow(image[0][0], cmap="gray")
torch.Size([256, 1, 28, 28])
torch.Size([256])
<matplotlib.image.AxesImage at 0x7f19a043cc10>
我们可以使用torch.nn
包来构建神经网络。nn
包则依赖于autograd
包来定义模型并对它们求导。一个nn.Module
包含各个层和一个forward(input)
方法,该方法返回output
注意:torch.nn
只支持小批量处理 (mini-batches)。整个 torch.nn
包只支持小批量样本的输入,不支持单个样本的输入。
比如,nn.Conv2d
接受一个4维的张量,即nSamples x nChannels x Height x Width
如果是一个单独的样本,只需要使用input.unsqueeze(0)
来添加一个“假的”批大小维度。
- torch.Tensor
- 一个多维数组,支持诸如backward()
等的自动求导操作,同时也保存了张量的梯度。
- nn.Module
- 神经网络模块。是一种方便封装参数的方式,具有将参数移动到GPU、导出、加载等功能。
- nn.Parameter
- 张量的一种,当它作为一个属性分配给一个Module
时,它会被自动注册为一个参数。
- autograd.Function
- 实现了自动求导前向和反向传播的定义,每个Tensor
至少创建一个Function
节点,该节点连接到创建Tensor
的函数并对其历史进行编码。
自定义含模型参数的自定义层。其中的模型参数可以通过训练学出。
Parameter 类其实是 Tensor 的子类。如果一 个 Tensor 是 Parameter 那么它会自动被添加到模型的参数列表里。
所以自定义含模型参数的层时,我们应该将参数定义成 Parameter
除了直接定义成 Parameter 类外,还可以使⽤ ParameterList 和 ParameterDict 分别定义参数的列表和字典。
class MyListDense(nn.Module):
    """Dense stack whose weights live in an nn.ParameterList.

    Three random 4x4 matrices followed by one 4x1 matrix; the forward pass
    is a chain of matrix products, so an (n, 4) input maps to an (n, 1) output.
    """

    def __init__(self):
        super(MyListDense, self).__init__()
        # ParameterList registers every tensor as a learnable parameter
        weights = [nn.Parameter(torch.randn(4, 4)) for _ in range(3)]
        self.params = nn.ParameterList(weights)
        self.params.append(nn.Parameter(torch.randn(4, 1)))

    def forward(self, x):
        # multiply through every stored weight, in registration order
        for weight in self.params:
            x = torch.mm(x, weight)
        return x
# instantiate and show the registered parameters
net = MyListDense()
print(net)
class MyDictDense(nn.Module):
    """Dense layer with its weights kept in an nn.ParameterDict.

    forward() picks one weight matrix by key (default 'linear1'), so a single
    module can apply different learned matrices per call.
    """

    def __init__(self):
        super(MyDictDense, self).__init__()
        self.params = nn.ParameterDict({
            'linear1': nn.Parameter(torch.randn(4, 4)),
            'linear2': nn.Parameter(torch.randn(4, 1))
        })
        # ParameterDict supports dict-style update() for adding entries later
        self.params.update({'linear3': nn.Parameter(torch.randn(4, 2))})

    def forward(self, x, choice='linear1'):
        selected = self.params[choice]
        return torch.mm(x, selected)
# instantiate and show the registered parameter dict
net = MyDictDense()
print(net)
一个模型的可学习参数可以通过net.parameters()
返回
params = list(net.parameters())
print(len(params))
print(params[0].size()) # conv1的权重
10
torch.Size([6, 1, 5, 5])
二维卷积层将输入和卷积核做互相关运算,并加上一个标量偏差来得到输出
卷积层的模型参数包括了卷积核和标量偏差
在训练模型的时候,通常我们先对卷积核随机初始化,然后不断迭代卷积核和偏差
import torch
from torch import nn
# 卷积运算(二维互相关)
def corr2d(X, K):
    """Two-dimensional cross-correlation of input X with kernel K.

    Slides K over X (stride 1, no padding), summing the elementwise products
    at each position; returns a float tensor of shape (H - h + 1, W - w + 1).
    """
    kh, kw = K.shape
    X, K = X.float(), K.float()
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = torch.zeros((out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            window = X[r: r + kh, c: c + kw]
            Y[r, c] = (window * K).sum()
    return Y
# 二维卷积层
class Conv2D(nn.Module):
def __init__(self, kernel_size):
super(Conv2D, self).__init__()
self.weight = nn.Parameter(torch.randn(kernel_size))
self.bias = nn.Parameter(torch.randn(1))
def forward(self, x):
return corr2d(x, self.weight) + self.bias
卷积窗口形状为 p × q p \times q p×q 的卷积层称为 p × q p \times q p×q 卷积层。同样, p × q p \times q p×q 卷积或 p × q p \times q p×q 卷积核说明卷积核的高和宽分别为 p p p 和 q q q。
填充(padding)是指在输⼊高和宽的两侧填充元素(通常是0元素),** 填充增加输出的高和宽 **
在二维互相关运算中,卷积窗口从输入数组的最左上方开始,按从左往右、从上往下 的顺序,依次在输⼊数组上滑动。我们将每次滑动的行数和列数称为步幅(stride),** 步幅可以减小输出的高和宽 **
池化层直接计算池化窗口内元素的最大值(最大池化)或者平均值(平均池化)
下面把池化层的前向计算实现在pool2d
函数里。
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN: 1-channel 32x32 inputs, 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # conv stack: 1 -> 6 -> 16 channels, both with 5x5 kernels
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # classifier head: affine layers y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # a single int is enough when the pooling window is square
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # flatten everything except the batch dimension
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def num_flat_features(self, x):
        """Number of elements per sample, ignoring the batch dimension."""
        dims = x.size()[1:]
        count = 1
        for d in dims:
            count *= d
        return count
# build the network and print its layer summary
net = Net()
print(net)
Net(
(conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
(fc1): Linear(in_features=400, out_features=120, bias=True)
(fc2): Linear(in_features=120, out_features=84, bias=True)
(fc3): Linear(in_features=84, out_features=10, bias=True)
)
让我们尝试一个随机的 32x32 的输入。注意:这个网络 (LeNet)的期待输入是 32x32 的张量。如果使用 MNIST 数据集来训练这个网络,要把图片大小重新调整到 32x32。
# LeNet expects 32x32 inputs (resize MNIST images accordingly)
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
清零所有参数的梯度缓存,然后进行随机梯度的反向传播:
# clear all accumulated gradients, then backprop a random upstream gradient
net.zero_grad()
out.backward(torch.randn(1, 10))
class Net(nn.Module):
    """CNN for 28x28 gray-scale Fashion-MNIST images (10 classes).

    Two conv/pool/dropout stages reduce the feature map to 64x4x4, which a
    two-layer fully connected head maps to the class logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
            nn.Conv2d(32, 64, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3)
        )
        self.fc = nn.Sequential(
            nn.Linear(64*4*4, 512),
            nn.ReLU(),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        features = self.conv(x)
        # flatten the 64x4x4 feature map before the classifier head
        flat = features.view(-1, 64*4*4)
        logits = self.fc(flat)
        # x = nn.functional.normalize(x)
        return logits
model = Net()
# move the model to the GPU before creating the optimizer
model = model.cuda()
# model = nn.DataParallel(model).cuda() # multi-GPU variant, covered in a later lesson
通过访问torch.nn.init的官方文档链接 ,我们发现torch.nn.init
提供了以下初始化方法:
1 . torch.nn.init.uniform_
(tensor, a=0.0, b=1.0)
2 . torch.nn.init.normal_
(tensor, mean=0.0, std=1.0)
3 . torch.nn.init.constant_
(tensor, val)
4 . torch.nn.init.ones_
(tensor)
5 . torch.nn.init.zeros_
(tensor)
6 . torch.nn.init.eye_
(tensor)
7 . torch.nn.init.dirac_
(tensor, groups=1)
8 . torch.nn.init.xavier_uniform_
(tensor, gain=1.0)
9 . torch.nn.init.xavier_normal_
(tensor, gain=1.0)
10 . torch.nn.init.kaiming_uniform_
(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu')
11 . torch.nn.init.kaiming_normal_
(tensor, a=0, mode=‘fan_in’, nonlinearity=‘leaky_relu’)
12 . torch.nn.init.orthogonal_
(tensor, gain=1)
13 . torch.nn.init.sparse_
(tensor, sparsity, std=0.01)
14 . torch.nn.init.calculate_gain
(nonlinearity, param=None)
# Kaiming (He) initialisation for a conv weight
# NOTE(review): `conv` and `linear` are assumed to be nn.Conv2d / nn.Linear
# instances defined elsewhere — confirm
torch.nn.init.kaiming_normal_(conv.weight.data)
conv.weight.data
# constant initialisation for a linear weight
torch.nn.init.constant_(linear.weight.data,0.3)
linear.weight.data
tensor([[[[ 0.3249, -0.0500, 0.6703],
[-0.3561, 0.0946, 0.4380],
[-0.9426, 0.9116, 0.4374]]],
[[[ 0.6727, 0.9885, 0.1635],
[ 0.7218, -1.2841, -0.2970],
[-0.9128, -0.1134, -0.3846]]],
[[[ 0.2018, 0.4668, -0.0937],
[-0.2701, -0.3073, 0.6686],
[-0.3269, -0.0094, 0.3246]]]])
tensor([[0.3000, 0.3000, 0.3000, 0.3000, 0.3000, 0.3000, 0.3000, 0.3000, 0.3000,0.3000]])
人们常常将各种初始化方法定义为一个initialize_weights()
的函数并在模型初始后进行使用。
def initialize_weights(self):
    """Initialise the weights of every sub-module according to its layer type.

    Conv2d: Xavier-normal weights, bias 0.3; Linear: normal weights
    (mean 0.1), zero bias; BatchNorm2d: weight 1, bias 0.
    """
    # walk every layer of the model and dispatch on its type
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            torch.nn.init.xavier_normal_(m.weight.data)
            # conv bias is optional, so guard before initialising it
            if m.bias is not None:
                torch.nn.init.constant_(m.bias.data,0.3)
        elif isinstance(m, nn.Linear):
            # NOTE(review): 0.1 here is the *mean* (std stays 1.0) — verify intent
            torch.nn.init.normal_(m.weight.data, 0.1)
            if m.bias is not None:
                torch.nn.init.zeros_(m.bias.data)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            # bugfix: Tensor has no zeros_() method — the in-place zero fill is zero_()
            m.bias.data.zero_()
这段代码流程是遍历当前模型的每一层,然后判断各层属于什么类型,然后根据不同类型层,设定不同的权值初始化方法。我们可以通过下面的例程进行一个简短的演示:
# 模型的定义
class MLP(nn.Module):
    """Toy network (conv -> ReLU -> linear) used to demo weight initialisation.

    Layers with parameters are declared in __init__; forward() wires them up.
    """

    def __init__(self, **kwargs):
        # forward extra keyword arguments to the nn.Module constructor
        super(MLP, self).__init__(**kwargs)
        self.hidden = nn.Conv2d(1,1,3)
        self.act = nn.ReLU()
        self.output = nn.Linear(10,1)

    def forward(self, x):
        hidden = self.hidden(x)
        activated = self.act(hidden)
        return self.output(activated)
# show the parameters before and after applying initialize_weights
mlp = MLP()
print(list(mlp.parameters()))
print("-------初始化-------")
initialize_weights(mlp)
print(list(mlp.parameters()))
[Parameter containing:
tensor([[[[ 0.2103, -0.1679, 0.1757],
[-0.0647, -0.0136, -0.0410],
[ 0.1371, -0.1738, -0.0850]]]], requires_grad=True), Parameter containing:
tensor([0.2507], requires_grad=True), Parameter containing:
tensor([[ 0.2790, -0.1247, 0.2762, 0.1149, -0.2121, -0.3022, -0.1859, 0.2983,
-0.0757, -0.2868]], requires_grad=True), Parameter containing:
tensor([-0.0905], requires_grad=True)]
"-------初始化-------"
[Parameter containing:
tensor([[[[-0.3196, -0.0204, -0.5784],
[ 0.2660, 0.2242, -0.4198],
[-0.0952, 0.6033, -0.8108]]]], requires_grad=True),
Parameter containing:
tensor([0.3000], requires_grad=True),
Parameter containing:
tensor([[ 0.7542, 0.5796, 2.2963, -0.1814, -0.9627, 1.9044, 0.4763, 1.2077,
0.8583, 1.9494]], requires_grad=True),
Parameter containing:
tensor([0.], requires_grad=True)]
使用torch.nn模块自带的CrossEntropy损失
PyTorch会自动把整数型的label转为one-hot型,用于计算CE loss
这里需要确保label是从0开始的,同时模型不加softmax层(使用logits计算),这也说明了PyTorch训练中各个部分不是独立的,需要通盘考虑
criterion = nn.CrossEntropyLoss()
# per-class weights let you penalise mistakes on some classes more heavily
# NOTE(review): the weight argument must be a torch.Tensor, not a plain list
#criterion = nn.CrossEntropyLoss(weight=[1,1,1,1,3,1,1,1,1,1])
Pytorch很人性化的给我们提供了一个优化器的库torch.optim,在这里面提供了十种优化器。
Optimizer
的定义如下:class Optimizer(object):
def __init__(self, params, defaults):
self.defaults = defaults
self.state = defaultdict(dict)
self.param_groups = []
defaults
:存储的是优化器的超参数,例子如下:{'lr': 0.1, 'momentum': 0.9, 'dampening': 0, 'weight_decay': 0, 'nesterov': False}
state
:参数的缓存,例子如下:defaultdict(<class 'dict'>, {tensor([[ 0.3864, -0.0131],
[-0.1911, -0.4511]], requires_grad=True): {'momentum_buffer': tensor([[0.0052, 0.0052],
[0.0052, 0.0052]])}})
param_groups
:管理的参数组,是一个list,其中每个元素是一个字典,顺序是params,lr,momentum,dampening,weight_decay,nesterov,例子如下:[{'params': [tensor([[-0.1022, -1.6890],[-1.5116, -1.7846]], requires_grad=True)], 'lr': 1, 'momentum': 0, 'dampening': 0, 'weight_decay': 0, 'nesterov': False}]
zero_grad()
:清空所管理参数的梯度,PyTorch的特性是张量的梯度不自动清零,因此每次反向传播后都需要清空梯度。step()
:执行一步梯度更新,参数更新add_param_group()
:添加参数组load_state_dict()
:加载状态参数字典,可以用来进行模型的断点续训练,继续上次的参数进行训练state_dict()
:获取优化器当前状态信息字典optimizer = optim.Adam(model.parameters(), lr=0.001)
各自封装成函数,方便后续调用
model.train() # 训练状态
model.eval() # 验证/测试状态
train:1读取,2转换,3梯度清零,4输入,5计算损失,6反向传播,7参数更新
test/valid:1读取,2转换,4输入,5计算损失,8计算指标
# canonical training step: move to GPU, zero grads, forward, loss, backward, update
for data, label in train_loader:
    data, label = data.cuda(), label.cuda()
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, label)
    loss.backward()
    optimizer.step()
def train(epoch):
    """Run one training epoch and print the epoch-average loss.

    Relies on the module-level model, train_loader, optimizer and criterion.
    """
    model.train()
    train_loss = 0
    for data, label in train_loader:
        data, label = data.cuda(), label.cuda()
        optimizer.zero_grad()
        output = model(data)
        # bugfix: CrossEntropyLoss expects (input_logits, target) — the
        # original passed (label, output), which raises / computes garbage
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        # loss.item() is the batch mean; weight by batch size for the dataset average
        train_loss += loss.item()*data.size(0)
    train_loss = train_loss/len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
def val(epoch):
    """Run one validation epoch and print the epoch-average loss.

    Relies on the module-level model, val_loader and criterion.
    """
    model.eval()
    val_loss = 0
    # bugfix: the accuracy accumulator must be initialised before the loop
    running_accu = 0
    with torch.no_grad():
        for data, label in val_loader:
            data, label = data.cuda(), label.cuda()
            output = model(data)
            preds = torch.argmax(output, 1)
            loss = criterion(output, label)
            val_loss += loss.item()*data.size(0)
            running_accu += torch.sum(preds == label.data)
    val_loss = val_loss/len(val_loader.dataset)
    # bugfix: this is validation, so report "Validation Loss", not "Training Loss"
    print('Epoch: {} \tValidation Loss: {:.6f}'.format(epoch, val_loss))
def train(epoch):
    """Run one training epoch and print the epoch-average loss."""
    # train mode: enables dropout and batch-norm statistics updates
    model.train()
    train_loss = 0
    #for i, (data,lable) in enumerate(train_loader):
    # enumerate() as above exposes the batch index i when per-batch bookkeeping is needed
    for data, label in train_loader:
        # the model was moved to the GPU, so the batches must follow
        data, label = data.cuda(), label.cuda()
        # gradients accumulate by default; clearing them anywhere before backward() works
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        # apply one optimizer update
        optimizer.step()
        # loss.item() is the batch mean; weight by batch size for the dataset average
        train_loss += loss.item()*data.size(0)
    # len(dataset) comes from the Dataset's __len__
    train_loss = train_loss/len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
def val(epoch):
    """Run one evaluation epoch; print the average loss and accuracy."""
    # eval mode: freezes dropout and batch-norm statistics
    model.eval()
    val_loss = 0
    gt_labels = []
    pred_labels = []
    # no gradient tracking during evaluation
    with torch.no_grad():
        for data, label in test_loader:
            # the model lives on the GPU, so the data must too
            data, label = data.cuda(), label.cuda()
            # note: no optimizer.zero_grad() — evaluation only
            output = model(data)
            # argmax over the logits gives the predicted class per sample
            # (preds has one entry per sample in the batch)
            preds = torch.argmax(output, 1)
            gt_labels.append(label.cpu().data.numpy())
            pred_labels.append(preds.cpu().data.numpy())
            loss = criterion(output, label)
            # no loss.backward()
            # no optimizer.step()
            val_loss += loss.item()*data.size(0)
    val_loss = val_loss/len(test_loader.dataset)
    # concatenate the per-batch label arrays into one vector each
    gt_labels, pred_labels = np.concatenate(gt_labels), np.concatenate(pred_labels)
    # overall accuracy over the whole test set
    acc = np.sum(gt_labels==pred_labels)/len(pred_labels)
    print('Epoch: {} \tValidation Loss: {:.6f}, Accuracy: {:6f}'.format(epoch, val_loss, acc))
# alternate: one epoch of training, then one epoch of validation
for epoch in range(1, epochs+1):
    train(epoch)
    val(epoch)
/data1/ljq/anaconda3/envs/smp/lib/python3.8/site-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /opt/conda/conda-bld/pytorch_1623448234945/work/c10/core/TensorImpl.h:1156.)
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
Epoch: 1 Training Loss: 0.659050
Epoch: 1 Validation Loss: 0.420328, Accuracy: 0.852000
Epoch: 2 Training Loss: 0.403703
Epoch: 2 Validation Loss: 0.350373, Accuracy: 0.872300
Epoch: 3 Training Loss: 0.350197
Epoch: 3 Validation Loss: 0.293053, Accuracy: 0.893200
Epoch: 4 Training Loss: 0.322463
Epoch: 4 Validation Loss: 0.283335, Accuracy: 0.892300
Epoch: 5 Training Loss: 0.300117
Epoch: 5 Validation Loss: 0.268653, Accuracy: 0.903500
Epoch: 6 Training Loss: 0.282179
Epoch: 6 Validation Loss: 0.247219, Accuracy: 0.907200
Epoch: 7 Training Loss: 0.268283
Epoch: 7 Validation Loss: 0.242937, Accuracy: 0.907800
Epoch: 8 Training Loss: 0.257615
Epoch: 8 Validation Loss: 0.234324, Accuracy: 0.912200
Epoch: 9 Training Loss: 0.245795
Epoch: 9 Validation Loss: 0.231515, Accuracy: 0.914100
Epoch: 10 Training Loss: 0.238739
Epoch: 10 Validation Loss: 0.229616, Accuracy: 0.914400
Epoch: 11 Training Loss: 0.230499
Epoch: 11 Validation Loss: 0.228124, Accuracy: 0.915200
Epoch: 12 Training Loss: 0.221574
Epoch: 12 Validation Loss: 0.211928, Accuracy: 0.921200
Epoch: 13 Training Loss: 0.217924
Epoch: 13 Validation Loss: 0.209744, Accuracy: 0.921700
Epoch: 14 Training Loss: 0.206033
Epoch: 14 Validation Loss: 0.215477, Accuracy: 0.921400
Epoch: 15 Training Loss: 0.203349
Epoch: 15 Validation Loss: 0.215550, Accuracy: 0.919400
Epoch: 16 Training Loss: 0.196319
Epoch: 16 Validation Loss: 0.210800, Accuracy: 0.923700
Epoch: 17 Training Loss: 0.191969
Epoch: 17 Validation Loss: 0.207266, Accuracy: 0.923700
Epoch: 18 Training Loss: 0.185466
Epoch: 18 Validation Loss: 0.207138, Accuracy: 0.924200
Epoch: 19 Training Loss: 0.178241
Epoch: 19 Validation Loss: 0.204093, Accuracy: 0.924900
Epoch: 20 Training Loss: 0.176674
Epoch: 20 Validation Loss: 0.197495, Accuracy: 0.928300
#测试集acc上升,训练集loss下降,表示训练还没有结束(epoch不够)
训练完成后,可以使用torch.save保存模型参数或者整个模型,也可以在训练过程中保存模型
# NOTE(review): "FahionModel" looks like a typo for "FashionModel" — confirm before renaming
save_path = "./FahionModel.pkl"
# passing the model object serialises the whole model (architecture + weights)
torch.save(model, save_path)
gpu_info = !nvidia-smi -i 0
gpu_info = '\n'.join(gpu_info)
# 查看当前环境占用了多少显存(`!` 为 IPython 专用的 shell 魔法命令)
print(gpu_info)
STUffT&夏日回音