Reference:
PyTorch official docs: SAVE AND LOAD THE MODEL
PyTorch official docs: OPTIMIZING MODEL PARAMETERS
The above are the official PyTorch tutorials. This post mainly translates and reorganizes them, adding some of my own understanding, and is intended only for my own future review and reference.
Once the model has been built and the data loaded, the next step is to optimize the model's parameters, i.e. training.
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda

# Download the FashionMNIST training and test sets
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)
test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)

# Wrap the datasets in DataLoaders that yield batches of 64 samples
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork()
Next, define the training hyperparameters: the number of epochs (how many passes are made over the dataset), the batch size, and the learning rate.
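A minimal sketch of these hyperparameters (the concrete values are assumptions based on the official tutorial's defaults, not prescribed here; batch_size matches the DataLoaders above and epochs is reused by the training loop below):

learning_rate = 1e-3  # step size for SGD (assumed value; tune as needed)
batch_size = 64       # samples per gradient update (the DataLoaders above use 64 directly)
epochs = 10           # full passes over the training data (also set before the loop below)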
Each epoch consists of two phases: 1. the training phase, in which the network parameters are updated so that the outputs converge toward the expected results; 2. the validation phase, in which the test data is fed through the network to evaluate its performance. These two phases are implemented below as train_loop and test_loop.
The loss function measures how well the network is performing by comparing the expected output against the network's actual output.
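As a quick illustration (the dummy tensors below are made up purely for demonstration and reuse the imports at the top of this post), nn.CrossEntropyLoss takes raw logits and integer class labels and returns a scalar loss:

dummy_logits = torch.randn(3, 10)       # pretend model outputs: batch of 3 samples, 10 classes
dummy_labels = torch.tensor([1, 5, 9])  # ground-truth class indices
print(nn.CrossEntropyLoss()(dummy_logits, dummy_labels))  # prints a single scalar loss tensor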
Optimization is the process of adjusting the model parameters within each epoch to reduce the loss. It consists of three steps, all of which appear in train_loop below: 1. call optimizer.zero_grad() to reset the gradients of the model parameters to zero; 2. call loss.backward() to backpropagate the loss; 3. once the gradients are available, call optimizer.step() to update the parameters.
def train_loop(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    with torch.no_grad():  # gradients are not needed while evaluating
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
import torch
import torchvision.models as models
# Load a pretrained model and save its parameters (stored in its internal state dict) to disk
model = models.vgg16(pretrained=True)
torch.save(model.state_dict(), 'model_weights.pth')
# Re-create the model and load the saved parameters
model = models.vgg16() # we do not specify pretrained=True, i.e. do not load default weights
model.load_state_dict(torch.load('model_weights.pth'))
model.eval()  # switch dropout / batch norm layers to evaluation mode before inference
# To save the model structure together with its parameters
torch.save(model, 'model.pth')
# Load the model structure along with its parameters
model = torch.load('model.pth')
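One practical note that goes slightly beyond the snippet above (map_location is a standard torch.load argument): if the weights were saved on a GPU machine and later need to be loaded on a CPU-only machine, the tensors can be remapped at load time, roughly as follows:

# Sketch: remap tensors saved on a GPU onto the CPU while loading the state dict
model = models.vgg16()
state_dict = torch.load('model_weights.pth', map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
model.eval()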