Contents
I. Preliminaries
1. Set up the GPU
2. Load the data
II. Build a simple CNN network
III. Train the model
1. Set hyperparameters
2. Write the training function
3. Write the test function
4. Set up a dynamic learning rate
5. Train
IV. Visualize the results
1. Loss and Accuracy plots
2. Predict a specified image
V. Dynamic learning rates
1. torch.optim.lr_scheduler.StepLR
2. lr_scheduler.LambdaLR
3. lr_scheduler.MultiStepLR
*VI. Going further
1. Model training - set hyperparameters - switch to the Adam optimizer
2. On top of 1: model training - dynamic learning-rate adjustment
3. On top of 1: model training - fixed-interval adjustment
This post is a learning-record blog for the [365天深度学习训练营] (365-day deep learning training camp)
Reference article: [Pytorch实战 | 第P5周:运动鞋识别] (PyTorch in practice, week P5: sneaker recognition)
Original author: [K同学啊 | tutoring and custom projects available]
Requirements:
Advanced (optional):
- Language environment: Python 3.8
- IDE: PyCharm
- Deep learning framework: PyTorch
- Dataset: K同学啊's Baidu Netdisk / Heywhale (和鲸)
I. Preliminaries
1. Set up the GPU

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
Output: cuda

2. Load the data

After unzipping the dataset, there are test and train image folders, each containing adidas and nike subfolders.
data_dir = './46-data/'
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
classeNames = [path.name for path in data_paths]  # take each subfolder name (portable across OSes)
print(classeNames)
Output: ['test', 'train']

Image transforms, printed below. This uses the torchvision.transforms.Compose() class; interested readers can refer to this post: torchvision.transforms.Compose()详解【Pytorch手册】.
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize input images to a uniform size
    # transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize --> roughly a standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # mean and std were computed by random sampling from the dataset (these are the widely used ImageNet statistics)
])

test_transform = transforms.Compose([
    transforms.Resize([224, 224]),  # resize input images to a uniform size
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize --> roughly a standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])
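The comment above says the mean/std were computed by sampling the dataset. For reference, here is a minimal sketch (not from the original post; stat_ds and stat_dl are hypothetical helper names) of how per-channel statistics could be estimated from this training set, reusing the imports above:

stat_ds = datasets.ImageFolder("./46-data/train/",
                               transform=transforms.Compose([
                                   transforms.Resize([224, 224]),
                                   transforms.ToTensor()]))
stat_dl = torch.utils.data.DataLoader(stat_ds, batch_size=64)

n_pixels = 0
channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
for X, _ in stat_dl:                 # X: (N, 3, 224, 224), values in [0, 1]
    n_pixels += X.shape[0] * X.shape[2] * X.shape[3]
    channel_sum += X.sum(dim=[0, 2, 3])
    channel_sq_sum += (X ** 2).sum(dim=[0, 2, 3])

mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()  # Var[X] = E[X^2] - E[X]^2
print(mean, std)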
train_dataset = datasets.ImageFolder("./46-data/train/", transform=train_transforms)
test_dataset = datasets.ImageFolder("./46-data/test/", transform=test_transform)
print(train_dataset.class_to_idx)
Output: {'adidas': 0, 'nike': 1}
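The human-readable class list is also available directly on the dataset (a small addition here; classes is a standard ImageFolder attribute):

print(train_dataset.classes)  # ['adidas', 'nike']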
batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)  # didn't bother tuning worker threads; 0 is fine
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=0)
for X, y in test_dl:
print("Shape of X [N, C, H, W]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
Shape of X [N, C, H, W]: torch.Size([32, 3, 224, 224])
Shape of y: torch.Size([32]) torch.int64
II. Build a simple CNN network

The convolution and pooling setup is basically the same as in week 4. Each 5x5 convolution with no padding shrinks the feature map by 4 pixels per side (W_out = W - K + 1), and each 2x2 max pool halves it, which is where the size comments below come from.
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),   # 3 in / 12 out channels -> 12*220*220
            nn.BatchNorm2d(12),
            nn.ReLU())
        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),  # 12*216*216
            nn.BatchNorm2d(12),
            nn.ReLU())
        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2))                              # 12*108*108
        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),  # 24*104*104
            nn.BatchNorm2d(24),
            nn.ReLU())
        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),  # 24*100*100
            nn.BatchNorm2d(24),
            nn.ReLU())
        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2))                              # 24*50*50
        self.dropout = nn.Sequential(
            nn.Dropout(0.2))
        self.fc = nn.Sequential(
            nn.Linear(24 * 50 * 50, len(classeNames)))    # len(classeNames) happens to be 2 here

    def forward(self, x):
        batch_size = x.size(0)
        x = self.conv1(x)  # conv - BN - ReLU
        x = self.conv2(x)  # conv - BN - ReLU
        x = self.pool3(x)  # pool
        x = self.conv4(x)  # conv - BN - ReLU
        x = self.conv5(x)  # conv - BN - ReLU
        x = self.pool6(x)  # pool
        x = self.dropout(x)
        x = x.view(batch_size, -1)  # flatten to the shape the fully connected layer expects: (batch, 24*50*50); -1 infers 24*50*50 automatically
        x = self.fc(x)
        return x
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = Model().to(device)
print(model)
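As a quick sanity check on the size comments above (a small addition, not in the original post), you can push a dummy batch through the network:

dummy = torch.randn(1, 3, 224, 224).to(device)
print(model(dummy).shape)  # torch.Size([1, 2]) -- 24*50*50 = 60000 features flattened into 2 logits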
III. Train the model
1. Set hyperparameters

Unlike earlier weeks, this week uses a dynamic learning rate (still with SGD).
learn_rate = 1e-4  # initial learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
loss_fn = nn.CrossEntropyLoss()  # loss function
2. Write the training function

Same as week 4.
# training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)   # number of batches (size/batch_size, rounded up)

    train_loss, train_acc = 0, 0    # initialize training loss and accuracy

    for X, y in dataloader:         # fetch images and their labels
        X, y = X.to(device), y.to(device)

        # compute the prediction error
        pred = model(X)             # network output
        loss = loss_fn(pred, y)     # loss between the network output and the ground-truth targets

        # backpropagation
        optimizer.zero_grad()       # zero out the gradients
        loss.backward()             # backpropagate
        optimizer.step()            # update the weights

        # accumulate acc and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches

    return train_acc, train_loss
3. Write the test function

The test function differs little from the training function, but since no gradient descent is performed to update the network weights, no optimizer is needed (so the test-function code is the same as in weeks 1-4).
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)   # number of batches
    test_loss, test_acc = 0, 0

    # when not training, stop tracking gradients to save memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches

    return test_acc, test_loss
4. Set up a dynamic learning rate

def adjust_learning_rate(optimizer, epoch, start_lr):
    # decay the lr to 0.92 of its previous value every 2 epochs
    lr = start_lr * (0.92 ** (epoch // 2))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
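To see the staircase this produces (a quick check added here, not in the original post):

# lr for the first 6 epochs with start_lr = 1e-4; the value changes every 2 epochs
for epoch in range(6):
    print(epoch, 1e-4 * (0.92 ** (epoch // 2)))
# approx: 1e-04, 1e-04, 9.2e-05, 9.2e-05, 8.46e-05, 8.46e-05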
You can also call the official dynamic learning-rate API (same effect as the function above; use one or the other):

# when calling the official dynamic learning-rate API
lambda1 = lambda epoch: 0.92 ** (epoch // 2)
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)  # choose the adjustment rule
5. Train

Largely the same as previous weeks; the only difference is that, because the learning rate is custom-scheduled, it has to be refreshed at the start of each epoch.
epochs = 40

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    # refresh the learning rate (when using the custom schedule)
    adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
    # scheduler.step()  # update the learning rate (when using the official scheduler API instead)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # read back the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss,
                          epoch_test_acc*100, epoch_test_loss, lr))

print('Done')
You can also add code to save the model:
# save the model
PATH = './model.pth'  # filename for the saved parameters
torch.save(model.state_dict(), PATH)

# load the parameters back into the model
model.load_state_dict(torch.load(PATH, map_location=device))
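If you load the weights in a fresh session instead, build a model instance first (a sketch assuming the Model class above is available):

model = Model().to(device)
model.load_state_dict(torch.load(PATH, map_location=device))
model.eval()  # switch to inference mode before predicting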
The fan whirred away, and accuracy came in just under 78%; I saved the model while I was at it.
IV. Visualize the results
1. Loss and Accuracy plots

import matplotlib.pyplot as plt
# suppress warnings
import warnings

warnings.filterwarnings("ignore")             # ignore warning messages
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly
plt.rcParams['figure.dpi'] = 100              # figure resolution
epochs_range = range(epochs)
plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
2. Predict a specified image

Since the model was already saved above, we can simply load and use it here.
⭐torch.squeeze() explained: compresses a tensor's dimensions, removing dimensions of size 1.
torch.squeeze(input, dim=None, *, out=None)
⭐torch.unsqueeze(): expands a tensor's dimensions, inserting a dimension of size 1 at the given position.
torch.unsqueeze(input, dim)
Week 4 covers both functions in detail with examples, so they are not repeated here.
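Still, a one-glance shape reminder (added here):

x = torch.randn(1, 3, 224, 224)
print(torch.squeeze(x).shape)          # torch.Size([3, 224, 224]) -- the size-1 dim is removed
print(torch.unsqueeze(x[0], 0).shape)  # torch.Size([1, 3, 224, 224]) -- a size-1 dim is inserted at position 0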
from PIL import Image

classes = list(train_dataset.class_to_idx)

def predict_one_image(image_path, model, transform, classes):
    test_img = Image.open(image_path).convert('RGB')
    # plt.imshow(test_img)  # show the image being predicted

    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)  # insert a batch dimension at position 0

    model.eval()
    output = model(img)

    _, pred = torch.max(output, 1)
    pred_class = classes[pred]
    print(f'Predicted class: {pred_class}')
# predict one image from the test set
predict_one_image(image_path='./46-data/test/adidas/9.jpg',
                  model=model,
                  transform=train_transforms,
                  classes=classes)
V. Dynamic learning rates
1. torch.optim.lr_scheduler.StepLR

Fixed-interval adjustment: every step_size epochs, the learning rate is decayed once, multiplied by gamma.

torch.optim.lr_scheduler.StepLR(optimizer, step_size, gamma=0.1, last_epoch=-1)

- optimizer (Optimizer): the previously defined optimizer instance to be scheduled
- step_size (int): the decay period; the lr decays once every step_size epochs
- gamma (float): multiplicative factor of learning-rate decay. Default: 0.1
Usage example:

optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
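If you want to see when the drops happen, a sketch added here (get_last_lr() is the standard scheduler accessor; net is a placeholder model):

for epoch in range(12):
    # ... train for one epoch here ...
    print(epoch, scheduler.get_last_lr())  # 1e-3 for epochs 0-4, 1e-4 for 5-9, 1e-5 from 10
    scheduler.step()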
2. lr_scheduler.LambdaLR

Updates the learning rate according to a user-defined function.

torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1, verbose=False)

- optimizer (Optimizer): the previously defined optimizer instance to be scheduled
- lr_lambda (function): the function that computes the lr multiplier
Usage example:

lambda1 = lambda epoch: 0.92 ** (epoch // 2)  # the lr adjustment function
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)  # choose the adjustment rule
3. lr_scheduler.MultiStepLR

Adjusts the learning rate at specific epochs.

torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1, last_epoch=-1, verbose=False)

- optimizer (Optimizer): the previously defined optimizer instance to be scheduled
- milestones (list): a list of epoch indices at which the lr changes; must be in increasing order
- gamma (float): multiplicative factor of learning-rate decay. Default: 0.1
Usage example:

optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=[2, 6, 15],  # epochs at which to adjust the lr
                                                 gamma=0.1)
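The same kind of check shows the milestone behaviour (an added sketch, continuing from the example above):

for epoch in range(18):
    print(epoch, scheduler.get_last_lr())  # 1e-3 for epochs 0-1, 1e-4 for 2-5, 1e-5 for 6-14, 1e-6 from 15
    scheduler.step()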
More official dynamic learning-rate options are documented at: torch.optim — PyTorch 1.13 documentation

Example of calling the official API (this is the schematic pattern from the PyTorch docs; dataset and loss_fn are placeholders, and model here is just a parameter list, so it shows the call pattern rather than runnable training code):

from torch.nn import Parameter
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR

model = [Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = SGD(model, 0.1)
scheduler = ExponentialLR(optimizer, gamma=0.9)

for epoch in range(20):
    for input, target in dataset:
        optimizer.zero_grad()
        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()
    scheduler.step()  # step the scheduler once per epoch, after the optimizer steps
*VI. Going further
1. Model training - set hyperparameters - switch to the Adam optimizer

Our test accuracy was just under 78%, short of the target, so the training setup needs changes.
Following the week-4 experience, switch to the Adam optimizer, which adapts its per-parameter step sizes on its own (the lr hyperparameter itself still only changes if you schedule it).

optimizer = torch.optim.Adam(model.parameters(), lr=learn_rate)

2. On top of 1: model training - dynamic learning-rate adjustment

Originally the lr decayed to 92% of its previous value every 2 epochs; change that to 98%:
def adjust_learning_rate(optimizer, epoch, start_lr):
    # decay to 0.98 of the previous value every 2 epochs
    lr = start_lr * (0.98 ** (epoch // 2))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
But the final accuracy actually dropped, to around 82%... so let's change the interval from every 2 epochs to every 10 and try again:
def adjust_learning_rate(optimizer, epoch, start_lr):
    # decay to 0.98 of the previous value every 10 epochs
    lr = start_lr * (0.98 ** (epoch // 10))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
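Some quick arithmetic (added here) shows how much gentler the second schedule is over the full 40 epochs:

print(0.98 ** (40 // 2))   # ~0.668 -> lr ends at about 2/3 of its starting value
print(0.98 ** (40 // 10))  # ~0.922 -> lr stays above 90% of its starting value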
That gave about 84% accuracy... maybe my tuning was off; I'll set this method aside for now.

3. On top of 1: model training - fixed-interval adjustment
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
But accuracy came out around 80%... you can test the remaining scheduler variants yourselves.
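For reference, a sketch of the full wiring for this variant, reusing the train/test functions defined above (not spelled out in the original post):

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
    scheduler.step()  # replaces the manual adjust_learning_rate call

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)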