This post is a study-log entry for the 365-day deep learning training camp.
Reference: PyTorch in Practice | Week P6: Hollywood Celebrity Recognition
Original author: K同学啊 (available for tutoring and custom projects)
This post focuses on dynamic learning rates and on using the VGG16 network architecture.
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings
warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
device(type='cuda')
data_dir = r'E:\深度学习\data\Day15' # raw string so the backslashes are not read as escape sequences
data_dir = pathlib.Path(data_dir) # convert to a pathlib object
data_paths = list(data_dir.glob('*')) # list all subdirectories
classeNames = [str(path).split('\\')[4] for path in data_paths] # folder names; index 4 matches this particular path depth
classeNames
['Angelina Jolie',
'Brad Pitt',
'Denzel Washington',
'Hugh Jackman',
'Jennifer Lawrence',
'Johnny Depp',
'Kate Winslet',
'Leonardo DiCaprio',
'Megan Fox',
'Natalie Portman',
'Nicole Kidman',
'Robert Downey Jr',
'Sandra Bullock',
'Scarlett Johansson',
'Tom Cruise',
'Tom Hanks',
'Will Smith']
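Hardcoding the index 4 in split('\\') ties the extraction to this exact directory depth. A more portable sketch, using pathlib's .name attribute, reads the folder name directly no matter where the dataset lives:

# Portable alternative: .name is the final path component,
# so no hardcoded index is needed.
classeNames = [path.name for path in data_paths if path.is_dir()]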
data_transforms = transforms.Compose([ # preprocessing pipeline
    transforms.Resize([224, 224]), # resize to exactly 224x224 (with both sides given, the aspect ratio is not preserved)
    # transforms.CenterCrop(224),
    transforms.ToTensor(), # convert the image to a tensor and scale pixel values to [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406], # per-channel standardization with fixed mean and std
                         std=[0.229, 0.224, 0.225]) # [0.485, 0.456, 0.406] and [0.229, 0.224, 0.225] are the ImageNet mean and std that VGG16 was pretrained with
])
total_data = datasets.ImageFolder(data_dir, transform=data_transforms)
total_data
Dataset ImageFolder
Number of datapoints: 1800
Root location: E:\深度学习\data\Day15
StandardTransform
Transform: Compose(
Resize(size=[224, 224], interpolation=bilinear, max_size=None, antialias=None)
ToTensor()
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
)
total_data.class_to_idx # class-to-index mapping
{'Angelina Jolie': 0,
'Brad Pitt': 1,
'Denzel Washington': 2,
'Hugh Jackman': 3,
'Jennifer Lawrence': 4,
'Johnny Depp': 5,
'Kate Winslet': 6,
'Leonardo DiCaprio': 7,
'Megan Fox': 8,
'Natalie Portman': 9,
'Nicole Kidman': 10,
'Robert Downey Jr': 11,
'Sandra Bullock': 12,
'Scarlett Johansson': 13,
'Tom Cruise': 14,
'Tom Hanks': 15,
'Will Smith': 16}
total_data.transform # the preprocessing pipeline attached to the dataset
Compose(
Resize(size=[224, 224], interpolation=bilinear, max_size=None, antialias=None)
ToTensor()
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
)
train_size = int(0.8 * len(total_data)) # training set size (80% of the data)
val_size = len(total_data) - train_size # validation set size
train_data, val_data = torch.utils.data.random_split(total_data, [train_size, val_size]) # random train/validation split
train_data, val_data
(<torch.utils.data.dataset.Subset at 0x2a9f997a7c0>,
<torch.utils.data.dataset.Subset at 0x2a9f997a070>)
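One thing to be aware of: random_split draws a fresh permutation each run, so the train/validation membership changes between executions. A minimal sketch, passing a seeded generator (the seed 42 is an arbitrary choice), makes the split reproducible:

# Reproducible split: seed a generator and hand it to random_split.
generator = torch.Generator().manual_seed(42)
train_data, val_data = torch.utils.data.random_split(
    total_data, [train_size, val_size], generator=generator)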
batch_size = 32 # batch size
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=1) # training loader
val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=True, num_workers=1) # validation loader
for X, y in train_loader: # inspect one batch
    print('X:', X.shape, 'type:', X.dtype) # X holds the images, y the labels
    print('y:', y.shape, 'type:', y.dtype)
    break
X: torch.Size([32, 3, 224, 224]) type: torch.float32
y: torch.Size([32]) type: torch.int64
from torchvision.models import vgg16 # load the pretrained model
device = 'cuda' if torch.cuda.is_available() else 'cpu' # check for a GPU
print("Using {} device".format(device))
model = vgg16(pretrained=True) # load ImageNet-pretrained weights (newer torchvision releases use weights=VGG16_Weights.DEFAULT instead)
for param in model.parameters(): # freeze all pretrained layers
    param.requires_grad = False # no gradients for frozen weights
model.classifier[6] = nn.Linear(4096, len(classeNames)) # replace the last fully connected layer with a 17-class head
model = model.to(device) # move the model to the GPU/CPU; without this, training fails with a device mismatch
model
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=17, bias=True)
)
)
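Since every pretrained weight was frozen and only the new classifier[6] layer keeps requires_grad=True, it is worth double-checking what will actually be trained. A quick sketch counting trainable parameters:

# Confirm the freeze: only the new 4096 -> 17 head should require gradients.
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'trainable: {trainable_params} / total: {total_params}')
# expected trainable count: 4096*17 weights + 17 biases = 69,649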
# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset) # number of training samples
    num_batches = len(dataloader) # number of batches (size / batch_size, rounded up)
    train_loss, train_acc = 0, 0 # accumulated loss and number of correct predictions
    for X, y in dataloader: # fetch images and labels
        X, y = X.to(device), y.to(device)
        # Compute the prediction error
        pred = model(X) # network output
        loss = loss_fn(pred, y) # loss between predictions and ground-truth labels
        # Backpropagation
        optimizer.zero_grad() # reset gradients
        loss.backward() # backpropagate
        optimizer.step() # update the trainable parameters
        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset) # number of validation samples
    num_batches = len(dataloader) # number of batches (size / batch_size, rounded up)
    test_loss, test_acc = 0, 0
    # Disable gradient tracking during evaluation to save memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            # Compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)
            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()
    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
learning_rate = 1e-4
lambda1 = lambda epoch: 0.95 ** (epoch // 4) # multiply the base lr by another factor of 0.95 every 4 epochs
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
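lambda1 returns a multiplicative factor applied to the initial learning rate, so the rate holds steady for four epochs and then shrinks by 5% per four-epoch block. A standalone sketch (using a throwaway parameter, not the VGG model) that prints the resulting schedule:

# Standalone trace of the LambdaLR schedule defined above (dummy parameter).
p = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(p, lr=1e-4, momentum=0.9)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda1)
for e in range(12):
    print(f'epoch {e}: lr = {sched.get_last_lr()[0]:.3e}')
    opt.step()   # optimizer.step() should precede scheduler.step()
    sched.step()
# 1.000e-04 for epochs 0-3, 9.500e-05 for 4-7, 9.025e-05 for 8-11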
Alternatively, PyTorch's documentation illustrates the scheduler API with a minimal ExponentialLR example:
from torch.nn.parameter import Parameter
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR

# Note: this standalone example rebinds the name `model` to a plain list of
# Parameters, so run it in isolation or the VGG16 model above is lost.
model = [Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = SGD(model, lr=0.1)
scheduler = ExponentialLR(optimizer, gamma=0.9)

for epoch in range(20):
    for batch_idx in range(3):
        optimizer.zero_grad()
        loss = torch.randn(1, requires_grad=True) # dummy loss standing in for a real forward pass
        loss.backward()
        optimizer.step()
    scheduler.step() # decay the lr once per epoch
import copy
loss_fn = nn.CrossEntropyLoss() # cross-entropy loss
epochs = 40 # number of training epochs
train_loss, val_loss = [], [] # per-epoch training/validation loss
train_acc, val_acc = [], [] # per-epoch training/validation accuracy
best_acc = 0 # best validation accuracy seen so far

for epoch in range(epochs): # training loop
    # Update the learning rate here when using a hand-written schedule
    # adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train() # training mode
    epoch_train_acc, epoch_train_loss = train(train_loader, model, loss_fn, optimizer) # train for one epoch
    scheduler.step() # update the learning rate (when using PyTorch's scheduler API)

    model.eval() # evaluation mode
    epoch_test_acc, epoch_test_loss = test(val_loader, model, loss_fn) # validate

    # Keep a copy of the best model in best_model
    if epoch_test_acc > best_acc: # current accuracy beats the best so far
        best_acc = epoch_test_acc # update the best accuracy
        best_model = copy.deepcopy(model) # save a deep copy of the model

    train_acc.append(epoch_train_acc) # record the per-epoch metrics
    train_loss.append(epoch_train_loss)
    val_acc.append(epoch_test_acc)
    val_loss.append(epoch_test_loss)

    # Read the current learning rate from the optimizer
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss,
                          epoch_test_acc*100, epoch_test_loss, lr))

PATH = './cifar_net.pth'
torch.save(model.state_dict(), PATH) # save the model weights
print("Model saved")
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore") # suppress warnings
plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
plt.rcParams['figure.dpi'] = 100 # figure resolution
epochs_range = range(epochs)
plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
from PIL import Image

classes = list(total_data.class_to_idx) # class names

def predict_one_image(image_path, model, transform, classes): # predict a single image
    test_img = Image.open(image_path).convert('RGB') # load the image
    plt.imshow(test_img) # show the image being predicted

    test_img = transform(test_img) # apply the preprocessing pipeline
    img = test_img.to(device).unsqueeze(0) # add a batch dimension

    model.eval() # evaluation mode
    output = model(img) # forward pass

    _, pred = torch.max(output, 1) # index of the highest-scoring class
    pred_class = classes[pred] # map the index back to a class name
    print(f'Predicted class: {pred_class}')
# Predict one image from the training set
predict_one_image(image_path='E:\\深度学习\\data\\Day15\\Leonardo DiCaprio\\002_86e8aa58.jpg',
                  model=model,
                  transform=data_transforms,
                  classes=classes)
best_model.eval() # evaluation mode
epoch_test_acc, epoch_test_loss = test(val_loader, best_model, loss_fn) # evaluate the best model on the validation set
print(f'Validation accuracy of the best model: {epoch_test_acc*100:.2f}%')
In PyTorch, we can improve training by adjusting the learning rate over time. A dynamic learning rate is one that changes during training, driven by the current epoch or some other condition. Several common ways to set one up:

Learning-rate decay: the learning rate shrinks gradually as training progresses. PyTorch's torch.optim.lr_scheduler module ships many ready-made schedulers for this. An example:
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

# Create an optimizer and a learning-rate scheduler
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(100):
    # Training code
    train(...)
    # Update the learning rate at the end of each epoch
    # (since PyTorch 1.1, scheduler.step() should come after optimizer.step())
    scheduler.step()
In this example, the StepLR scheduler multiplies the learning rate by gamma after every step_size epochs; with gamma=0.1 as configured here, the learning rate drops by an order of magnitude every 10 epochs.
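A small standalone sketch (with a dummy parameter, not the model above) makes that staircase concrete:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

# Trace the StepLR staircase for the settings above (dummy parameter).
params = [torch.nn.Parameter(torch.zeros(1))]
opt = SGD(params, lr=0.1)
sched = StepLR(opt, step_size=10, gamma=0.1)
for epoch in range(1, 31):
    opt.step()
    sched.step()
    if epoch % 10 == 0:
        print(f'after epoch {epoch}: lr = {sched.get_last_lr()[0]:.0e}')
# after epoch 10: lr = 1e-02
# after epoch 20: lr = 1e-03
# after epoch 30: lr = 1e-04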
Loss-based adjustment: the learning rate is tuned in response to the loss; once the loss stops improving, the learning rate is reduced. PyTorch's torch.optim.lr_scheduler.ReduceLROnPlateau scheduler implements this. An example:
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

# Create an optimizer and a learning-rate scheduler
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=True)

for epoch in range(100):
    # Training code
    train(...)
    # At the end of each epoch, compute the validation loss and let the scheduler react to it
    val_loss = validate(...)
    scheduler.step(val_loss)
In this example, the ReduceLROnPlateau scheduler lowers the learning rate once the loss stops improving noticeably: if the loss fails to decrease for patience epochs, the learning rate is multiplied by factor.
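A standalone sketch simulating a plateau shows the mechanism: the fake validation loss below stops improving at epoch 5, and with patience shortened to 3 the learning rate gets cut a few epochs later (dummy parameter throughout):

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau

# Simulated plateau: the loss improves until epoch 5, then stays flat.
params = [torch.nn.Parameter(torch.zeros(1))]
opt = SGD(params, lr=0.1)
sched = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=3)
for epoch in range(12):
    fake_val_loss = max(1.0 - 0.1 * epoch, 0.5)
    sched.step(fake_val_loss)
    print(f'epoch {epoch}: loss = {fake_val_loss:.2f}, lr = {opt.param_groups[0]["lr"]:.0e}')
# the lr drops from 1e-01 to 1e-02 once the loss has failed to improve
# for more than patience consecutive epochs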
Cyclical learning rate: the learning rate cycles back and forth within a range, which can help the model escape local optima. PyTorch's torch.optim.lr_scheduler.CyclicLR scheduler implements this. An example:
import torch.optim as optim
from torch.optim.lr_scheduler import CyclicLR

# Create an optimizer and a learning-rate scheduler
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = CyclicLR(optimizer, base_lr=0.001, max_lr=0.1, step_size_up=100, cycle_momentum=False)

for epoch in range(100):
    # Training code
    train(...)
    # Update the learning rate (CyclicLR is typically stepped once per batch;
    # stepping per epoch as here simply makes each cycle span more epochs)
    scheduler.step()
In this example, the CyclicLR scheduler cycles the learning rate within a range: it rises from base_lr to max_lr, falls back down to base_lr, and repeats. step_size_up sets how many scheduler steps the climb from base_lr to max_lr takes, and cycle_momentum controls whether the optimizer's momentum is cycled inversely to the learning rate.
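A standalone trace confirms the triangular pattern; step_size_up is shortened to 4 here just to show a full cycle quickly (dummy parameter):

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import CyclicLR

# Trace one triangular cycle of CyclicLR (dummy parameter).
params = [torch.nn.Parameter(torch.zeros(1))]
opt = SGD(params, lr=0.1)
sched = CyclicLR(opt, base_lr=0.001, max_lr=0.1, step_size_up=4,
                 cycle_momentum=False)
for i in range(9):
    print(f'step {i}: lr = {opt.param_groups[0]["lr"]:.4f}')
    opt.step()
    sched.step()
# the lr climbs 0.001 -> 0.1 over 4 steps, falls back to 0.001, and repeats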
This week's run hit a snag: training raised AttributeError: 'list' object has no attribute 'train', first with my own rewrite and then with K同学's reference code as well. In hindsight the likely culprit is the standalone ExponentialLR snippet above, which rebinds the name model to a plain Python list of Parameters; the later call to model.train() in the training loop then fails with exactly this error. Skipping that snippet, or giving its variable a different name, should resolve it.