1. Set the requires_grad attribute to False for the parameters of layers that should stay fixed and not be trained:
# Freeze the parameters of the features layers inside an nn.Module subclass
for p in self.features.parameters():
    p.requires_grad = False
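For context, a minimal sketch of where such a snippet usually lives: an nn.Module subclass that reuses a pretrained torchvision backbone (vgg16 here, purely as an illustration; the class and layer names are hypothetical), freezing its convolutional features while a fresh classifier head stays trainable:

import torch.nn as nn
from torchvision import models

class FineTuneNet(nn.Module):  # hypothetical example module
    def __init__(self, num_classes):
        super().__init__()
        backbone = models.vgg16(pretrained=True)
        self.features = backbone.features  # pretrained convolutional layers
        # Freeze the feature extractor: its parameters receive no gradient updates
        for p in self.features.parameters():
            p.requires_grad = False
        # New classification head, trained from scratch
        self.classifier = nn.Linear(512 * 7 * 7, num_classes)

    def forward(self, x):
        x = self.features(x)  # (N, 512, 7, 7) for 224x224 inputs
        x = x.flatten(1)
        return self.classifier(x)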
2. Pass only the trainable parameters to the optimizer:
param_to_optim = []
for param in self.model.parameters():
    # Skip the parameters frozen in step 1
    if not param.requires_grad:
        continue
    param_to_optim.append(param)
optimizer = torch.optim.SGD(param_to_optim, lr=0.001, momentum=0.9, weight_decay=1e-4)
Or, equivalently:
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001, momentum=0.9, weight_decay=1e-4)
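Both variants build the same optimizer. Keeping frozen parameters out of it avoids the ValueError that older PyTorch versions raised when an optimizer was given parameters with requires_grad=False. A quick sanity check (a sketch, assuming the model and optimizer from the snippets above) that only trainable parameters were passed in:

n_total = sum(p.numel() for p in model.parameters())
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_in_optim = sum(p.numel() for group in optimizer.param_groups for p in group['params'])
print(f'trainable: {n_trainable}/{n_total}, passed to optimizer: {n_in_optim}')
assert n_in_optim == n_trainable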
3. Adjust the learning rate manually during training:
def adjust_learning_rate(args, optimizer, epoch, gamma=0.1):
    # Every args.step_size epochs, decay the learning rate by a factor of gamma
    lr = args.lr * (gamma ** (epoch // args.step_size))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
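A sketch of how this function is typically driven from the training loop, followed by the built-in torch.optim.lr_scheduler.StepLR, which applies the same decay rule to each group's initial learning rate; args.epochs and train_one_epoch are hypothetical placeholders:

for epoch in range(args.epochs):
    adjust_learning_rate(args, optimizer, epoch, gamma=0.1)
    train_one_epoch(model, train_loader, optimizer)  # hypothetical helper

# Built-in equivalent: multiply each group's lr by gamma every step_size epochs
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
for epoch in range(args.epochs):
    train_one_epoch(model, train_loader, optimizer)  # hypothetical helper
    scheduler.step()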
4. Use several parameter groups inside a single optimizer, so that different groups get different learning rates:
optimizer = optim.SGD([
    {'params': net.conv_block1.parameters(), 'lr': 0.002},
    {'params': net.classifier1.parameters(), 'lr': 0.002},
    {'params': net.conv_block2.parameters(), 'lr': 0.002},
    {'params': net.classifier2.parameters(), 'lr': 0.002},
    {'params': net.conv_block3.parameters(), 'lr': 0.002},
    {'params': net.classifier3.parameters(), 'lr': 0.002},
    {'params': net.features.parameters(), 'lr': 0.0002}
], momentum=0.9, weight_decay=5e-4)
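One caveat: the adjust_learning_rate function from step 3 writes the same lr into every group, which would erase the 10x gap between the new layers and the pretrained features. A per-group variant (a sketch, not from the source) scales each group's own initial learning rate instead:

# Record the initial lr of each group once, before training starts
initial_lrs = [group['lr'] for group in optimizer.param_groups]

def adjust_learning_rate_per_group(optimizer, initial_lrs, epoch, step_size, gamma=0.1):
    decay = gamma ** (epoch // step_size)
    for group, lr0 in zip(optimizer.param_groups, initial_lrs):
        group['lr'] = lr0 * decay  # each group keeps its own scale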
5. Define several optimizers to train different modules of the network:
raw_optimizer = torch.optim.SGD(raw_parameters, lr=LR, momentum=0.9, weight_decay=WD)
concat_optimizer = torch.optim.SGD(concat_parameters, lr=LR, momentum=0.9, weight_decay=WD)
part_optimizer = torch.optim.SGD(part_parameters, lr=LR, momentum=0.9, weight_decay=WD)
partcls_optimizer = torch.optim.SGD(partcls_parameters, lr=LR, momentum=0.9, weight_decay=WD)
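A sketch of a single training step with several optimizers like these (the four names are the ones above; the loss is a placeholder): all optimizers are zeroed, one backward pass fills .grad for every parameter, then each optimizer updates only the parameters it was constructed with:

optimizers = [raw_optimizer, concat_optimizer, part_optimizer, partcls_optimizer]

def training_step(total_loss):
    for opt in optimizers:
        opt.zero_grad()
    total_loss.backward()  # one backward pass covers all sub-modules
    for opt in optimizers:
        opt.step()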
To be continued...