PyTorch provides the following learning rate schedulers in torch.optim.lr_scheduler:
StepLR
MultiStepLR
LambdaLR
ExponentialLR
CosineAnnealingLR
ReduceLROnPlateau

Import the required packages:
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR, MultiStepLR, LambdaLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau
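The snippets below call Net.parameters(), but the model definition itself is not shown in this section. As a minimal placeholder (an assumption added here, not the original model), a single linear layer is enough for the optimizers and schedulers to have parameters to manage:

# Placeholder model (assumption: the original Net definition is not shown in this section).
# Any nn.Module works; the schedulers only act on the optimizer's parameter groups.
Net = nn.Linear(10, 1)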
LambdaLR
LambdaLR multiplies the initial learning rate by a user-supplied function of the epoch; here the factor is 1 / (epoch + 1), so the learning rate decays as initial_lr / (epoch + 1).
initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = LambdaLR(optimizer_1, lr_lambda=lambda epoch: 1 / (epoch + 1))  # lr = initial_lr * 1 / (epoch + 1)
print("Initial learning rate:", optimizer_1.defaults['lr'])
for epoch in range(1, 11):
    optimizer_1.zero_grad()
    optimizer_1.step()  # step the optimizer before the scheduler
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    scheduler_1.step()
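Instead of reading optimizer_1.param_groups directly, recent PyTorch versions also expose the most recently computed learning rate on the scheduler itself via get_last_lr(); a one-line aside (not part of the original code):

# get_last_lr() returns one learning rate per parameter group.
print("Current learning rate from the scheduler:", scheduler_1.get_last_lr()[0])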
StepLR
StepLR multiplies the learning rate by gamma every step_size epochs; here it is divided by 10 every 2 epochs.

initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = StepLR(optimizer_1, step_size=2, gamma=0.1)  # lr *= 0.1 every 2 epochs
print("Initial learning rate:", optimizer_1.defaults['lr'])
for epoch in range(1, 11):
    optimizer_1.zero_grad()
    optimizer_1.step()
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    scheduler_1.step()
MultiStepLR
MultiStepLR multiplies the learning rate by gamma at each epoch listed in milestones.

initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = MultiStepLR(optimizer_1, milestones=[2, 6, 8], gamma=0.5)  # halve the lr at epochs 2, 6 and 8
print("Initial learning rate:", optimizer_1.defaults['lr'])
for epoch in range(1, 11):
    optimizer_1.zero_grad()
    optimizer_1.step()
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    scheduler_1.step()
ExponentialLR
ExponentialLR multiplies the learning rate by gamma after every epoch, i.e. lr = initial_lr * gamma ** epoch.
initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = ExponentialLR(optimizer_1, gamma=0.1)  # lr *= 0.1 every epoch
print("Initial learning rate:", optimizer_1.defaults['lr'])
for epoch in range(1, 11):
    optimizer_1.zero_grad()
    optimizer_1.step()
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    scheduler_1.step()
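As a quick sanity check (a sketch added here, not part of the original code), the ExponentialLR schedule can also be written in closed form: after n calls to scheduler_1.step() the learning rate is initial_lr * gamma ** n, which matches the value printed for epoch n + 1 above.

# Closed-form ExponentialLR schedule: lr_n = initial_lr * gamma ** n
gamma = 0.1  # same gamma as passed to ExponentialLR above
for n in range(10):
    print("Learning rate after %d scheduler steps: %f" % (n, initial_lr * gamma ** n))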
CosineAnnealingLR
CosineAnnealingLR anneals the learning rate from the initial value down to eta_min (default 0) along a cosine curve over T_max epochs and then back up, so with T_max=10 the curve below repeats every 20 epochs.

import matplotlib.pyplot as plt

initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = CosineAnnealingLR(optimizer_1, T_max=10)
print("Initial learning rate:", optimizer_1.defaults['lr'])
lr_list = []
for epoch in range(1, 101):
    optimizer_1.zero_grad()
    optimizer_1.step()
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    lr_list.append(optimizer_1.param_groups[0]['lr'])
    scheduler_1.step()

# Plot how the learning rate changes over the epochs
plt.plot(list(range(1, 101)), lr_list)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("learning rate curve")
plt.show()
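For reference (an added sketch, not part of the original code), the curve above should be reproducible from the closed-form cosine annealing formula given in the PyTorch docs, eta_t = eta_min + (initial_lr - eta_min) * (1 + cos(pi * t / T_max)) / 2, where t counts scheduler steps and eta_min defaults to 0:

import math

# Closed-form cosine annealing curve, for comparison with the plot above.
eta_min = 0.0  # CosineAnnealingLR's default eta_min
T_max = 10
expected = [eta_min + (initial_lr - eta_min) * (1 + math.cos(math.pi * t / T_max)) / 2
            for t in range(100)]
plt.plot(list(range(1, 101)), expected)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("closed-form cosine annealing")
plt.show()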
ReduceLROnPlateau
ReduceLROnPlateau watches a metric passed to step() and multiplies the learning rate by factor once the metric has stopped improving for more than patience epochs.

initial_lr = 0.1
optimizer_1 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_1 = ReduceLROnPlateau(optimizer_1, mode='min', factor=0.1, patience=2)  # tolerate 2 epochs without improvement before reducing the lr
print("Initial learning rate:", optimizer_1.defaults['lr'])
lr_list = []
for epoch in range(1, 15):
    train_loss = 2  # constant dummy loss, so the monitored metric never improves
    optimizer_1.zero_grad()
    optimizer_1.step()
    print("Learning rate at epoch %d: %f" % (epoch, optimizer_1.param_groups[0]['lr']))
    lr_list.append(optimizer_1.param_groups[0]['lr'])
    scheduler_1.step(train_loss)  # the monitored metric is passed to step()
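Because the loss above is constant, the learning rate simply keeps dropping every few epochs. The sketch below (the names optimizer_2, scheduler_2 and the simulated loss values are illustrative additions, not from the original) uses a loss that improves for a few epochs and then plateaus, which makes the role of patience easier to see:

# Simulated losses: improvement for 5 epochs, then a plateau.
optimizer_2 = torch.optim.Adam(Net.parameters(), lr=initial_lr)
scheduler_2 = ReduceLROnPlateau(optimizer_2, mode='min', factor=0.1, patience=2)
losses = [1.0, 0.8, 0.6, 0.5, 0.4] + [0.4] * 9
for epoch, loss in enumerate(losses, start=1):
    optimizer_2.zero_grad()
    optimizer_2.step()
    scheduler_2.step(loss)
    # The lr is reduced once the loss has failed to improve for more than patience (2) epochs.
    print("epoch %d, loss %.2f, lr %f" % (epoch, loss, optimizer_2.param_groups[0]['lr']))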