PyTorch learning: the momentum method (momentum)

The theory behind momentum is not covered here; see this article instead: https://blog.csdn.net/tsyccnh/article/details/76270707
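
For orientation, the update rule the code below implements, with velocity v, learning rate lr, and momentum coefficient gamma, is:

v = gamma * v + lr * grad
param = param - v

The velocity accumulates an exponentially decaying average of past gradients, which damps oscillations and accelerates progress along directions where successive gradients agree.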

The code implementation follows:

# -*- coding: utf-8 -*-
"""
Created on Sun Sep  2 15:54:06 2018

@author: www
"""

import numpy as np
import torch
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader
from torch import nn
from torch.autograd import Variable
import time
import matplotlib.pyplot as plt

def get_data(x):
    x = np.array(x, dtype='float32') / 255  # scale pixel values to [0, 1]
    x = (x - 0.5) / 0.5  # normalize to [-1, 1]
    x = x.reshape((-1,))  # flatten to a 784-dim vector
    x = torch.from_numpy(x)
    return x

train_set = MNIST('./data', train=True, transform=get_data, download=True)  # load the dataset with the transform defined above
test_set = MNIST('./data', train=False, transform=get_data, download=True)
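
# quick sanity check (my addition, not in the original post): each sample
# should come out flattened and normalized
img, label = train_set[0]
print(img.shape)             # torch.Size([784])
print(img.min(), img.max())  # roughly -1.0 and 1.0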

# define the loss function
criterion = nn.CrossEntropyLoss()
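# note: nn.CrossEntropyLoss applies log-softmax internally, which is why the
# network defined below ends in a plain nn.Linear(200, 10) producing raw logits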

# momentum update implemented by hand
def sgd_momentum(parameters, vs, lr, gamma):
    for param, v in zip(parameters, vs):
        v[:] = gamma * v + lr * param.grad.data  # update the velocity in place
        param.data = param.data - v              # step against the velocity

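# aside (my addition, not in the original post): for a constant learning rate
# this hand-rolled update matches torch.optim.SGD(momentum=gamma), which keeps
# buf = gamma * buf + grad and applies param -= lr * buf, so v == lr * buf at
# every step. A minimal check:
p1 = torch.nn.Parameter(torch.ones(3))
p2 = torch.nn.Parameter(torch.ones(3))
opt = torch.optim.SGD([p2], lr=1e-2, momentum=0.9)
v0 = [torch.zeros_like(p1.data)]
for _ in range(3):
    for p in (p1, p2):
        p.grad = None              # clear old gradients
        (p ** 2).sum().backward()  # toy loss, gradient is 2 * p
    sgd_momentum([p1], v0, 1e-2, 0.9)
    opt.step()
print(torch.allclose(p1.data, p2.data))  # True
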
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
# define the network with Sequential
net = nn.Sequential(
    nn.Linear(784, 200),
    nn.ReLU(),
    nn.Linear(200, 10),
)

# initialize the velocities as zero tensors with the same shapes as the parameters
vs = []
for param in net.parameters():
    vs.append(torch.zeros_like(param.data))

# start training
losses = []
start = time.time()
for e in range(5):
    train_loss = 0
    for im, label in train_data:
        im = Variable(im)  # Variable is a no-op wrapper in PyTorch >= 0.4
        label = Variable(label)
        # forward pass
        out = net(im)
        loss = criterion(out, label)
        # backward pass
        net.zero_grad()
        loss.backward()
        sgd_momentum(net.parameters(), vs, 1e-2, 0.9)
        # record the loss
        train_loss += loss.item()
        losses.append(loss.item())
    print('epoch: {}, Train Loss: {:.6f}'.format(e, train_loss / len(train_data)))
end = time.time()
print('Time={}'.format(end - start))

x_axis = np.linspace(0, 5, len(losses), endpoint=True)
plt.semilogy(x_axis, losses, label='custom momentum: 0.9')
plt.legend(loc='best')

# Of course, PyTorch already implements momentum: simply pass momentum=0.9
# to torch.optim.SGD. This is demonstrated below.
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
# define the same network with Sequential
net = nn.Sequential(
    nn.Linear(784, 200),
    nn.ReLU(),
    nn.Linear(200, 10),
)

optimizer = torch.optim.SGD(net.parameters(), lr=1e-2, momentum=0.9)  # with momentum
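# aside (not in the original post): the same optimizer also supports Nesterov
# momentum via the standard nesterov flag:
# optimizer = torch.optim.SGD(net.parameters(), lr=1e-2, momentum=0.9, nesterov=True)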
# start training
losses = []
idx = 0
start = time.time()  # start timing
for e in range(5):
    train_loss = 0
    for im, label in train_data:
        im = Variable(im)
        label = Variable(label)
        # forward pass
        out = net(im)
        loss = criterion(out, label)
        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # record the loss
        train_loss += loss.item()
        if idx % 30 == 0:  # record every 30 steps
            losses.append(loss.item())
        idx += 1
    print('epoch: {}, Train Loss: {:.6f}'
          .format(e, train_loss / len(train_data)))
end = time.time()  # stop timing
print('Time used: {:.5f} s'.format(end - start))

x_axis = np.linspace(0, 5, len(losses), endpoint=True)
plt.semilogy(x_axis, losses, label='built-in momentum: 0.9')
plt.legend(loc='best')
plt.show()
