PyTorch Autograd Mechanism: Code Implementation (Understand-at-a-Glance Series!!!)

Table of Contents

  • PyTorch Autograd Mechanism: Code Implementation
    • Code Implementation

PyTorch Autograd Mechanism: Code Implementation

Code Implementation

import torch
import torch.nn as nn
import numpy as np

#########———————— Define the model ————————#############
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)  # single fully connected layer: y = Wx + b

    def forward(self, x):
        out = self.linear(x)  # forward pass through the linear layer
        return out
#########———————— Define the number of input and output features ————————#############
input_dim = 1
output_dim = 1

model = LinearRegressionModel(input_dim, output_dim)
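
Every parameter that nn.Linear registers is created with requires_grad=True, which is what lets autograd track the operations applied to it. A quick check (my own minimal sketch, not part of the original code):

# Inspect the trainable parameters: a 1x1 weight matrix and a 1-element bias,
# both tracked by autograd (requires_grad=True).
for name, param in model.named_parameters():
    print(name, param.shape, param.requires_grad)
# linear.weight torch.Size([1, 1]) True
# linear.bias torch.Size([1]) True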

#########———————— Move the model from CPU to GPU (when available) ————————#############
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

#########———————— Define the loss function ————————#############
criterion = nn.MSELoss()
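
MSELoss reduces the squared errors to a single scalar, and calling backward() on a scalar is what triggers autograd. Here is a self-contained illustration of that mechanism (a minimal sketch of my own, independent of the model above):

# A scalar computed from a tensor with requires_grad=True builds a graph;
# backward() walks that graph and fills in .grad.
w = torch.tensor(3.0, requires_grad=True)
x = torch.tensor(2.0)
loss = (w * x - 1.0) ** 2   # (3*2 - 1)^2 = 25
loss.backward()             # d(loss)/dw = 2*(w*x - 1)*x = 2*5*2 = 20
print(w.grad)               # tensor(20.)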

#########———————— Define the optimizer ————————#############
learning_rate = 0.01

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
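
The training loop below reads x_train and y_train, which this excerpt never defines. A minimal sketch that makes it runnable, assuming synthetic data for the line y = 2x + 1 (the float32 dtype matters: torch.from_numpy keeps the NumPy dtype, and nn.Linear's weights are float32):

# Synthetic training data for y = 2x + 1, shaped (N, 1) to match
# input_dim = output_dim = 1.
x_train = np.arange(11, dtype=np.float32).reshape(-1, 1)
y_train = 2 * x_train + 1  # float32 arithmetic keeps the dtype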
#########———————— Training loop: iterate and update ————————#############
epochs = 1000
for epoch in range(epochs):
    epoch += 1  # make the epoch counter 1-based for logging

    inputs = torch.from_numpy(x_train).to(device)
    labels = torch.from_numpy(y_train).to(device)

    optimizer.zero_grad()  # clear gradients; otherwise they accumulate across iterations

    outputs = model(inputs)  # forward pass

    loss = criterion(outputs, labels)  # compute the loss

    loss.backward()  # backward pass: autograd computes all gradients

    optimizer.step()  # update all parameters

    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))  # print the loss
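
Once training converges, inference should run with autograd switched off so that no computation graph is built. A minimal sketch, assuming the model and x_train defined above:

# torch.no_grad() disables gradient tracking: faster and lighter for inference.
with torch.no_grad():
    predicted = model(torch.from_numpy(x_train).to(device))
print(predicted.cpu().numpy()[:3])  # predictions should approach y = 2x + 1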

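The zero_grad() call deserves a closing note: backward() accumulates into .grad rather than overwriting it, which is exactly the gradient build-up the comment in the loop warns about. A minimal sketch of the accumulation behavior (my own illustration):

w = torch.tensor(1.0, requires_grad=True)
(w * 2).backward()
print(w.grad)        # tensor(2.)
(w * 2).backward()   # no zeroing in between
print(w.grad)        # tensor(4.) -- the second gradient was added, not assigned
w.grad.zero_()       # conceptually what optimizer.zero_grad() does per parameter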