《PyTorch深度学习实践》 (PyTorch Deep Learning Practice) course: code from the Backpropagation lecture

import torch

x_data = [1.0, 2.0, 3.0]    # training samples drawn from y = 2x
y_data = [2.0, 4.0, 6.0]

w = torch.Tensor([0.0])
w.requires_grad = True    # tell autograd that gradients must be computed for w

def forward(x):       # linear model: y_hat = x * w
    return x * w

def loss(x, y):       # each call builds a new computational graph
    y_pred = forward(x)
    return (y_pred - y) ** 2

learning_rate = 0.01

print('predict (before training)', 4, forward(4).item())

for epoch in range(100):
    for x, y in zip(x_data,y_data):
        l = loss(x, y)
        l.backward()      # computes the gradient of l with respect to every tensor that requires grad and stores it in .grad (here w.grad); the graph is then freed
        print('\tgrad:', x, y, w.grad.item())      # .item() pulls the gradient value out as a Python scalar
        w.data = w.data - learning_rate * w.grad.data    # .data and .grad are themselves Tensors; updating through .data keeps the step out of the graph

        w.grad.data.zero_()         # reset the stored gradient to zero, otherwise it accumulates across iterations

    print('progress:', epoch, l.item())    # l.item(): loss of the last sample processed in this epoch

print('predict (after training)', 4, forward(4).item())
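
For a single sample the loss is (x*w - y)**2, so the value backward() writes into w.grad is d(loss)/dw = 2*x*(x*w - y). Below is a minimal standalone check of this formula against autograd; the sample values x = 2.0, y = 4.0 are chosen here only for illustration:

import torch

w = torch.tensor([0.0], requires_grad=True)
x, y = 2.0, 4.0

l = (x * w - y) ** 2
l.backward()

analytic = 2 * x * (x * w.item() - y)   # gradient computed by hand
print(w.grad.item(), analytic)          # both print -16.0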

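The manual update w.data = w.data - learning_rate * w.grad.data followed by w.grad.data.zero_() is essentially the step that torch.optim.SGD performs for you. A sketch of the same training loop written with an optimizer, assuming the same x_data and y_data lists as above:

import torch

w = torch.tensor([0.0], requires_grad=True)
optimizer = torch.optim.SGD([w], lr=0.01)    # lr plays the role of learning_rate above

for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = (x * w - y) ** 2       # same per-sample loss as loss(x, y)
        optimizer.zero_grad()      # replaces w.grad.data.zero_()
        l.backward()
        optimizer.step()           # replaces the manual w.data update

print('predict (after training)', 4, (4 * w).item())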