PyTorch autograd: backward() + variable.grad

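Before the full linear-regression example, a minimal sketch of the mechanism named in the title: calling backward() on a scalar fills the .grad field of every tensor created with requires_grad=True. The names x and y below are throwaway names for illustration only.

import torch

x = torch.arange(4.0, requires_grad=True)  # leaf tensor tracked by autograd
y = (x * x).sum()                          # scalar built from tracked operations
y.backward()                               # computes dy/dx and stores it in x.grad
print(x.grad)                              # tensor([0., 2., 4., 6.]), i.e. 2*x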

# Parameters to learn; requires_grad=True tells autograd to track their gradients
w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

def linreg(X, w, b):
    # The linear regression model: y_hat = Xw + b
    return torch.matmul(X, w) + b

def squared_loss(y_hat, y):
    # Squared loss, element-wise: (y_hat - y)^2 / 2
    return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

def sgd(params, lr, batch_size):
    # Minibatch stochastic gradient descent: update each parameter in place,
    # then reset its gradient so the next backward() starts from zero
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
            
lr = 0.03            # Learning rate
num_epochs = 3       # Number of passes (epochs) over the training set
net = linreg         # Our model: plain linear regression
loss = squared_loss  # Squared loss, (y_hat - y)^2 / 2
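
The training loop below pulls minibatches from data_iter(batch_size, features, labels); data_iter, features, labels, and batch_size are assumed to come from an earlier data-synthesis step and are not defined in this snippet. A minimal sketch of such a minibatch iterator could look like this:

import random

def data_iter(batch_size, features, labels):
    # Shuffle example indices and yield random, non-overlapping minibatches
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        batch_indices = torch.tensor(indices[i: i + batch_size])
        yield features[batch_indices], labels[batch_indices]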

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y)  # Minibatch loss in X and y
        l.sum().backward()  # Compute the gradient of l with respect to w and b
        # backward() stores the gradients in w.grad and b.grad; sgd reads them and then zeroes them
        sgd([w, b], lr, batch_size)  # Update parameters using their gradients
    with torch.no_grad():  # Evaluate on the full dataset without tracking gradients
        train_l = loss(net(features, w, b), labels)
        print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')
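
One detail worth calling out: autograd accumulates gradients across backward() calls, which is why sgd calls param.grad.zero_() after every update. A small sketch of what would happen otherwise (w_demo is just a throwaway name for illustration):

w_demo = torch.ones(2, requires_grad=True)
(w_demo * 3).sum().backward()
print(w_demo.grad)   # tensor([3., 3.])
(w_demo * 3).sum().backward()
print(w_demo.grad)   # tensor([6., 6.]) -- gradients add up until .zero_() is called
w_demo.grad.zero_()  # reset, as sgd does after each parameter update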
