[PyTorch Basics] Automatic Differentiation (autograd)

import torch

Computing derivatives

x = torch.tensor([3.], requires_grad=True)
y = torch.pow(x, 2) # y = x**2

grad_1 = torch.autograd.grad(y, x, create_graph=True)  # grad_1 = dy/dx = 2x = 2 * 3 = 6
print(grad_1)
grad_2 = torch.autograd.grad(grad_1[0], x) # grad_2 = d(dy/dx)/dx = d(2x)/dx = 2
print(grad_2)
(tensor([6.], grad_fn=<MulBackward0>),)
(tensor([2.]),)
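The same first derivative can also be obtained through the .backward() API, which accumulates the gradient into x.grad instead of returning it. A minimal sketch, equivalent to the first torch.autograd.grad call above (it reuses the import torch from the top):

x = torch.tensor([3.], requires_grad=True)
y = torch.pow(x, 2)
y.backward()   # fills x.grad with dy/dx
print(x.grad)  # tensor([6.]), the same value as grad_1 above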

Example

x = torch.randn(3, requires_grad=True)
print(x)
y = x * 2
while y.data.norm() < 1000: # .norm() squares each element of y, sums them, and takes the square root: this is the L2 (Euclidean) norm
    y = y * 2

print(y)
tensor([ 1.0925, -0.0039, -0.7622], requires_grad=True)
tensor([1118.7268,   -4.0416, -780.5355], grad_fn=<MulBackward0>)
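To make the comment above concrete, here is a minimal sketch checking that .norm() really is the L2 norm: squaring, summing, and taking the square root gives the same value.

t = torch.tensor([3., 4.])
print(t.norm())                    # tensor(5.)
print(torch.sqrt((t ** 2).sum()))  # tensor(5.), identical by definition of the L2 norm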
x = torch.tensor([[1., 2.]], requires_grad=True)
y = torch.zeros(1, 2)
y[0, 0] = x[0, 0] ** 2 + x[0, 0]
y[0, 1] = x[0, 1] ** 3 + x[0, 1]
z = 2 * y # note: z is defined but unused below; backward is called on y
v = torch.tensor([[1., 2.]], dtype=torch.float)
y.backward(v) # vector-Jacobian product: compute the Jacobian of y w.r.t. x, then left-multiply by v
print(x.grad) 
tensor([[ 3., 26.]])
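To see why x.grad comes out as [3., 26.], the sketch below builds the full Jacobian explicitly and left-multiplies it by v; this is exactly the vector-Jacobian product that backward(v) computes. It assumes PyTorch >= 1.5, where torch.autograd.functional.jacobian is available.

from torch.autograd.functional import jacobian

def f(x):
    return torch.stack([x[0] ** 2 + x[0], x[1] ** 3 + x[1]])

J = jacobian(f, torch.tensor([1., 2.]))  # [[2*x0+1, 0], [0, 3*x1**2+1]] = [[3., 0.], [0., 13.]]
print(torch.tensor([1., 2.]) @ J)        # tensor([ 3., 26.]), matching x.grad above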

For a detailed explanation of backward(), especially the form that takes a gradient argument, see this reference.

# Wrapping code in with torch.no_grad() stops autograd from tracking tensors that have .requires_grad=True
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
    print((x ** 2).requires_grad)
True
True
False
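Besides torch.no_grad(), the .detach() method achieves a similar effect on a per-tensor basis: it returns a new tensor that shares the same data but is cut off from the computation graph. A minimal sketch, continuing with the x defined above:

x_det = x.detach()
print(x_det.requires_grad)         # False
print((x_det ** 2).requires_grad)  # False: operations on a detached tensor are not tracked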

Exercise

x = torch.tensor([5.], requires_grad=True)
y = torch.pow(x, 2)
grad_1 = torch.autograd.grad(y, x, create_graph=True) # first derivative: dy/dx = 2x = 10
grad_2 = torch.autograd.grad(grad_1[0], x) # second derivative: d(2x)/dx = 2
print(grad_1, grad_2)
(tensor([10.], grad_fn=<MulBackward0>),) (tensor([2.]),)
