torch variable

import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([[1, 2], [3, 4]])
variable = Variable(tensor, requires_grad=True)   # wrap the tensor so autograd can track operations on it

t_out = torch.mean(tensor*tensor)       # mean of x^2 on the plain tensor (no graph is built)
v_out = torch.mean(variable*variable)   # the same computation on the Variable builds a computation graph

print(t_out)
print(v_out)

v_out.backward()    # backpropagate the error from v_out through the Variable
# v_out = 1/4 * sum(variable*variable)
# d(v_out)/d(variable) = 1/4 * 2 * variable = variable/2
# print(variable.grad)   # the gradient, i.e. the value used to update variable
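# With the print above enabled, variable.grad would be variable/2:
# [[0.5, 1.0],
#  [1.5, 2.0]]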

print(variable)               # the Variable wrapper
print(variable.data)          # the tensor stored inside the Variable
print(variable.data.numpy())  # the tensor converted to a numpy array

Output:

7.5
Variable containing:
 7.5000
[torch.FloatTensor of size 1]

Variable containing:
 1  2
 3  4
[torch.FloatTensor of size 2x2]


 1  2
 3  4
[torch.FloatTensor of size 2x2]

[[ 1.  2.]
 [ 3.  4.]]
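
The Variable wrapper shown above belongs to PyTorch 0.3 and earlier; since PyTorch 0.4, Variable has been merged into Tensor, so a tensor created with requires_grad=True tracks gradients by itself. A minimal sketch of the same example in the newer API (assuming PyTorch 0.4 or later):

import torch

x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
out = torch.mean(x * x)   # out = 1/4 * sum(x^2) = 7.5
out.backward()            # backpropagate

print(x.grad)             # d(out)/d(x) = x/2
# tensor([[0.5000, 1.0000],
#         [1.5000, 2.0000]])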
