The difference between .data and .detach()

>>> import torch as t

.data

>>> a = t.tensor([1, 2, 3.], requires_grad=True)
>>> out = a.sigmoid()
>>> out
tensor([0.7311, 0.8808, 0.9526], grad_fn=<SigmoidBackward>)
>>> c = out.data          # c shares storage with out but is cut off from the graph
>>> c.zero_()             # the in-place modification also zeroes out
tensor([0., 0., 0.])
>>> out
tensor([0., 0., 0.], grad_fn=<SigmoidBackward>)
>>> out.sum().backward()  # backward runs without complaint...
>>> a.grad                # ...but the gradients are silently wrong
tensor([0., 0., 0.])
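
For comparison, here is a minimal sketch (my own example, not from the original session) of the same computation without touching .data; it shows what the gradient should have been, namely sigmoid'(a) = out * (1 - out):

import torch as t

a = t.tensor([1, 2, 3.], requires_grad=True)
out = a.sigmoid()
out.sum().backward()
print(a.grad)  # expected: tensor([0.1966, 0.1050, 0.0452])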

.detach()

>>> a = t.tensor([1, 2, 3.], requires_grad=True)
>>> out = a.sigmoid()
>>> print(out.grad)
None
>>> c = out.detach()      # c also shares storage with out and has requires_grad=False
>>> print(c.grad)
None
>>> c.zero_()             # the in-place modification again zeroes out
tensor([0., 0., 0.])
>>> out
tensor([0., 0., 0.], grad_fn=<SigmoidBackward>)
>>> out.sum().backward()  # this time autograd notices the modification and raises an error
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-...> in <module>()
----> 1 out.sum().backward()

~/anaconda3/lib/python3.6/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
    105                 products. Defaults to ``False``.
    106         """
--> 107         torch.autograd.backward(self, gradient, retain_graph, create_graph)
    108 
    109     def register_hook(self, hook):

~/anaconda3/lib/python3.6/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
     91     Variable._execution_engine.run_backward(
     92         tensors, grad_tensors, retain_graph, create_graph,
---> 93         allow_unreachable=True)  # allow_unreachable flag
     94 
     95 

RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [3]], which is output 0 of SigmoidBackward, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
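
In both cases .data and .detach() return a tensor that shares storage with out and has requires_grad=False. The difference is that the tensor returned by .detach() is still covered by autograd's version check (note "is at version 1; expected version 0" in the error above), so modifying it in place is caught at backward time, whereas the same modification through .data goes unnoticed and silently corrupts the gradients. For that reason .detach() is the safer choice.

If you actually need to modify the detached values, a minimal sketch (my own example, not from the original post) is to clone the detached tensor first, so the buffer used by the graph is left untouched:

import torch as t

a = t.tensor([1, 2, 3.], requires_grad=True)
out = a.sigmoid()

c = out.detach().clone()  # an independent copy, no longer sharing storage with out
c.zero_()                 # safe: out is unchanged

out.sum().backward()      # backward succeeds and the gradients are correct
print(a.grad)             # expected: tensor([0.1966, 0.1050, 0.0452])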

