import torch as t
import torch.nn as nn

output = net(input)
target = t.arange(0, 10).view(1, -1)    # reshape to (1, 10) so it matches output
# the point: MSELoss needs floating-point tensors, but arange returns int64
output = output.to(t.float32)
target = target.to(t.float32)
criterion = nn.MSELoss()
loss = criterion(output, target)

net.zero_grad()                          # zero the gradient buffers of all parameters
print("conv1.bias gradient before backpropagation")
print(net.conv1.bias.grad)
loss.backward()                          # a tricky point here; skip the details for now
print("conv1.bias gradient after backpropagation")
print(net.conv1.bias.grad)
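
# A minimal self-contained sketch of the same flow, assuming a hypothetical
# SmallNet (not the network defined above) so the snippet can run on its own:
# build an MSE loss, zero the gradients, call backward(), and inspect
# conv1.bias.grad before and after.
import torch as t
import torch.nn as nn
import torch.nn.functional as F

class SmallNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)           # 1 input channel, 6 filters, 5x5 kernel
        self.fc1 = nn.Linear(6 * 28 * 28, 10)     # 32x32 input -> 28x28 after the conv

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = x.view(x.size(0), -1)                 # flatten to (batch, 6*28*28)
        return self.fc1(x)

net = SmallNet()
input = t.randn(1, 1, 32, 32)
output = net(input)                               # shape (1, 10)
target = t.arange(0, 10, dtype=t.float32).view(1, -1)

criterion = nn.MSELoss()
loss = criterion(output, target)

net.zero_grad()
print(net.conv1.bias.grad)                        # None: backward() has not run yet
loss.backward()
print(net.conv1.bias.grad)                        # now a tensor with 6 gradient entries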