PyTorch

I. Getting started

1. Tensors

from __future__ import print_function
import torch
## construct tensors: uninitialized and random
x = torch.Tensor(5, 3)   # uninitialized 5x3 tensor (torch.empty(5, 3) is the modern spelling)
y = torch.rand(5, 3)     # randomly initialized 5x3 tensor
print(x)

## size
print(y.size())
# torch.Size([5, 3])  -- torch.Size is in fact a tuple, so it supports all tuple operations
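Since torch.Size behaves like a Python tuple, it can be unpacked and indexed; a minimal sketch:

rows, cols = y.size()        # tuple-style unpacking
print(rows, cols)            # 5 3
print(y.size()[0], y.shape)  # indexing works too; .shape is an alias for .size()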

## addition: several equivalent syntaxes
y = torch.rand(5, 3)
print(x + y)

print(torch.add(x, y))

result = torch.Tensor(5, 3)       # pre-allocate an output tensor
torch.add(x, y, out=result)
print(result)

y.add_(x)                         # in-place: adds x to y, mutating y
print(y)
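More generally, any operation that mutates a tensor in place is post-fixed with an underscore; a small sketch (x2 is just an illustrative name):

x2 = torch.ones(2, 2)
x2.mul_(3)      # in-place multiply: x2 is now all 3s
x2.zero_()      # in-place zero: x2 is now all 0s
print(x2)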

## NumPy-style indexing
print(x[:, 1])

## reshaping: NumPy's reshape corresponds to tensor.view
x = torch.randn(4, 4)   # view needs a size-compatible source (16 elements here)
y = x.view(16)
z = x.view(-1, 8)       # -1 means PyTorch infers that dimension from the others
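Worth remembering: a view shares storage with the original tensor, so writing through the view changes the source. A minimal sketch (t and v are just illustrative names):

t = torch.randn(4, 4)
v = t.view(16)
v[0] = 100.0
print(t[0, 0])   # also 100.0, because v is a view over the same storage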

## NumPy array -> tensor: the two share the same underlying memory
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)   # modify the array in place
print(a)
# [2. 2. 2. 2. 2.]
print(b)
# tensor([ 2.,  2.,  2.,  2.,  2.], dtype=torch.float64)  -- the tensor changed too

2. NumPy bridge

a = torch.ones(5)
print(a)
# tensor([1., 1., 1., 1., 1.])

b = a.numpy()   # tensor -> NumPy array, again sharing the same memory
print(b)
print(type(b))
# [1. 1. 1. 1. 1.]
# <class 'numpy.ndarray'>
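The sharing works in this direction too: an in-place change to the tensor shows up in the NumPy array (a small sketch continuing from a and b above):

a.add_(1)
print(a)   # tensor([2., 2., 2., 2., 2.])
print(b)   # [2. 2. 2. 2. 2.]  -- b changed as well, since it shares memory with a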

3. CUDA tensors

## example:
# .cuda() / .to(device) moves a tensor onto the GPU.
# If CUDA is available, we use a ``torch.device`` object to place tensors on the GPU.
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # create a tensor directly on the GPU
    x = x.to(device)                       # or move an existing tensor with ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change the dtype while moving

# Sample output (here for a 1-element x):
# tensor([ 0.3034], device='cuda:0')
# tensor([ 0.3034], dtype=torch.float64)
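A common device-agnostic pattern (a minimal sketch, independent of the example above):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
t = torch.randn(5, 3).to(device)   # lands on the GPU when available, otherwise on the CPU
print(t.device)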

II. Autograd: automatic differentiation

Overview:

The autograd package provides automatic differentiation for all operations on tensors. It is a define-by-run framework: backpropagation is defined by how your code runs, and every single iteration can be different.
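Because the graph is rebuilt on every run, ordinary Python control flow can change its shape from one iteration to the next; a minimal sketch:

x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:   # a data-dependent number of multiplications
    y = y * 2
print(y.grad_fn)              # the last operation recorded in the graph, e.g. <MulBackward0 ...>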

1. About torch.Tensor

1.
x1 = torch.ones(3, 3)
print(x1)

x = torch.ones(2, 2, requires_grad=True)
print(x)

#
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]])

tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
#

2.
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)

#
False
True
<SumBackward0 object at 0x...>
#

#
torch.Tensor is the central class of the whole package. If you set its attribute .requires_grad to True, it starts tracking all operations defined on it. When the code needs to backpropagate, simply call .backward() and all gradients are computed automatically; the gradients for this Tensor are accumulated into its .grad attribute.
#
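One consequence worth noting: .grad accumulates across backward passes; a small sketch (w is just an illustrative tensor):

w = torch.ones(2, requires_grad=True)
(w * 2).sum().backward()
print(w.grad)        # tensor([2., 2.])
(w * 2).sum().backward()
print(w.grad)        # tensor([4., 4.]) -- accumulated, not overwritten
w.grad.zero_()       # reset before the next backward pass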

3.
print(x.requires_grad)
y = x.detach()
print(y.requires_grad)
print(x.eq(y).all())

#
True
False
tensor(True)
#
To stop a Tensor from being tracked in the computation graph, call .detach(): it detaches the Tensor from the graph, and it no longer takes part in gradient computation in future backward passes.
#
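A quick check that nothing computed from a detached tensor is tracked (a minimal sketch):

x = torch.ones(2, 2, requires_grad=True)
y = x.detach()
z = (y * 3).sum()
print(z.requires_grad)   # False -- y sits outside the graph, so z has no grad_fn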

4.
print(x.requires_grad)
print((x ** 2).requires_grad)

with torch.no_grad():
    print((x ** 2).requires_grad)

#
True
True
False
#
You can also wrap code in a with torch.no_grad(): block. This is especially useful when running a model for prediction, since no gradient computation is needed at inference time.
#
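A typical evaluation-time usage (a minimal sketch with a hypothetical linear model):

model = torch.nn.Linear(3, 1)
inputs = torch.randn(4, 3)
with torch.no_grad():
    preds = model(inputs)
print(preds.requires_grad)   # False -- no graph is built, saving memory and compute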

2. Gradients

1.
print(x.requires_grad)
print((x ** 2).requires_grad)

with torch.no_grad():
    print((x ** 2).requires_grad)

#
True
True
False
#
Automatic differentiation is switched on by setting .requires_grad=True, and can be suspended for a block of code with torch.no_grad().
#

2.
out = (3 * (x + 2) ** 2).mean()   # x is the 2x2 ones tensor with requires_grad=True from above
out.backward()
print(x.grad)
#
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])

#
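Why 4.5: out = (1/4) * sum_i 3(x_i + 2)^2, so d(out)/dx_i = (3/2)(x_i + 2); with every x_i = 1 this gives 4.5, matching the printed gradient.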

III. Variable and basic network examples

Variable

import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([[1,2],[3,4]])            # build a tensor
variable = Variable(tensor, requires_grad=True)      # build a variable
# requires_grad defaults to False; set it to True if gradients w.r.t. this Variable are needed
t_out = torch.mean(tensor*tensor)       # x^2
v_out = torch.mean(variable*variable)   # x^2

v_out.backward()    # backpropagate the error
# v_out = 1/4 * sum(variable*variable)
# the gradients w.r.t the variable, d(v_out)/d(variable) = 1/4*2*variable = variable/2

print(variable.grad)
print(variable)
print(variable.data.numpy())
# variable.data is the underlying tensor
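Given d(v_out)/d(variable) = variable/2, the printed gradient should be roughly:

# tensor([[0.5000, 1.0000],
#         [1.5000, 2.0000]])

(Since PyTorch 0.4, Variable has been merged into Tensor, so torch.tensor([[1., 2.], [3., 4.]], requires_grad=True) behaves the same without the wrapper.)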

Activation functions (demo)

import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()           # numpy array for plotting

y_relu = torch.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy() 
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()

Regression

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer: n_feature inputs -> n_hidden units
        self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer: n_hidden units -> n_output outputs

    def forward(self, x):
        x = F.relu(self.hidden(x))      # ReLU activation on the hidden layer output
        x = self.predict(x)             # linear output layer (no activation: this is a regression)
        return x

net = Net(n_feature=1, n_hidden=10, n_output=1)     # define the network
print(net)

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)  # lr is the learning rate
loss_func = torch.nn.MSELoss()  # mean squared error loss for regression

plt.ion()   # interactive mode, so the plot can be updated during training

for t in range(200):
    prediction = net(x)     # input x and predict based on x

    loss = loss_func(prediction, y)     # must be (1. nn output, 2. target)

    optimizer.zero_grad()   # clear gradients from the previous step
    loss.backward()         # backpropagation: compute gradients
    optimizer.step()        # apply the gradients (update the parameters)

    if t % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color':  'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Building a network quickly (class vs. nn.Sequential)

import torch
import torch.nn.functional as F

# the class-based definition below can be replaced by a simple sequential network
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  
        self.predict = torch.nn.Linear(n_hidden, n_output) 

    def forward(self, x):
        x = F.relu(self.hidden(x))     
        x = self.predict(x)             
        return x

net1 = Net(1, 10, 1)

# easy and fast way to build your network
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1)
)


print(net1)     

print(net2)
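The two networks compute the same function but print differently: net1 applies ReLU functionally inside forward(), so it does not show up as a submodule, while net2 registers it as a layer. The output looks roughly like:

Net(
  (hidden): Linear(in_features=1, out_features=10, bias=True)
  (predict): Linear(in_features=10, out_features=1, bias=True)
)
Sequential(
  (0): Linear(in_features=1, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=1, bias=True)
)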
