PyTorch code notes 1

Running through the Datawhale tutorial code

Tensor types

import torch
x1 = torch.rand(4, 3)                          # uniform random values in [0, 1)
print(x1)
x2 = torch.zeros(4, 3, dtype=torch.long)       # all zeros, int64
print(x2)
x3 = torch.tensor([5.5, 3])                    # build a tensor directly from data
print(x3)
x4 = x1.new_ones(4, 3, dtype=torch.double)     # new all-ones tensor; keeps x1's torch.dtype and torch.device unless overridden
print(x4)
x5 = torch.randn_like(x1, dtype=torch.float)   # same size as x1, dtype reset to float
print(x5)
# inspect the dimensions
print(x5.size())
print(x5.shape)



tensor([[0.0044, 0.8442, 0.1455],
        [0.1779, 0.3329, 0.3311],
        [0.2145, 0.1926, 0.2167],
        [0.0683, 0.5833, 0.8637]])
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
tensor([5.5000, 3.0000])
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
tensor([[ 0.6762,  2.2020, -0.8317],
        [ 0.0619, -1.1514,  2.6909],
        [ 0.2310, -0.4217, -0.2834],
        [-0.7071,  1.6272, -0.6119]])
torch.Size([4, 3])
torch.Size([4, 3])
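
Besides rand/zeros/tensor, a few other constructors and conversions come up constantly. A minimal sketch (the variable names are just illustrative; the functions are standard torch API):

import torch
a = torch.ones(2, 3)                    # all-ones tensor
b = torch.arange(0, 6).view(2, 3)       # 0..5, reshaped to 2x3 (int64)
c = torch.zeros_like(a)                 # same shape/dtype/device as a, filled with zeros
d = a.to(torch.float64)                 # dtype conversion (equivalent to a.double())
print(a.dtype, b.dtype, c.dtype, d.dtype)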

Tensor operations

Addition

import torch
# method 1
y = torch.rand(4, 3)
print(x1 + y)
# method 2
print(torch.add(x1, y))
# method 3: in-place, modifies y itself
y.add_(x1)
print(y)
tensor([[0.8751, 1.1450, 0.2244],
        [0.6406, 1.1287, 1.2112],
        [0.3791, 0.3903, 0.4637],
        [0.9919, 1.0664, 1.2216]])
tensor([[0.8751, 1.1450, 0.2244],
        [0.6406, 1.1287, 1.2112],
        [0.3791, 0.3903, 0.4637],
        [0.9919, 1.0664, 1.2216]])
tensor([[0.8751, 1.1450, 0.2244],
        [0.6406, 1.1287, 1.2112],
        [0.3791, 0.3903, 0.4637],
        [0.9919, 1.0664, 1.2216]])
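
torch.add also takes an out= argument so the result can be written into a pre-allocated tensor, and by convention every in-place operation ends with an underscore (add_, zero_, copy_). A small sketch, reusing x1 and y from above:

result = torch.empty(4, 3)
torch.add(x1, y, out=result)   # writes x1 + y into result instead of allocating a new tensor
print(result)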

Indexing

import torch
x = torch.rand(4, 3)
# take the second column (index 1)
print(x[:, 1])
print(x)

y = x[0, :]
y += 1
print(y)
print(x[0, :])   # the source tensor is modified too: slicing returns a view



tensor([0.6157, 0.8934, 0.4963, 0.5572])
tensor([[0.4873, 0.6157, 0.1242],
        [0.4745, 0.8934, 0.2560],
        [0.5843, 0.4963, 0.9230],
        [0.8969, 0.5572, 0.3444]])
tensor([1.4873, 1.6157, 1.1242])
tensor([1.4873, 1.6157, 1.1242])
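
Since slicing returns a view, take a .clone() when an independent copy is needed; torch.index_select is another way to pick out rows or columns. A minimal sketch, reusing x from above:

row = x[0, :].clone()   # real copy with its own storage
row += 1
print(x[0, :])          # x is unchanged this time

idx = torch.tensor([0, 2])
print(torch.index_select(x, 0, idx))   # rows 0 and 2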

Reshaping with view

x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)   # -1 means this dimension is inferred from the others
print(x, y, z)
print(x.size(), y.size(), z.size())

x += 1
print(x)
print(y)   # y changed as well: view shares the underlying data with x

tensor([[-5.6219e-05,  1.1060e+00, -1.0893e+00,  7.2261e-01],
        [ 9.1824e-01, -1.0603e-01,  8.5275e-01, -1.7203e-01],
        [ 2.7445e-01,  7.1475e-01,  1.0594e+00, -5.4334e-02],
        [ 2.0202e-01, -2.1162e+00,  8.0789e-01, -2.0461e+00]]) tensor([-5.6219e-05,  1.1060e+00, -1.0893e+00,  7.2261e-01,  9.1824e-01,
        -1.0603e-01,  8.5275e-01, -1.7203e-01,  2.7445e-01,  7.1475e-01,
         1.0594e+00, -5.4334e-02,  2.0202e-01, -2.1162e+00,  8.0789e-01,
        -2.0461e+00]) tensor([[-5.6219e-05,  1.1060e+00, -1.0893e+00,  7.2261e-01,  9.1824e-01,
         -1.0603e-01,  8.5275e-01, -1.7203e-01],
        [ 2.7445e-01,  7.1475e-01,  1.0594e+00, -5.4334e-02,  2.0202e-01,
         -2.1162e+00,  8.0789e-01, -2.0461e+00]])
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
tensor([[ 0.9999,  2.1060, -0.0893,  1.7226],
        [ 1.9182,  0.8940,  1.8528,  0.8280],
        [ 1.2744,  1.7148,  2.0594,  0.9457],
        [ 1.2020, -1.1162,  1.8079, -1.0461]])
tensor([ 0.9999,  2.1060, -0.0893,  1.7226,  1.9182,  0.8940,  1.8528,  0.8280,
         1.2744,  1.7148,  2.0594,  0.9457,  1.2020, -1.1162,  1.8079, -1.0461])
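
view() needs the requested shape to be compatible with the tensor's memory layout; after a transpose the tensor is no longer contiguous and view() raises an error, in which case .reshape() or .contiguous().view() can be used instead. A small sketch with a fresh tensor:

a = torch.randn(4, 4)
t = a.t()                     # transpose: shares data but is no longer contiguous
# t.view(16)                  # would raise a RuntimeError here
print(t.reshape(16).shape)    # reshape copies the data if it has to
print(t.contiguous().view(16).shape)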

Copying

z = torch.clone(y)   # clone makes a real copy with its own storage
print(z)
tensor([ 0.9999,  2.1060, -0.0893,  1.7226,  1.9182,  0.8940,  1.8528,  0.8280,
         1.2744,  1.7148,  2.0594,  0.9457,  1.2020, -1.1162,  1.8079, -1.0461])
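
Unlike a view, the clone has its own storage, so modifying it leaves the source untouched (when requires_grad is set, clone is still recorded in the computation graph). A quick check, reusing y and z from above:

z += 1
print(z)   # changed
print(y)   # y keeps its old values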

Inspecting type and value

import torch
x = torch.randn(1)        # a torch tensor is similar to a NumPy array
print(type(x))
print(type(x.item()))     # .item() pulls the Python number out of a one-element tensor
<class 'torch.Tensor'>
<class 'float'>
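
Note that .item() only works on one-element tensors; for anything larger, .tolist() or .numpy() moves the data out of torch. A minimal sketch:

v = torch.randn(2, 2)
# v.item()           # would raise an error: only one-element tensors can be converted
print(v.tolist())    # nested Python list
print(v.numpy())     # NumPy array sharing memory with v (CPU tensors only)
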
Broadcasting

x = torch.arange(1, 3).view(1, 2)   # shape (1, 2)
print(x)
y = torch.arange(1, 4).view(3, 1)   # shape (3, 1)
print(y)
print(x + y)                        # both broadcast to (3, 2)
tensor([[1, 2]])
tensor([[1],
        [2],
        [3]])
tensor([[2, 3],
        [3, 4],
        [4, 5]])
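
Broadcasting aligns shapes from the trailing dimension: two dimensions are compatible when they are equal or one of them is 1 (or missing), and size-1 dimensions are stretched. Above, the (1, 2) and (3, 1) tensors both expand to (3, 2). Another small sketch:

a = torch.ones(2, 3)
b = torch.arange(3)     # shape (3,) broadcasts against the last dimension of a
print((a + b).shape)    # torch.Size([2, 3])
c = torch.ones(2, 1)
print((a + c).shape)    # torch.Size([2, 3])
# a + torch.ones(4)     # would fail: 3 and 4 cannot be broadcast together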

Autograd

from __future__ import print_function
import torch
x = torch.randn(3, 3, requires_grad=True)   # requires_grad=True: track all operations on this tensor
print(x.grad_fn)                            # None: x was created by the user, not by an operation

x = torch.ones(2, 2, requires_grad=True)
print(x)

y = x**2
print(y)

print(y.grad_fn)

z = y * y * 3
out = z.mean()

print(z, out)

a = torch.randn(2, 2)   # requires_grad defaults to False
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)
None
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
tensor([[1., 1.],
        [1., 1.]], grad_fn=<PowBackward0>)
<PowBackward0 object at 0x...>
tensor([[3., 3.],
        [3., 3.]], grad_fn=<MulBackward0>) tensor(3., grad_fn=<MeanBackward0>)
False
True
<SumBackward0 object at 0x...>

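The graph can be poked at a little further: tensors created directly by the user are leaves, tensors produced by operations carry a grad_fn, and grad_fn.next_functions links back towards the inputs. A small sketch, reusing a and b from above (the exact printed addresses will differ):

print(a.is_leaf, b.is_leaf)      # True False: a is a leaf requiring grad, b was computed from it
print(b.grad_fn)                 # SumBackward0
print(b.grad_fn.next_functions)  # points back at the MulBackward0 node that produced a * a
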
Gradients

out.backward()   # out is a scalar, so backward() needs no gradient argument
print(x.grad)    # d(out)/dx
tensor([[3., 3.],
        [3., 3.]])


# backpropagate again: note that gradients accumulate
out2 = x.sum()
out2.backward()
print(x.grad)          # 3 + 1 = 4, the new gradient is added onto the old one

out3 = x.sum()
x.grad.data.zero_()    # zero the accumulated gradient before the next backward
out3.backward()
print(x.grad)          # now just 1

x = torch.randn(3, requires_grad=True)
print(x)

y = x * 2
i = 0
while y.data.norm() < 1000:   # keep doubling y until its norm exceeds 1000
    y = y * 2
    i = i + 1
print(y)
print(i)

v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(v)   # y is not a scalar, so a vector must be supplied for the vector-Jacobian product

print(x.grad)
print(x.requires_grad)
print((x ** 2).requires_grad)

with torch.no_grad():
    print((x ** 2).requires_grad)

x = torch.ones(1, requires_grad=True)

print(x.data)                 # still a tensor
print(x.data.requires_grad)   # but already detached from the computation graph

y = 2 * x
x.data *= 100   # only changes the value; not recorded in the graph, so it does not affect gradient propagation

y.backward()
print(x)        # changing .data also changes the tensor's value
print(x.grad)

tensor([[4., 4.],
        [4., 4.]])
tensor([[1., 1.],
        [1., 1.]])
tensor([ 1.8472, -1.1261, -1.4593], requires_grad=True)
tensor([ 945.7661, -576.5563, -747.1741], grad_fn=<MulBackward0>)
8
tensor([5.1200e+01, 5.1200e+02, 5.1200e-02])
True
True
False
tensor([1.])
False
tensor([100.], requires_grad=True)
tensor([2.])
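
The call y.backward(v) computes a vector-Jacobian product rather than a full Jacobian. In the loop above, i ended at 8, so y = 2^9 * x = 512 * x; the Jacobian is 512 on the diagonal and x.grad comes out as 512 * v = [51.2, 512, 0.0512], which is exactly the output shown. A minimal re-check of that reasoning with the loop unrolled:

x = torch.randn(3, requires_grad=True)
y = 512 * x                              # same mapping as the loop above when i == 8
v = torch.tensor([0.1, 1.0, 0.0001])
y.backward(v)                            # supplies v for the vector-Jacobian product J^T v
print(x.grad)                            # tensor([5.1200e+01, 5.1200e+02, 5.1200e-02])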
