本文为根据官方文档做的入门学习笔记
张量(Tensor)类似于NumPy中的ndarray,并且可以用于GPU加速计算。
# Create an uninitialized 5x3 matrix: torch.empty only allocates memory,
# so the printed values below are arbitrary leftover data.
x = torch.empty(5, 3)
print(x)
tensor([[1.0880e-19, 1.0874e-19, 2.5443e+30],
[9.4727e+21, 2.4835e+27, 2.5428e+30],
[1.0877e-19, 1.5163e+23, 2.2012e+12],
[3.7899e+22, 5.2480e+05, 1.0175e+31],
[9.7056e+24, 1.6283e+32, 3.7913e+22]])
# Create a 5x3 matrix with values drawn uniformly at random from [0, 1).
x = torch.rand(5, 3)
print(x)
tensor([[0.1484, 0.8227, 0.9398],
[0.6957, 0.7653, 0.8424],
[0.2150, 0.8570, 0.5757],
[0.1498, 0.6313, 0.4983],
[0.2018, 0.4304, 0.3919]])
# Create a 5x3 matrix of zeros with integer dtype torch.long (int64).
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
tensor([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
# Construct a tensor directly from a Python list of values.
x = torch.tensor([5.5, 3])
print(x)
tensor([5.5000, 3.0000])
# new_* methods create a new tensor of the given size, reusing properties
# of the source tensor unless overridden (here dtype is set to double).
x = x.new_ones(5, 3, dtype=torch.double)
print(x)
# *_like methods reuse the input's size; here dtype is overridden to float32.
x = torch.randn_like(x, dtype=torch.float)  # override dtype
print(x)
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=torch.float64)
tensor([[ 0.0412, 0.9730, 1.1397],
[-0.5710, -1.6817, -0.0902],
[-0.2467, -1.5928, -0.4624],
[ 0.3849, 0.2497, -1.3642],
[ 1.1823, 1.6061, -1.4947]])
# Get the tensor's shape as a torch.Size object.
print(x.size())
torch.Size([5, 3])
Note: torch.Size 是个元组,支持所有元组操作
其他语法:
# Addition, syntax 1: the `+` operator.
y = torch.rand(5, 3)
print(x + y)
out:
tensor([[ 0.9242, 1.3102, 2.0905],
[ 0.0236, -1.2898, 0.7076],
[-0.1685, -1.0089, -0.0096],
[ 1.1552, 1.0442, -0.4530],
[ 1.2622, 1.7581, -1.3049]])
# Addition, syntax 2: torch.add as a plain function call.
print(torch.add(x, y))
out:
tensor([[ 0.9242, 1.3102, 2.0905],
[ 0.0236, -1.2898, 0.7076],
[-0.1685, -1.0089, -0.0096],
[ 1.1552, 1.0442, -0.4530],
[ 1.2622, 1.7581, -1.3049]])
#3
result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)
out:
tensor([[ 0.9242, 1.3102, 2.0905],
[ 0.0236, -1.2898, 0.7076],
[-0.1685, -1.0089, -0.0096],
[ 1.1552, 1.0442, -0.4530],
[ 1.2622, 1.7581, -1.3049]])
#4
y.add_(x)
print(y)
out:
tensor([[ 0.9242, 1.3102, 2.0905],
[ 0.0236, -1.2898, 0.7076],
[-0.1685, -1.0089, -0.0096],
[ 1.1552, 1.0442, -0.4530],
[ 1.2622, 1.7581, -1.3049]])
Note: 任何对张量进行原地(in-place)修改的操作都以后缀“_”结尾,如 x.copy_(y)、x.t_() 都会改变 x。
# Use Tensor.view to resize/reshape: returns a tensor with the same data
# but a different shape.
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # -1 is inferred from the other dimension (16/8 = 2)
print(x.size(), y.size(), z.size())
out:
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
# Use .item() to extract the value of a one-element tensor as a plain
# Python number (note the extra printed precision below).
x = torch.randn(1)
print(x)
print(x.item())
out:
tensor([-0.6661])
-0.6661176681518555
tensor和array之间可相互转换(NumPy Bridge):
(CPU 上的)tensor和array共享内存位置,改变一个的同时另一个也随之改变。
# Convert a tensor to a NumPy array: start from a tensor of ones.
a = torch.ones(5)
print(a)
out:
tensor([1., 1., 1., 1., 1.])
# a.numpy() returns a NumPy array that shares memory with tensor a.
b = a.numpy()
print(b)
out:
[1. 1. 1. 1. 1.]
# In-place add on the tensor; b reflects the change because the tensor
# and the array share the same memory (shown by the output below).
a.add_(1)
print(a)
print(b)
out:
tensor([2., 2., 2., 2., 2.])
[2. 2. 2. 2. 2.]
# Convert a NumPy array to a tensor; torch.from_numpy shares the
# underlying memory with the array.
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)  # in-place NumPy add — the tensor b changes too
print(a)
print(b)
out:
[2. 2. 2. 2. 2.]
tensor([2., 2., 2., 2., 2.], dtype=torch.float64)