PyTorch Basics 1.0
1. Tensor: Basic Concepts and Operations
import torch
import numpy as np
a = torch.tensor(4.0)
print('Scalar: ', a)
Scalar: tensor(4.)
b = torch.tensor([1, 5, 2])
print('Vector: ', b)
Vector: tensor([1, 5, 2])
c = torch.tensor([[1, 2, 5],
                  [5, 6, 9]])
print('Matrix: ', c)
Matrix: tensor([[1, 2, 5],
        [5, 6, 9]])
d = torch.tensor([[1, 2, 5], [5, 6, 1], [9, 7, 5]])
print('3x3 matrix: ', d)
3x3 matrix: tensor([[1, 2, 5],
        [5, 6, 1],
        [9, 7, 5]])
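Every tensor reports its dimensionality and shape, which makes it easy to check what was actually built; a quick sanity check on the tensors above:
print(a.dim(), a.shape)   # 0 torch.Size([])   -- a scalar has no dimensions
print(b.dim(), b.shape)   # 1 torch.Size([3])
print(d.dim(), d.shape)   # 2 torch.Size([3, 3]) -- a 3x3 matrix is still a 2-D tensor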
e = torch.empty(5, 2)
print(e)
tensor([[0., 0.],
        [0., 0.],
        [0., 0.],
        [0., 0.],
        [0., 0.]])
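Note that torch.empty only allocates memory without initializing it, so the zeros above are incidental; the contents can be arbitrary leftover values. When zeros are actually required, use torch.zeros:
e_safe = torch.zeros(5, 2)   # guaranteed zeros, unlike torch.empty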
f = torch.rand(5, 2)
print(f)
tensor([[0.4557, 0.6264],
        [0.0040, 0.6414],
        [0.1270, 0.4115],
        [0.2843, 0.4744],
        [0.5275, 0.2181]])
g = torch.zeros(5, 2, dtype=torch.long)
print(g)
g1 = torch.zeros(5, 4)
print(g1)
tensor([[0, 0],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0]])
tensor([[0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [0., 0., 0., 0.]])
h = torch.ones(5, 2, dtype=torch.int)
print(h)
h1 = torch.ones(6, 2, dtype=torch.long)
print(h1)
tensor([[1, 1],
        [1, 1],
        [1, 1],
        [1, 1],
        [1, 1]], dtype=torch.int32)
tensor([[1, 1],
        [1, 1],
        [1, 1],
        [1, 1],
        [1, 1],
        [1, 1]])
i = torch.rand_like(h1, dtype=torch.float)
print(i)
print(i.shape)
tensor([[0.3572, 0.3757],
        [0.6741, 0.3193],
        [0.0902, 0.0936],
        [0.0152, 0.1191],
        [0.2416, 0.5334],
        [0.1352, 0.3116]])
torch.Size([6, 2])
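The *_like constructors take their shape (and, unless overridden, dtype and device) from an existing tensor; rand_like overrides the dtype above because torch.rand is only defined for floating-point types. The same pattern applies elsewhere, for example:
z_like = torch.zeros_like(h1)   # 6x2 zeros with h1's dtype (torch.long)
o_like = torch.ones_like(i)     # 6x2 ones with i's dtype (torch.float)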
2. Basic Tensor Arithmetic
j = torch.tensor([2, 5, 6])
k = torch.tensor([8, 9, 6])
j_k = j + k
print(j_k)
tensor([10, 14, 12])
j = torch.tensor([4, 5, 8])
k = torch.tensor([8, 9, 51])
j_k = torch.add(j, k)
print(j_k)
tensor([12, 14, 59])
j = torch.tensor([4, 5, 8])
k = torch.tensor([8, 9, 51])
j.add_(k)
print(j)
print(k)
tensor([12, 14, 59])
tensor([ 8, 9, 51])
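The trailing underscore in add_ marks an in-place operation: the result overwrites j, which is why j changed while k did not. This convention holds across the API, for example:
j = torch.tensor([4, 5, 8])
j.mul_(2)   # in-place multiply: j becomes tensor([ 8, 10, 16])
j.sub_(1)   # in-place subtract: j becomes tensor([ 7,  9, 15])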
l = torch.tensor([4, 8, 9])
m = torch.tensor([6, 7, 85])
print(l // m)
tensor([0, 1, 0])
D:\anaconda\envs\mytorch\lib\site-packages\torch\_tensor.py:575: UserWarning: floor_divide is deprecated, and will be removed in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values.
To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). (Triggered internally at ..\aten\src\ATen\native\BinaryOps.cpp:467.)
return torch.floor_divide(self, other)
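As the warning says, the explicit rounding_mode argument of torch.div (available since PyTorch 1.8) is the replacement; the two modes only differ for negative operands:
print(torch.div(l, m, rounding_mode='trunc'))   # rounds toward zero: tensor([0, 1, 0])
print(torch.div(l, m, rounding_mode='floor'))   # true floor division: tensor([0, 1, 0])
print(torch.div(l, m))   # plain torch.div is true division, same as true_divide below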
print(torch.true_divide(l, m))
tensor([0.6667, 1.1429, 0.1059])
n = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(torch.mm(n, p))
tensor([[184, 207, 138],
        [184, 207, 138],
        [184, 207, 138]])
n = torch.tensor([[8, 9, 6]])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(torch.mm(n, p))
tensor([[184, 207, 138]])
n = torch.tensor([8, 9, 6])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(torch.mm(n, p))
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
C:\Users\Public\Documents\Wondershare\CreatorTemp/ipykernel_12572/1070864940.py in <module>
      1 n = torch.tensor([8, 9, 6])
      2 p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
----> 3 print(torch.mm(n, p))

RuntimeError: self must be a matrix
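torch.mm strictly requires two 2-D matrices, hence the error. torch.matmul is more flexible: a 1-D first operand is treated as a row vector and the extra dimension is dropped from the result:
n = torch.tensor([8, 9, 6])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(torch.matmul(n, p))   # 1-D result: tensor([184, 207, 138])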
n = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(torch.matmul(n, p))
tensor([[184, 207, 138],
        [184, 207, 138],
        [184, 207, 138]])
n = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(n.mm(p))
tensor([[184, 207, 138],
        [184, 207, 138],
        [184, 207, 138]])
n = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
p = torch.tensor([[8, 9, 6], [8, 9, 6], [8, 9, 6]])
print(n @ p)
tensor([[184, 207, 138],
        [184, 207, 138],
        [184, 207, 138]])
q = torch.tensor([2, 3])
print(torch.pow(q, 2))
tensor([4, 9])
q = torch.tensor([2, 3])
print(q.pow(2))
tensor([4, 9])
q = torch.tensor([2, 3])
print(q**2)
tensor([4, 9])
r = torch.tensor(4)
r1 = torch.tensor([5, 9])
r2 = torch.tensor([1, 5, 6])
r3 = torch.tensor([[7, 8, 9], [4, 5, 6]])
print(torch.sqrt(r))
print(torch.sqrt(r1))
print(torch.sqrt(r2))
print(torch.sqrt(r3))
print(r.sqrt())
print(r1.sqrt())
print(r2.sqrt())
print(r3.sqrt())
tensor(2.)
tensor([2.2361, 3.0000])
tensor([1.0000, 2.2361, 2.4495])
tensor([[2.6458, 2.8284, 3.0000],
        [2.0000, 2.2361, 2.4495]])
tensor(2.)
tensor([2.2361, 3.0000])
tensor([1.0000, 2.2361, 2.4495])
tensor([[2.6458, 2.8284, 3.0000],
        [2.0000, 2.2361, 2.4495]])
s1 = torch.tensor(4)
s2 = torch.tensor([5, 6])
s3 = torch.tensor([[1, 5], [6, 8]])
print(torch.log2(s1))
print(torch.log2(s2))
print(torch.log2(s3))
print(torch.log(s1))
print(torch.log(s2))
print(torch.log(s3))
print(torch.log10(s1))
print(torch.log10(s2))
print(torch.log10(s3))
tensor(2.)
tensor([2.3219, 2.5850])
tensor([[0.0000, 2.3219],
        [2.5850, 3.0000]])
tensor(1.3863)
tensor([1.6094, 1.7918])
tensor([[0.0000, 1.6094],
        [1.7918, 2.0794]])
tensor(0.6021)
tensor([0.6990, 0.7782])
tensor([[0.0000, 0.6990],
        [0.7782, 0.9031]])
s1 = torch.tensor(4)
s2 = torch.tensor([5, 6])
s3 = torch.tensor([[1, 5], [6, 8]])
print(s1.log2())
print(s2.log2())
print(s3.log2())
print(s1.log())
print(s2.log())
print(s3.log())
print(s1.log10())
print(s2.log10())
print(s3.log10())
tensor(2.)
tensor([2.3219, 2.5850])
tensor([[0.0000, 2.3219],
        [2.5850, 3.0000]])
tensor(1.3863)
tensor([1.6094, 1.7918])
tensor([[0.0000, 1.6094],
        [1.7918, 2.0794]])
tensor(0.6021)
tensor([0.6990, 0.7782])
tensor([[0.0000, 0.6990],
        [0.7782, 0.9031]])
t = torch.tensor([5.26161, 9.16651, 8.51665])
print(t.floor())
tensor([5., 9., 8.])
print(t.ceil())
tensor([ 6., 10., 9.])
print(t.round())
tensor([5., 9., 9.])
print(t.trunc())
tensor([5., 9., 8.])
print(t.frac())
tensor([0.2616, 0.1665, 0.5167])
print(torch.tensor([8, 9]) % 7)
tensor([1, 2])
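The % operator maps to torch.remainder, which follows the sign of the divisor (like Python's %); torch.fmod follows the sign of the dividend (like C's fmod). The two only differ for negative values:
neg = torch.tensor([-8, 9])
print(torch.remainder(neg, 7))   # tensor([6, 2]) -- takes the divisor's sign
print(torch.fmod(neg, 7))        # tensor([-1,  2]) -- takes the dividend's sign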
3. Other Tensor Operations
u = torch.tensor([[5, 8, 9], [8, 1, 8]])
print(u.shape)
torch.Size([2, 3])
print(u.reshape(3, 2))
tensor([[5, 8],
        [9, 8],
        [1, 8]])
print(u.resize(2, 3))
tensor([[5, 8, 9],
        [8, 1, 8]])
D:\anaconda\envs\mytorch\lib\site-packages\torch\_tensor.py:490: UserWarning: non-inplace resize is deprecated
  warnings.warn("non-inplace resize is deprecated")
print(u.view(3, 2))
print(u)
tensor([[5, 8],
        [9, 8],
        [1, 8]])
tensor([[5, 8, 9],
        [8, 1, 8]])
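view never copies data, so it only works when the tensor's memory layout is contiguous; reshape falls back to copying when a view is impossible. A transposed tensor is the classic case where they differ:
ut = u.t()                        # same storage as u, non-contiguous layout
# ut.view(-1)                     # would raise a RuntimeError here
print(ut.reshape(-1))             # works: reshape copies when it must
print(ut.contiguous().view(-1))   # or make the memory contiguous first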
v = torch.tensor(3.3)
print(v.item())
3.299999952316284
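The trailing digits appear because the default floating dtype is torch.float32, which cannot represent 3.3 exactly; item() converts to a Python float and exposes the rounding. With a 64-bit tensor the value round-trips cleanly:
v64 = torch.tensor(3.3, dtype=torch.float64)
print(v64.item())   # 3.3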
w = torch.tensor([8, 5, 6, 1, 161, 16, 15198, 119800])
print(w.sort())
print(torch.sort(w))
torch.return_types.sort(
values=tensor([ 1, 5, 6, 8, 16, 161, 15198, 119800]),
indices=tensor([3, 1, 2, 0, 5, 4, 6, 7]))
torch.return_types.sort(
values=tensor([ 1, 5, 6, 8, 16, 161, 15198, 119800]),
indices=tensor([3, 1, 2, 0, 5, 4, 6, 7]))
print(torch.sort(w, descending = True))
torch.return_types.sort(
values=tensor([119800, 15198, 161, 16, 8, 6, 5, 1]),
indices=tensor([7, 6, 4, 5, 0, 2, 1, 3]))
x = torch.tensor([8, 5, 6, 1, 161, 16, 15198, 119800])
print(torch.topk(x, k=2, dim=0, largest=True))
torch.return_types.topk(
values=tensor([119800, 15198]),
indices=tensor([7, 6]))
print(torch.kthvalue(x, 5, dim=0))
torch.return_types.kthvalue(
values=tensor(16),
indices=tensor(5))
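The torch.return_types objects printed above are named tuples, so both sort and topk results unpack directly:
values, indices = torch.topk(x, k=2)
print(values)    # tensor([119800,  15198])
print(indices)   # tensor([7, 6])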
y = torch.tensor([2,3,4])
print(torch.isnan(y))
tensor([False, False, False])
print(torch.isfinite(y))
tensor([True, True, True])
print(torch.isinf(y))
tensor([False, False, False])
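An integer tensor can never hold NaN or infinity, so the checks above are trivially False/True. With floating-point values they do real work:
y1 = torch.tensor([float('nan'), float('inf'), 2.0])
print(torch.isnan(y1))      # tensor([ True, False, False])
print(torch.isfinite(y1))   # tensor([False, False,  True])
print(torch.isinf(y1))      # tensor([False,  True, False])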
z = torch.tensor([7, 2, 8])
print(z.numpy())
[7 2 8]
z1 = np.ones(6)
print(z1)
print(torch.from_numpy(z1))
[1. 1. 1. 1. 1. 1.]
tensor([1., 1., 1., 1., 1., 1.], dtype=torch.float64)
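On the CPU, both conversions share memory with the original rather than copying, so a change on one side is visible from the other:
t1 = torch.from_numpy(z1)
z1[0] = 7.    # modify the NumPy array in place
print(t1)     # the tensor sees it: tensor([7., 1., 1., 1., 1., 1.], dtype=torch.float64)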
if torch.cuda.is_available():
    device = torch.device('cuda')   # the correct device string is 'cuda', not 'gpu'
else:
    device = torch.device('cpu')
a1 = torch.tensor([4, 5, 6], device=device)
a2 = torch.tensor([5, 15, 151], device=device)
print(a1)
print(a2)
a3 = a1 + a2
print(a3)
tensor([4, 5, 6])
tensor([ 5, 15, 151])
tensor([ 9, 20, 157])
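An existing tensor moves between devices with .to(device) (or .cuda()/.cpu()); on a machine with a GPU, the printouts above would also show device='cuda:0'. A minimal sketch:
a1 = a1.to(device)   # a no-op on CPU; copies to the GPU when one is available
print(a1.device)     # cpu here, cuda:0 on a GPU machine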