PyTorch ships many tensor APIs that are efficient and useful, but beginners can find them hard to remember when it is time to apply them to real tensor tasks.
How do you keep them fresh? With a single annotated Python script: just run it, change the content, and debug.
Have fun with it!
import torch
import numpy as np
a = torch.randn(2,3) # tensor on the cpu
print(isinstance(a, torch.FloatTensor))
if torch.cuda.is_available(): # guard: .cuda() raises on machines without a gpu
    a = a.cuda() # tensor on the gpu
    print(isinstance(a, torch.cuda.FloatTensor))
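# a device-agnostic pattern (a small sketch using only the stock torch API):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a = a.to(device) # .to() moves tensors across devices (and can also cast dtypes)
print(a.device)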
# b = torch.tensor(1.) # loss value
# print(b.shape, b.size()) # dim of 0
# c = torch.tensor([1.]) # linear layer input
# print(c.shape, c.size()) # dim of 1
# d = torch.ones(2,2) # linear layer batch input
# print(d.shape, d.size()) # dim of 2
# e = torch.rand(1,2,3) # rnn layer batch input
# print(e.shape, e.size()) # dim of 3
# f = torch.rand(2,3,28,28) # cnn layer batch input
# print(f.shape, f.size()) # dim of 4
# g = torch.rand(2,3,28,28)
# print(g.numel(), g.dim()) # total element count and number of dims
# '''create various tensor in torch'''
# h = torch.from_numpy(np.array([2,3,3]) ) # from numpy data
# print(h)
# i = torch.tensor([2,3,4]) # from python list
# print(i)
# # torch.tensor() accepts data only
# # i = torch.tensor(2,2) ❌ (2,2 would be read as data, not a shape)
# # while Tensor, IntTensor & FloatTensor accept either data or a shape
# j = torch.Tensor(2,3) # uninitialized, built from a shape
# print(j) # be careful! the values in j can be arbitrarily small or large
# k = torch.Tensor([2,3]) # a list is treated as data, not a shape
# print(k) # tensor([2., 3.])
# l = torch.empty([2,3]) # uninitialized
# # l = torch.empty(2,3) ✔ both give a tensor of shape [2,3]
# print(l) # be careful! the values in l can be arbitrarily small or large
# # the default dtype used by the creation APIs above can be changed:
# m = torch.tensor([1.2,2])
# print(m.type())
# torch.set_default_tensor_type(torch.DoubleTensor)
# n = torch.tensor([1.2,2])
# print(n.type())
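# # note: newer torch releases prefer torch.set_default_dtype for this;
# # restore the default so the rest of the script behaves as expected
# torch.set_default_dtype(torch.float32)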
# o = torch.rand(3,3) # randomly initialized
# print(o)
# p = torch.rand_like(o) # random, same shape as o
# print(p) # uniform distribution over [0,1)
# q = torch.randint(1,10,[3,3]) # random integers
# print(q) # uniform distribution over [min_val, max_val)
# r = torch.randn(3,3) # usually the go-to random initializer
# print(r) # standard normal distribution N(0,1)
# s = torch.normal(mean=torch.full([10],0.),std=torch.arange(1.,0.,-0.1)) # element-wise normal
# print(s) # each element drawn from a normal with its own mean & std
# t = torch.full([2,3],7) # filled with a constant value
# print(t)
# u = torch.full([],7) # a 0-dim scalar tensor
# print(u)
# v = torch.full([10], 7) # a 1-dim vector of length 10
# print(v)
# w = torch.ones(3,3) # initialized with 1
# print(w)
# x = torch.zeros(3,3) # initialized with 0
# print(x)
# y = torch.eye(3,3) # identity matrix (ones on the diagonal)
# print(y)
# z = torch.arange(0, 10, 2) # values in [min_val, max_val) with a step
# print(z)
# A = torch.linspace(0, 10, 4) # evenly spaced, both endpoints included
# print(A)
# B = torch.logspace(0, 1, 10) # evenly spaced exponents: 10^0 .. 10^1
# print(B)
# C = torch.randperm(10) # a random permutation of 0..9
# print(C)
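# # a common use of randperm: shuffle two tensors with one index order
# # (src/lbl are made-up names for illustration)
# src, lbl = torch.rand(4,3), torch.arange(4)
# idx = torch.randperm(4)
# print(src[idx].shape, lbl[idx]) # rows reordered consistently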
# '''indexing and slicing'''
# D = torch.rand(4,3,28,28)
# print(D.shape) # [4,3,28,28]
# # indexing by dim
# print(D[0].shape) # dim 0 [3,28,28]
# print(D[0,0].shape) # dim 1 [28,28]
# print(D[0,0,0].shape) # dim 2 [28]
# print(D[0,0,0,0].shape) # dim 3 []
# # indexing the first or last few elements
# print(D[:2].shape) # first two elements of dim 0 -> [2,3,28,28]
# print(D[:2,:2].shape) # plus first two of dim 1 -> [2,2,28,28], same as below
# # print(D[:2,:2,:,:].shape)
# print(D[:2,-1:].shape) # last element of dim 1 -> [2,1,28,28], same as below
# # print(D[:2,-1:,:,:].shape)
# # indexing by steps
# print(D[::2,:,::2,::2].shape) # [2,3,14,14]
# # indexing with specific indices
# print(D.index_select(0,torch.tensor([0,1,2])).shape) # [3,3,28,28]
# print(D.index_select(2,torch.arange(28)).shape) # [4,3,28,28]
# # indexing with ...
# print(D[...].shape) # [4,3,28,28]
# print(D[0,...].shape) # [3,28,28]
# print(D[0,...,::2].shape) # [3,28,14]
# # indexing with mask
# E = torch.randn(3,3)
# F = E.ge(0.5)
# G = torch.masked_select(E, F) # the elements where the mask is True
# print(G) # note: the result is always flattened to 1-d
# # indexing with flat (flattened) indices
# H = torch.tensor([[4,5,6],[7,8,9]])
# I = torch.take(H, torch.tensor([0,2,5])) # treats H as if it were 1-d
# print(I) # tensor([4, 6, 9])
# '''dimension transform'''
# # view and reshape do the same job here
# J = torch.rand(4,1,28,28)
# print(J.view(4, -1).shape) # careful: the dimension information is lost
# print(J.reshape(4, -1).shape) # careful: the dimension information is lost
# K = J.view(4,28,28) # careful: the channel dimension is lost
# K = K.view(4,28,28,1) # view is not in-place; reassign to keep the result
# print(K.shape) # [4,28,28,1]
# # squeeze & unsqueeze
# L = torch.rand(4,1,28,28) # [4,1,28,28]
# print(L.unsqueeze(0).shape) # [1, 4, 1, 28, 28]
# print(L.unsqueeze(-1).shape) # [4, 1, 28, 28, 1]
# print(L.unsqueeze(-1).squeeze(-1).shape) # [4,1,28,28]
# print(L.squeeze(1).unsqueeze(-1).shape) # [4,28,28,1]
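# # squeeze() with no argument drops every dim of size 1 at once
# print(L.squeeze().shape) # [4,28,28]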
# # expand & repeat
# M = torch.rand(32)
# print(M.shape) # [32]
# print(M.unsqueeze(1).unsqueeze(2).unsqueeze(0).shape) # [1,32,1,1]
# print(M.unsqueeze(1).unsqueeze(2).unsqueeze(0).expand(4,32,28,28).shape) # [4,32,28,28]
# print(M.unsqueeze(1).unsqueeze(2).unsqueeze(0).repeat(4,1,28,28).shape) # [4,32,28,28]
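# # expand builds a view (no copy), while repeat allocates new memory; a quick check:
# base = M.unsqueeze(1).unsqueeze(2).unsqueeze(0)
# print(base.expand(4,32,28,28).data_ptr() == base.data_ptr()) # True: shared storage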
# # t & transpose & permute
# O = torch.rand(3,4)
# print(O.t().shape) # [4,3]; t() works only on 2-d matrices
# P = torch.rand(4,1,28,28)
# print(P.transpose(1,2).shape) # [4,28,1,28]
# Q = torch.rand(4,3,32,28) # [4,3,32,28]
# print(Q.transpose(1,3).shape) # [4,28,32,3]
# print(Q.transpose(1,3).transpose(0,2).shape) # [32,28,4,3]
# print(Q.transpose(1,3).transpose(0,2).permute(2,3,0,1).shape) # [4,3,32,28]
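# # note: transpose/permute return non-contiguous views; call .contiguous()
# # before view(), or use reshape() which handles this for you
# print(Q.transpose(1,3).contiguous().view(4,-1).shape) # [4,2688]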
# '''broadcasting'''
# # rule: missing dims get a size-1 dim inserted in front,
# # then every size-1 dim is expanded to match the other operand
# R = torch.tensor([5,6,7])
# S = torch.tensor([[1,2,3],[1,2,3]])
# print(R.shape, S.shape, R+S) # R is broadcast from [3] to [2,3]
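# # a typical real case: adding a per-channel bias to a feature map
# # (feat/bias are made-up names for illustration)
# feat = torch.rand(4,32,14,14)
# bias = torch.rand(32,1,1) # broadcast: [32,1,1] -> [1,32,1,1] -> [4,32,14,14]
# print((feat + bias).shape) # [4,32,14,14]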
# '''merge & split'''
# T = torch.rand(4,32,8)
# U = torch.rand(4,32,8)
# V = torch.cat([T,U],dim=2) # cat: all dims except the cat dim must match
# print(V.shape) # [4,32,16]
# T = torch.rand(4,32,32)
# U = torch.rand(4,32,32)
# V = torch.stack([T,U],dim=0) # stack: adds a new dim; shapes must match exactly
# print(V.shape) # [2,4,32,32]
# W = torch.randn(3,32,32)
# X,Y,Z = W.split([2,2,28], dim=1) # split by the given lengths
# print(X.shape, Y.shape, Z.shape) # [3,2,32] [3,2,32] [3,28,32]
# X,Y,Z = W.chunk(3, dim=0) # chunk into a number of pieces
# print(X.shape, Y.shape, Z.shape) # [1,32,32] each
# '''--------------------------------------------------------'''
# # basic operations
# a = torch.rand(3, 4)
# b = torch.rand(4)
# c = a + b # add
# d = torch.add(a, b) # add
# e = torch.all(torch.eq(c, d))
# print(e)
# c = a - b # sub
# d = torch.sub(a, b) # sub
# e = torch.all(torch.eq(c, d))
# print(e)
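# # mul & div follow the same pattern (added here for completeness):
# c = a * b # element-wise multiply, with broadcasting
# d = torch.mul(a, b)
# print(torch.all(torch.eq(c, d)))
# c = a / b # element-wise divide
# d = torch.div(a, b)
# print(torch.all(torch.eq(c, d)))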
# # 2d matrix multiply
# a = torch.tensor([[1, 2], [1, 2]])
# b = torch.tensor([[1, 2], [1, 2]])
# c = torch.mm(a, b) # mm works only on 2-d matrices
# d = torch.matmul(a, b)
# e = a @ b
# print(c)
# print(d)
# print(e)
# # an example of linear map function 784->512
# X1 = torch.randn(4,784)
# W1 = torch.randn(512, 784) # stored as (out_features, in_features), like nn.Linear
# H1 = X1 @ W1.t() # [4,784] @ [784,512] -> [4,512]
# print(H1.shape)
# # >2d matrix multiply
# a = torch.randn(4,3,28,32)
# b = torch.randn(4,1,32,28)
# c = torch.matmul(a,b) # matmul broadcasts the leading (batch) dims
# print(c.shape) # [4,3,28,28]
# # power
# a = torch.full([2,2],3.) # use a float fill value so sqrt/rsqrt below work
# b = a.pow(2)
# c = a ** 2
# print(b)
# print(c)
# d = b.sqrt()
# e = c ** 0.5
# print(d)
# print(e)
# f = d.rsqrt() # reciprocal of the square root
# print(f)
# # exp & log
# a = torch.ones(2,2)
# b = torch.exp(a)
# print(b)
# c = torch.log(a) # natural log (log2 / log10 for other bases)
# print(c)
# # floor, ceil, trunc, frac, round
# a = torch.rand(2,2)
# print(a)
# b = a.floor()
# print(b)
# c = a.ceil()
# print(c)
# d = a.trunc()
# print(d)
# e = a.frac()
# print(e)
# f = a.round()
# print(f)
# # clamp
# grad = torch.rand(2,3)*15
# print(grad, grad.median())
# g = grad.clamp(0, grad.median().item()) # clamp values into [0, median]
# print(g, grad.median())
# # statistics
# # p-norm
# a = torch.randn(8).float()
# print(a)
# b = a.view(2,4)
# c = a.view(2,2,2)
# print(b)
# print(c)
# print(a.norm(1), b.norm(1), c.norm(1))
# print(a.norm(2), b.norm(2), c.norm(2))
# print(b.norm(1, dim=1))
# print(c.norm(2, dim=1))
# # max min mean prod
# print(a.min(), a.max(), b.mean(dim=1), c.prod(dim=1))
# print(a.sum(), b.argmin(dim=1), c.argmax(dim=1, keepdim=True))
# # top-k & k-th
# print(a.topk(3), b.topk(3, dim=1), c.topk(1, dim=1, largest=False))
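# # kthvalue complements topk: it returns the k-th *smallest* value and its index
# print(a.kthvalue(2)) # 2nd smallest of the 8 values in a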
# # compare
# a = torch.randn(3,3)
# b = torch.randn(3,3)
# c = torch.eq(a, b) # element-wise; returns a bool tensor
# d = torch.equal(a, b) # a single bool: same shape and same values
# print(c)
# print(d)
# # advanced operations
# # torch.where(condition, x, y)
# # returns a tensor taking elements from x where condition is True, else from y
# a = torch.randn(2,2)
# print(a)
# b = torch.ones(2,2)
# c = torch.zeros(2,2)
# d = torch.where(a>0.5, b, c)
# print(d)
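# # where also accepts tensors computed from the input, e.g. a hand-rolled relu
# # (just a sketch; torch.relu is the real thing)
# print(torch.where(a > 0, a, torch.zeros_like(a)))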
# # gather
# # torch.gather(input, dim, index, out=None) -> Tensor
# prob = torch.randn(4,10)
# idx = prob.topk(3, dim=1) # a (values, indices) namedtuple
# label = torch.arange(100, 110)
# nums = torch.gather(label.expand(4,10), dim=1, index=idx[1]) # idx[1] = the indices
# print(nums) # the labels of the top-3 predictions per row
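# # for dim=1 the rule is: out[i][j] = input[i][ index[i][j] ]
# # a tiny hand-checkable sketch:
# src = torch.tensor([[10,20,30],[40,50,60]])
# print(torch.gather(src, 1, torch.tensor([[2,0],[1,1]]))) # [[30,10],[50,50]]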