1.关于tensor的一些判断
torch.is_tensor(obj)
torch.is_storage(obj)
torch.set_default_dtype(d) #设置默认的浮点dtype;初始默认值为torch.float32
torch.get_default_dtype() → torch.dtype #(→返回值,下同)
torch.set_default_tensor_type(t)
torch.numel(input) → int #返回tensor中所有的元素个数
torch.set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None)
2.创建tensor的一些方法
torch.tensor(data, dtype=None, device=None, requires_grad=False) → Tensor
torch.sparse_coo_tensor(indices, values, size=None, dtype=None, device=None, requires_grad=False)→ Tensor
torch.as_tensor(data, dtype=None, device=None) → Tensor
torch.from_numpy(ndarray) → Tensor
torch.zeros(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
torch.ones(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.ones_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
torch.arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.range(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor #已弃用:与arange不同,range包含end端点,建议改用torch.arange
torch.linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.logspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.eye(n, m=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.empty(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.empty_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
torch.full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.full_like(input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
3.一些变化
torch.cat(tensors, dim=0, out=None) → Tensor
torch.chunk(tensor, chunks, dim=0) → List of Tensors #在某一个维度将一个tensor分成几等份,chunks为int,即需要分成的份数
torch.gather(input, dim, index, out=None) → Tensor #Gathers values along an axis specified by dim.
torch.index_select(input, dim, index, out=None) → Tensor #沿dim维度按index(LongTensor)中给出的索引选取元素,索引可任意、可重复,不限于连续切片
torch.masked_select(input, mask, out=None) → Tensor
torch.narrow(input, dimension, start, length) → Tensor
torch.nonzero(input, out=None) → LongTensor #返回所有非零元素的位置索引(而非元素值本身)
torch.reshape(input, shape) → Tensor
torch.split(tensor, split_size_or_sections, dim=0)
torch.squeeze(input, dim=None, out=None) → Tensor #去掉大小为1的维度(指定dim时仅当该维度大小为1才去掉)
torch.stack(seq, dim=0, out=None) → Tensor
torch.t(input) → Tensor
torch.take(input, indices) → Tensor
torch.transpose(input, dim0, dim1) → Tensor
torch.unbind(tensor, dim=0) → seq
torch.unsqueeze(input, dim, out=None) → Tensor
torch.where(condition, x, y) → Tensor
4.Random Sampling 建立随机矩阵
torch.manual_seed(seed)
torch.initial_seed()
torch.get_rng_state()
torch.set_rng_state(new_state)
torch.default_generator → <torch._C.Generator object>
torch.bernoulli(input, *, generator=None, out=None) → Tensor
torch.multinomial(input, num_samples, replacement=False, out=None) → LongTensor
torch.normal()
torch.normal(mean, std, out=None) → Tensor
torch.rand(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.rand_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
torch.randint(low=0, high, size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.randint_like(input, low=0, high, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.randn(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)→ Tensor
torch.randn_like(input, dtype=None, layout=None, device=None, requires_grad=False) → Tensor
torch.randperm(n, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False) → LongTensor
5.线代相关
# 这些在矩阵的处理中会有很大的用处,比如SVD,特征值分解可以直接调用相关的工具
torch.addbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor
torch.addmm(beta=1, mat, alpha=1, mat1, mat2, out=None) → Tensor
torch.addmv(beta=1, tensor, alpha=1, mat, vec, out=None) → Tensor
torch.addr(beta=1, mat, alpha=1, vec1, vec2, out=None) → Tensor
torch.baddbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor
torch.bmm(batch1, batch2, out=None) → Tensor
torch.btrifact(A, info=None, pivot=True)
torch.btrifact_with_info(A, pivot=True) -> (Tensor, IntTensor, IntTensor)
torch.btrisolve(b, LU_data, LU_pivots) → Tensor
torch.btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True)
torch.chain_matmul(*matrices)
torch.cholesky(A, upper=False, out=None) → Tensor
torch.dot(tensor1, tensor2) → Tensor
torch.eig(a, eigenvectors=False, out=None) -> (Tensor, Tensor)
torch.gels(B, A, out=None) → Tensor
torch.geqrf(input, out=None) -> (Tensor, Tensor)
torch.ger(vec1, vec2, out=None) → Tensor
torch.gesv(B, A) -> (Tensor, Tensor)
torch.inverse(input, out=None) → Tensor
torch.det(A) → Tensor
torch.logdet(A) → Tensor
torch.slogdet(A) -> (Tensor, Tensor)
torch.matmul(tensor1, tensor2, out=None) → Tensor
torch.matrix_power(input, n) → Tensor
torch.matrix_rank(input, tol=None, symmetric=False) → Tensor
torch.mm(mat1, mat2, out=None) → Tensor
torch.mv(mat, vec, out=None) → Tensor
torch.orgqr(a, tau) → Tensor
torch.pinverse(input, rcond=1e-15) → Tensor
torch.potrf(a, upper=True, out=None)
torch.potrs(b, u, upper=True, out=None) → Tensor
torch.pstrf(a, upper=True, out=None) -> (Tensor, Tensor)
torch.qr(input, out=None) -> (Tensor, Tensor)
torch.svd(input, some=True, compute_uv=True, out=None) -> (Tensor, Tensor, Tensor)
torch.symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)