Tensor的算术运算
import torch

# Two 1-D float tensors reused by all of the elementwise demos below.
a = torch.tensor([1.0, 2.0])
b = torch.tensor([3.0, 4.0])
### Addition — operator, functional and method spellings are equivalent.
c = a + b
c = torch.add(a, b)
c = a.add(b)
print(a)
c = a.add_(b)  # trailing underscore = in-place: mutates a and returns it
print(a)
### Subtraction
c = a - b
c = torch.sub(a, b)
c = a.sub(b)
print(c)
print(a)
c = a.sub_(b)  # in-place: a is restored to its pre-add_ value here
print(c)
print(a)
### Multiplication (elementwise)
c = a * b
c = torch.mul(a, b)
c = a.mul(b)
print(c)
c = a.mul_(b)  # in-place
print(c)
### Division (elementwise)
c = a / b
c = torch.div(a, b)
c = a.div(b)
print(c)
c = a.div_(b)  # in-place; undoes the mul_ above
print(c)
### Matrix multiplication: (m, n) @ (n, k) -> (m, k)
a = torch.ones(2, 1)
b = torch.ones(1, 3)
print(a, b)
### Every spelling of the same matrix product
c = torch.mm(a, b)
c = torch.matmul(a, b)
print(c)
c = a @ b
c = a.matmul(b)
c = a.mm(b)
print(c)
### Power
c = torch.pow(a, 2)
c = a.pow(2)
c = a ** 2
c = a.pow_(2)  # in-place: a stays all-ones since 1**2 == 1
print(c)
### Exponential (e**x)
c = torch.exp(a)
c = a.exp_()  # in-place: a becomes e
print(c)
### Square root
c = a.sqrt()
c = a.sqrt_()  # in-place: a becomes sqrt(e)
print(c)
### Logarithms (base 2, base 10, natural)
c = torch.log2(a)
c = torch.log10(a)
c = torch.log(a)
c = torch.log_(a)  # in-place natural log: a becomes ln(sqrt(e)) == 0.5
print(c)
Tensor序列的创建
# All-ones tensor of the given shape
a = torch.ones(2, 5)
# All-zeros tensor
a = torch.zeros(3, 3)
# Ones on the main diagonal, zeros elsewhere
a = torch.eye(3, 3)
# Zeros with the same shape (and dtype) as a
b = torch.zeros_like(a)
# Ones with the same shape as a
b = torch.ones_like(a)
# Tensor creation mirrors numpy closely — compare:
import numpy as np
b = np.array([[1, 2, 3], [2, 3, 4]])
b = np.ones((2, 3))
c = np.zeros_like(b)
# A Tensor can be thought of as a numpy array plus autograd/device support.
# Uniform random values in [0, 1) with the given shape
a = torch.rand(2, 3)
# Normal distribution: std here is a tensor of 5 per-element stds,
# so the output has 5 entries (the shape comes from std, it is not a size arg)
a = torch.normal(mean=0.0, std=torch.rand(5))
# Uniform sampling over [-1, 1]; (2, 3) is the shape
a = torch.Tensor(2, 3).uniform_(-1, 1)
# Standard normal
a = torch.randn(10)
# Integer sequence over [0, 10) with step 1
a = torch.arange(0, 10, 1)
# 5 evenly spaced points over the closed interval [2, 10]
a = torch.linspace(2, 10, 5)
# Random permutation of [0, m), here m = 10
a = torch.randperm(10)
Tensor的索引与数据筛选:torch.where()、torch.index_select()、torch.masked_select()、torch.gt()、torch.nonzero()、torch.take()
### torch.where
# torch.where(cond, a, b): elementwise — take from a where cond holds, else from b
a = torch.rand(4, 4)
b = torch.rand(4, 4)
print(a)
print(b)
out = torch.where(a > 0.5, a, b)
print(out)
### torch.index_select
# Gather whole slices along `dim` in the order listed by `index`
a = torch.rand(4, 4)
print(a)
out = torch.index_select(a, dim=0, index=torch.tensor([0, 3, 2]))
print(out)
### torch.masked_select
# Returns a flat 1-D tensor of the elements where the boolean mask is True.
# torch.gt(a, 8) marks elements strictly greater than 8.
a = torch.linspace(1, 16, 16)
a = a.view(4, 4)
print(a)
mask = torch.gt(a, 8)
print(mask)
out = torch.masked_select(a, mask)
print(out)
### torch.take
# Treats the input as flattened 1-D and gathers by flat index
a = torch.linspace(1, 16, 16).view(4, 4)
b = torch.take(a, index=torch.tensor([0, 15, 13, 10]))
print(a)
print(b)
### torch.nonzero
# Coordinates of every non-zero entry, one row of indices per hit
a = torch.tensor([[0, 1, 2, 0], [2, 3, 0, 1]])
print(a)
out = torch.nonzero(a)
print(out)
Tensor的组合与拼接
# torch.cat(seq, dim): concatenate along an axis that already exists
# torch.stack(seq, dim): concatenate along a brand-new axis
# torch.cat
a = torch.zeros((2, 4))
b = torch.ones((2, 4))
print(a)
print(b)
out = torch.cat((a, b), dim=0)  # (2,4)+(2,4) -> (4,4)
print(out)
print(out.shape)
# torch.stack
a = torch.linspace(1, 9, 9).view(3, 3)
b = torch.linspace(10, 18, 9).view(3, 3)
print(a)
print(b)
out = torch.stack((a, b), dim=2)  # (3,3)+(3,3) -> (3,3,2)
print(out)
print(out[:, :, 0])  # slice 0 of the new axis recovers a
print(out[:, :, 1])  # slice 1 recovers b
分割、压缩、变形—torch.chunk()、torch.split()、torch.t()、torch.transpose() 、torch.squeeze()、torch.unsqueeze()、torch.unbind() 、torch.flip()、torch.full()
# torch.chunk(): split into roughly equal chunks along a dim
# (the last chunk may be smaller when the dim size is not divisible)
a = torch.rand((5, 4))
print(a)
out = torch.chunk(a, 2, dim=0)  # 5 rows -> chunks of 3 and 2 rows
print(out)
# torch.split(): split into pieces of a fixed size, or of explicit sizes
a = torch.linspace(1, 20, 20).view(4, 5)
print(a)
print("===========================")
out = torch.split(a, 2, dim=1)  # columns grouped as 2, 2, 1
print(out)
# Explicit sizes must sum exactly to the split dim's length (4 rows here).
# BUG FIX: the original passed [1, 2, 2], which sums to 5 and raises
# RuntimeError on a 4-row tensor.
out1 = torch.split(a, [1, 1, 2], dim=0)  # pieces of 1, 1 and 2 rows
print(out1)
# torch.reshape(): new shape, same data (similar to view)
a = torch.rand((5, 4))
out = torch.reshape(a, (4, 5))
print(out)
# torch.t(): 2-D transpose
print(torch.t(a))
# torch.transpose(): swap any two dims
print(out)
out1 = torch.transpose(out, 0, 1)
print(out1)
# torch.squeeze(): drop all size-1 dims; a is (5, 4), so this is a no-op
print(a)
print(a.shape)
out = torch.squeeze(a)
print(out)
print(out.shape)
# torch.unsqueeze(): insert a size-1 dim at the given position
print(a)
print(a.shape)
print('---------------------')
out = torch.unsqueeze(a, -1)  # -1 appends as the last dim -> (5, 4, 1)
out = torch.unsqueeze(a, 1)   # insert at index 1 (the second dim) -> (5, 1, 4)
print(out)
print(out.shape)
# torch.unbind(): remove a dim, returning the tuple of slices along it;
# dim=1 on a (5, 4) tensor yields 4 tensors of shape (5,)
print(a)
print(a.shape)
print('---------------------')
out = torch.unbind(a, dim=1)
# similar to split with size 1, but the unbound dim disappears
print(out)
print(out[0].shape)
# torch.flip(): reverse the order of elements along the listed dims
print(a)
print(a.shape)
print('---------------------')
out = torch.flip(a, dims=[1])
print(out)
# torch.full(): new tensor filled with a single value
out = torch.full((6, 6), 6)
print(out)
练习1:创建 a、b 两个张量,调整两个张量的 shape,分别采用 tensor.mul()、
tensor.dot()、tensor.matmul()进行计算,比较三者的区别。
import torch

# torch.mul: elementwise product — operand shapes must match (or broadcast)
a = torch.linspace(1, 16, 16).view(4, 4)
b = torch.rand(4, 4)
out = torch.mul(a, b)
print(out)
# torch.dot: dot product, defined only for 1-D tensors
a = torch.Tensor([1, 2])
b = torch.Tensor([5, 6])
print(a.shape, b.shape)
out = torch.dot(a, b)
print(out)
# torch.matmul: true matrix product, (m, n) @ (n, k) -> (m, k)
a = torch.linspace(1, 16, 16).view(4, 4)
b = torch.rand(4, 5)
print(a.shape, b.shape)
out = torch.matmul(a, b)
print(out)
练习2:创建一个张量 c 服从标准正态分布,将 c 中的负数替换成该数的绝对值。
# Draw 10 samples from the standard normal, then use torch.where to
# replace each negative entry with its absolute value.
c1 = torch.randn(10)
print(c1)
c3 = torch.where(c1 > 0, c1, c1.abs())
print(c3)
练习3:创建 d、e 两个张量,tensor.cat()和 tensor.stack()两种方式拼接两个张量,再以任意方式降维
d = torch.rand(3, 3)
e = torch.linspace(1, 9, 9).view(3, 3)
# cat joins along an existing dim: (3, 3) + (3, 3) -> (3, 6)
out = torch.cat((d, e), dim=1)
print(out, out.shape)
# unbind removes dim 1, leaving six tensors of shape (3,)
out_un = torch.unbind(out, dim=1)
print(out_un)
# reshape can also lower the dimensionality
f2 = torch.reshape(out, [2, 9])
print(f2.shape)
# stack adds a new dim: (3, 3) + (3, 3) -> (3, 2, 3)
out1 = torch.stack((d, e), dim=1)
# NOTE(review): out1 has no size-1 dims, so squeeze() is a no-op here —
# it does not actually reduce the rank of a (3, 2, 3) tensor
out1_un = torch.squeeze(out1)
print(out1)
print(out1_un)
练习4:zeros 张量,shape=(2,4),将张量中的 index=[0,2] 和 index=[1,3] 的元素填充为 1
# scatter_(1, index, 1): for each row r, set z[r, index[r][k]] = 1 —
# here element (0, 2) and element (1, 3) are filled with 1, in place.
z = torch.zeros(2, 4)
z.scatter_(1, torch.LongTensor([[2], [3]]), 1)
print(z)
torch.LongTensor是64位整型
补充:scatter_()函数
scatter(dim, index, src)
dim:沿着哪个维度进行索引
index: 用来scatter的元素索引
src: 用来scatter的源元素,可以是一个标量也可以是一个张量