a = torch.randn(2, 3)  # random 2x3 tensor; randn samples from the standard normal N(0, 1)
a.type() # 'torch.FloatTensor'
isinstance(a, torch.FloatTensor)  # True, type check
isinstance(a, torch.cuda.FloatTensor) # False
a = a.cuda()
isinstance(a, torch.cuda.FloatTensor) # True
Uppercase Tensor takes a size and returns an uninitialized tensor; lowercase tensor takes the actual data
torch.ones(), torch.empty(), torch.rand(), torch.randn() take a size
np.array() takes the actual data
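# a minimal side-by-side sketch of the two constructors (values in t1 are arbitrary):
t1 = torch.Tensor(2, 3)  # size -> uninitialized 2x3 tensor
t2 = torch.tensor([[1., 2., 3.], [4., 5., 6.]])  # data -> tensor holding exactly these values
t1.shape, t2.shape  # both torch.Size([2, 3])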
# create a 0-dim tensor (scalar)
a = torch.tensor(1.3)  # tensor(1.3000)
a.shape  # torch.Size([]); a.size() gives the same result
len(a.shape) # 0
# create a 1-dim tensor
torch.tensor([1.3])  # tensor([1.3000])
torch.tensor([1.1, 1.2])  # tensor([1.1000, 1.2000])
torch.FloatTensor(2)  # uninitialized tensor of shape [2]
a = torch.ones(2); a.shape  # torch.Size([2])
# create a 2-dim tensor
a = torch.randn(2, 3)
torch.FloatTensor(2, 3)  # uninitialized tensor of shape [2, 3]
a.shape # torch.Size([2,3])
a.shape[1] # 3
a.size(1) # 3
# create a 3-dim tensor
a = torch.rand(1, 2, 3)  # random 3-dim tensor; rand samples from the uniform distribution on [0, 1)
a.shape # torch.Size([1,2,3])
list(a.shape) # [1, 2, 3]
a.numel() # 6 = 1 * 2 * 3
a.dim() # 3
# import from numpy (numpy defaults to double precision)
data = np.array([2, 3.3])  # array([2. , 3.3])
data = np.ones(2)  # array([1., 1.]), shape (2,)
torch.from_numpy(data)  # tensor([1., 1.], dtype=torch.float64)
# import from list
# uppercase Tensor takes a size (uninitialized); lowercase tensor takes the actual data
torch.tensor([[2., 3.2], [1., 22.3]])
torch.empty()  # like torch.ones(), takes a size, but the memory is left uninitialized (not zeroed)
# torch.tensor() defaults to 'torch.FloatTensor' for floating-point data
# change the default type: torch.set_default_tensor_type(torch.DoubleTensor)
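# quick check of the default dtype (set_default_tensor_type is the legacy API; newer releases also offer torch.set_default_dtype):
torch.tensor([1.2, 3.0]).type()  # 'torch.FloatTensor'
torch.set_default_tensor_type(torch.DoubleTensor)
torch.tensor([1.2, 3.0]).type()  # 'torch.DoubleTensor'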
# indexing
a = torch.rand(4,3,28,28)
a[0].shape # torch.Size([3,28,28])
a[0,0].shape # torch.Size([28,28])
a[0,0,2,4] # tensor(0.8082)
# slicing
a[:2].shape  # torch.Size([2, 3, 28, 28]); start is inclusive, end is exclusive
a[:2, -1:, :, :].shape  # torch.Size([2, 1, 28, 28])
a[:, :, 0:28:2, 0:28:2].shape  # torch.Size([4, 3, 14, 14]); the value after the second colon is the step
a[:, :, ::2, ::2].shape  # torch.Size([4, 3, 14, 14]); equivalent to the line above
# index_select
a.index_select(0, torch.tensor([0, 2])).shape  # images 0 and 2 along dim 0; torch.Size([2, 3, 28, 28])
a.index_select(2, torch.arange(0, 8)).shape  # torch.Size([4, 3, 8, 28])
a[0, ...].shape  # torch.Size([3, 28, 28]); the ellipsis fills in all omitted dimensions
# mask indexing
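# a minimal sketch of mask indexing (values are illustrative):
x = torch.randn(3, 4)
mask = x.ge(0.5)  # BoolTensor, True where x >= 0.5
torch.masked_select(x, mask)  # 1-D tensor of the selected elements (the original shape is not kept)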
view(): takes the target size
unsqueeze(): inserts a dimension at the given index
squeeze(): removes the dimension at the given index (only if its size is 1)
expand: expands dimensions; takes the size after expansion
repeat: expands dimensions; takes the repeat count per dimension
transpose: swaps two dimensions
permute: reorders all dimensions
# view: reshape
a = torch.rand(4, 1, 28, 28)
a.view(4, 28*28)  # torch.Size([4, 784])
a.shape  # still torch.Size([4, 1, 28, 28]); view returns a new view, a is unchanged
# unsqueeze: insert a dimension, increasing the number of dims
a.unsqueeze(0).shape  # torch.Size([1, 4, 1, 28, 28])
a.unsqueeze(-4).shape  # torch.Size([4, 1, 1, 28, 28])
# squeeze: remove a dimension, decreasing the number of dims (only size-1 dims are removed)
a.squeeze(0).shape  # torch.Size([4, 1, 28, 28]); dim 0 has size 4, so nothing changes
a.squeeze(1).shape  # torch.Size([4, 28, 28])
# expand: expand dimensions; only dims of size 1 can be expanded
a = torch.rand(4, 32, 14, 14)
b = torch.rand(1, 32, 1, 1)
b.expand(4, 32, 14, 14).shape  # torch.Size([4, 32, 14, 14])
# repeat: expand by copying memory; takes the repeat count per dimension
b = torch.rand(1, 32, 1, 1)
b.repeat(4, 32, 1, 1).shape  # torch.Size([4, 1024, 1, 1]); dim 1 becomes 32*32
# transpose of a matrix: b.t(), where b must be a 2-dim tensor (a matrix)
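# quick check of .t() on a matrix (shape chosen for illustration):
b = torch.rand(3, 4)
b.t().shape  # torch.Size([4, 3])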
# transpose: swap two dimensions
a.shape  # torch.Size([4, 3, 32, 32])
a.transpose(1, 3).shape  # torch.Size([4, 32, 32, 3])
a1 = a.transpose(1,3).contiguous().view(4,3072).view(4,32,32,3).transpose(1,3)  # a1 equals a; be careful to keep the dimensions matched up
# permute: reorder all dimensions in one call
a.shape  # torch.Size([4, 3, 28, 32])
a.permute(0, 2, 3, 1).shape  # torch.Size([4, 28, 32, 3])
# broadcast: insert new dims at the front, then expand them to the matching size (values are logically copied, without allocating new memory)
# feature map: [4, 32, 14, 14]; bias: [32, 1, 1] -> [1, 32, 1, 1] -> [4, 32, 14, 14]
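# a minimal sketch of that bias addition (names fm/bias are illustrative):
fm = torch.rand(4, 32, 14, 14)  # feature map
bias = torch.rand(32, 1, 1)  # one bias value per channel
(fm + bias).shape  # torch.Size([4, 32, 14, 14]); bias is broadcast to [1,32,1,1] and then expanded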
# cat
a = torch.rand(4,32,8)
b = torch.rand(5,32,8)
torch.cat([a,b],dim=0).shape # torch.Size([9,32,8])
# stack (the two tensors must have exactly the same size; stack inserts a new dimension)
a1 = torch.rand(4,3,16,32)
a2 = torch.rand(4,3,16,32)
torch.cat([a1,a2],dim=2).shape # torch.Size([4,3,32,32])
torch.stack([a1,a2],dim=2).shape # torch.Size([4,3,2,16,32])
# split
c = torch.rand(3, 32, 8)  # torch.Size([3, 32, 8])
a,b = c.split([1,2],dim=0)
a.shape # torch.Size([1,32,8])
b.shape # torch.Size([2,32,8])
# chunk
c = torch.rand(2, 32, 8)  # torch.Size([2, 32, 8])
a,b = c.chunk(2,dim=0) # a,b : torch.Size([1,32,8])
# elementwise arithmetic
a = torch.rand(3,4)
b = torch.rand(4)
a + b  # equivalent to torch.add(a, b); broadcasting expands b to [3, 4]
# higher-dimensional matrix multiplication:
# only the last two dims are multiplied; the leading dims stay as they are (they must match, or be broadcastable to match)
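# a small matmul sketch (shapes chosen for illustration):
x = torch.rand(4, 3, 28, 64)
y = torch.rand(4, 3, 64, 32)
torch.matmul(x, y).shape  # torch.Size([4, 3, 28, 32]); each [28,64] @ [64,32]
z = torch.rand(4, 1, 64, 32)
torch.matmul(x, z).shape  # torch.Size([4, 3, 28, 32]); dim 1 broadcasts from 1 to 3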
# powers
a = torch.full([2, 2], 3.)  # shape 2x2, every value 3
a.pow(2)  # tensor([[9., 9.], [9., 9.]])
a.pow(2).sqrt()  # tensor([[3., 3.], [3., 3.]]); note pow() does not modify a in place
a = torch.exp(torch.ones(2,2)) # [[1,1],[1,1]] -> [[e,e],[e,e]]
a = torch.log(a) # [[e,e],[e,e]] -> [[1,1],[1,1]]
# matrix statistics
grad = torch.rand(2, 3)
grad.max(); grad.min(); grad.median()
grad.clamp(min, max)  # clamp every element into the range [min, max]
a.max(dim=1, keepdim=True)  # keepdim=True keeps the number of dims unchanged after the reduction
a.topk(3, dim=1, largest=True)  # like max, but returns the k largest values instead of just the 1 largest
a.kthvalue(8, dim=1)  # the 8th smallest value along dim 1
# where
condition # [[0.6769,0.7271],[0.8884,0.4143]]
a # [[0,0],[0,0]]
b # [[1,1],[1,1]]
torch.where(condition > 0.5, a, b)  # [[0, 0], [0, 1]]; take a where the condition holds, b elsewhere
# gather
origin  # [[7, 4, 9], [7, 4, 9], [8, 1, 3], [8, 6, 0]]
reference  # [100, 101, ..., 109]
torch.gather(reference.expand(4, 10), dim=1, index=origin)
# [[107, 104, 109], [107, 104, 109], [108, 101, 103], [108, 106, 100]]
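# a self-contained version of the lookup above:
reference = torch.arange(100, 110)  # tensor([100, 101, ..., 109])
origin = torch.tensor([[7, 4, 9], [7, 4, 9], [8, 1, 3], [8, 6, 0]])
torch.gather(reference.expand(4, 10), dim=1, index=origin)
# tensor([[107, 104, 109], [107, 104, 109], [108, 101, 103], [108, 106, 100]])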
# first start a listening server process: python -m visdom.server
from visdom import Visdom
viz = Visdom()
# the two calls below create a window containing only the origin, then keep appending loss.item() points to draw the curve
# create the window; arguments left to right: Y, X, window ID, window title
# the default environment is main
viz.line([0.], [0.], win='train_loss', opts=dict(title='train_loss'))
# plot a point; left to right: Y, X, append to the existing curve
viz.line([loss.item()], [global_step], win='train_loss', update='append')
# multiple curves
# Y1, Y2, X, window ID, title; legend labels y1 and y2
viz.line([0.0, 0.0], [0.], win='test', opts=dict(title='test loss&acc', legend=['loss', 'acc']))
viz.line([[test_loss, correct / len(test_loader.dataset)]], [global_step], win='test', update='append')
# visualization (numpy data or images)
viz.images(data.view(-1,1,28,28), win='x')
viz.text(str(pred.detach().cpu().numpy()),win='pred',opts=dict(title='pred'))