A side-by-side comparison of numpy and torch data operations, kept as a reminder.
import torch
import numpy as np
np_data = np.arange(6).reshape((2, 3))
torch_data = torch.arange(6)  # tensor
tensor2array = torch_data.numpy()
print(
'\nnumpy array:\n', np_data,
'\ntorch tensor\n', torch_data,
'\ntensor to array\n', tensor2array
)
"""
numpy array:
[[0 1 2]
[3 4 5]]
torch tensor
tensor([0, 1, 2, 3, 4, 5])
tensor to array
[0 1 2 3 4 5]
"""
import torch
import numpy as np
np_data = np.array([[i for i in range(r * 4, (r + 1) * 4)] for r in range(5)], dtype=np.float32)
np_data = np_data.reshape((2, 5, 2))  # reshape the 5x4 array into 2x5x2
print('numpy shape', np_data.shape)  # shape of the array
print('numpy ndim', np_data.ndim)  # number of axes (the rank), i.e. the length of shape
print('numpy size', np_data.size)  # total number of elements, i.e. the product of shape
print('numpy dtype', np_data.dtype)  # element data type
print('numpy itemsize', np_data.itemsize)  # size in bytes of one element: float32 -> 32 / 8 = 4
np_data = np_data[:, :, np.newaxis, :]  # add a dimension
print('numpy add dim, np.newaxis', np_data.shape)
np_data = np_data.transpose((2, 0, 1, 3))  # transpose the axes
print('numpy transpose, np.transpose', np_data.shape)
np_data = np_data.squeeze(0)  # remove dimension 0; only size-1 dimensions can be squeezed
print('numpy remove dim, np.squeeze', np_data.shape)
#######################
print()
#######################
torch_data = torch.Tensor(
[[i for i in range(r * 4, (r + 1) * 4)] for r in range(5)]
)
torch_data = torch_data.view(2, 5, 2)  # the method is view here: view the 5x4 tensor as 2x5x2
print('torch shape', torch_data.shape)
print('torch ndim', torch_data.ndim)
print('torch size', torch_data.size())  # size is a method call in torch
print('torch dtype', torch_data.dtype)  # no itemsize attribute (use element_size() instead)
torch_data = torch_data.unsqueeze(2)  # add a dimension
print('torch add dim, torch.unsqueeze', torch_data.shape)
torch_data = torch_data.permute((2, 0, 1, 3))  # transpose the axes with permute
print('torch permute', torch_data.shape)
torch_data = torch_data.squeeze(0)  # remove dimension 0; only size-1 dimensions can be squeezed
print('torch remove dim, torch.squeeze', torch_data.shape)
"""
numpy shape (2, 5, 2)
numpy ndim 3
numpy size 20
numpy dtype float32
numpy itemsize 4
numpy add dim, np.newaxis (2, 5, 1, 2)
numpy transpose, np.transpose (1, 2, 5, 2)
numpy remove dim, np.squeeze (2, 5, 2)
torch shape torch.Size([2, 5, 2])
torch ndim 3
torch size torch.Size([2, 5, 2])
torch dtype torch.float32
torch add dim, torch.unsqueeze torch.Size([2, 5, 1, 2])
torch permute torch.Size([1, 2, 5, 2])
torch remove dim, torch.squeeze torch.Size([2, 5, 2])
"""
import torch
import numpy as np
np_data = np.array([[1, 2], [1, 2]])
torch_data = torch.tensor(np_data)
np_t = np.array([0, np.pi / 4., np.pi / 2.])
torch_t = torch.tensor(np_t)
# mean
np_x = np.arange(5)
torch_x = torch.FloatTensor([i for i in range(5)])
# torch.mean only works on floating-point values, not integers, so a FloatTensor is required
print(
'\nnumpy.abs:\n', np.abs(np_data),
'\ntorch.abs:\n', torch.abs(torch_data),
'\nnumpy.sin:\n', np.sin(np_t),
'\ntorch.sin:\n', torch.sin(torch_t),
'\nnumpy.mean\n', np.mean(np_x),
'\ntorch.mean\n', torch.mean(torch_x)
)
"""
numpy.abs:
[[1 2]
[1 2]]
torch.abs:
tensor([[1, 2],
[1, 2]])
numpy.sin:
[0. 0.70710678 1. ]
torch.sin:
tensor([0.0000, 0.7071, 1.0000], dtype=torch.float64)
numpy.mean
2.0
torch.mean
tensor(2.)
"""
import torch
import numpy as np
# numpy matrix multiplication
na = np.array([[1, 2],
[3, 4]])
nb = np.array([[1, 1],
[0, 1]])
nc = np.matmul(na, nb)
nd = na * nb  # element-wise product, not matrix multiplication
print(nc, nd, sep='\n')
# torch matrix multiplication
ta = torch.FloatTensor([[1, 2],
[3, 4]])
tb = torch.FloatTensor([[1, 1],
[0, 1]])
tc = torch.mm(ta, tb)
td = ta * tb  # element-wise product
print(tc, td, sep='\n')
"""
[[1 3]
[3 7]]
[[1 2]
[0 4]]
tensor([[1., 3.],
[3., 7.]])
tensor([[1., 2.],
[0., 4.]])
"""
import torch
from torch.autograd import Variable
# Variable: wraps a tensor so that gradients can be tracked
tensor = torch.FloatTensor([[1, 2], [3, 4]])
variable = Variable(tensor, requires_grad=True)
print(tensor)
print(variable)
t_mean = torch.mean(tensor * tensor)  # plain tensor: no graph is built
v_mean = torch.mean(variable * variable)  # computed from a Variable, so backward() can run
print(t_mean)
print(v_mean)
# backpropagate to compute the gradient
v_mean.backward()
print(variable.grad)
print(variable)  # as a Variable
print(variable.data)  # as a tensor
print(variable.data.numpy())  # as a numpy array; final results are usually handled in numpy form
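Variable has been deprecated since PyTorch 0.4; the same computation can be written with a plain tensor, a sketch of the modern form:
x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
mean = torch.mean(x * x)  # mean of x^2, so d(mean)/dx = 2x / 4 = x / 2
mean.backward()
print(x.grad)  # tensor([[0.5000, 1.0000], [1.5000, 2.0000]])
print(x.detach().numpy())  # detach() replaces .data for getting a plain array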
import torch
import torch.nn as nn
from torch.autograd import Variable
# max pooling and unpooling
pool = nn.MaxPool2d(
kernel_size=2,
stride=2,
return_indices=True,
ceil_mode=True
)
unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
in_data = Variable(
torch.Tensor([[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]]])
)
print('\nin_data\n', in_data)
print(in_data.shape)
out_data, indices = pool(in_data)
print('\nout_data\n', out_data)
print('\nindices\n', indices)
un_data = unpool(out_data, indices)
print('\nun_data\n', un_data)
"""
in_data
tensor([[[[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.],
[13., 14., 15., 16.]]]])
torch.Size([1, 1, 4, 4])
out_data
tensor([[[[ 6., 8.],
[14., 16.]]]])
indices
tensor([[[[ 5, 7],
[13, 15]]]])
un_data
tensor([[[[ 0., 0., 0., 0.],
[ 0., 6., 0., 8.],
[ 0., 0., 0., 0.],
[ 0., 14., 0., 16.]]]])
"""