Correspondence
number -> scalar, array -> vector, 2d-array -> matrix
Picking out a single element requires 0, 1, and 2 indices respectively.
a = [1, 2, 3, 4]
a[2]  # one index into a 1-d array -> 3
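The vector case is shown above; the scalar and matrix cases complete the picture:
x = 5                 # scalar: 0 indices needed
m = [[1, 2], [3, 4]]  # matrix: 2 indices, e.g. m[1][0] -> 3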
A tensor can be understood as a multidimensional array: nd-array -> nd-tensor. But an n-dimensional tensor does not mean n components;
a three-dimensional tensor, for example, can hold far more than three components.
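For example, a three-dimensional array of shape 2x2x2 already holds 2*2*2 = 8 components:
t3 = [[[1, 2], [3, 4]],
      [[5, 6], [7, 8]]]  # 3 axes, 8 components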
dd = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]
The rank of a tensor is its number of dimensions (number of axes).
print(dd[0])     # [1, 2, 3]
print(dd[0][1])  # 2
A tensor's rank tells us how many axes it has; the elements along the last axis are always numbers.
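The same check in code, using the dd above (torch infers a 3x3, rank-2 tensor):
import torch
t = torch.tensor(dd)
print(t.dim())   # 2: the tensor has two axes
print(t.shape)   # torch.Size([3, 3])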
a = torch.Tensor(2, 2)          # uninitialized 2x2 FloatTensor
b = a.double()                  # convert to float64
c = a.type(torch.DoubleTensor)  # same conversion via type()
d = a.type_as(b)                # convert to b's dtype
a = torch.Tensor(2, 3)              # uninitialized 2x3 tensor
b = torch.DoubleTensor(2, 2)        # uninitialized 2x2 float64 tensor
c = torch.Tensor([[1, 2], [3, 4]])  # built from nested lists
d = torch.zeros(2, 2)               # all zeros
e = torch.ones(2, 2)                # all ones
f = torch.eye(2, 2)                 # 2x2 identity matrix
g = torch.randn(2, 2)               # samples from a standard normal
h = torch.arange(1, 6, 2)           # [1, 3, 5]: start 1, end 6 (exclusive), step 2
i = torch.randperm(4)               # random permutation of 0..3
j = torch.tensor([1, 2, 3])         # dtype inferred from the data (int64)
a = torch.randn(2, 2)
print(a.shape)       # torch.Size([2, 2])
print(a.size())      # same as .shape
print(a.numel())     # total number of elements: 4
print(a.nelement())  # alias of numel()
a = torch.Tensor([[1, 2], [3, 4]])
b = torch.Tensor([[5, 6], [7, 8]])
c = torch.cat([a, b], 0)  # concatenate along dim 0
print(c)
d = torch.cat([a, b], 1)  # concatenate along dim 1
print(d)
e = torch.stack([a, b], 0)  # stack along a new dim 0
print(e)
f = torch.stack([a, b], 1)  # stack along a new dim 1
print(f)
g = torch.stack([a, b], 2)  # stack along a new dim 2
print(g)
a = torch.Tensor([[1, 2, 3], [4, 5, 6]])
b = torch.chunk(a, 2, 0)  # split into 2 chunks along dim 0
print(b)
c = torch.chunk(a, 2, 1)  # split into 2 chunks along dim 1
print(c)
d = torch.split(a, 2, 0)  # split along dim 0, each piece of size 2
print(d)
e = torch.split(a, 2, 1)  # split along dim 1, each piece of size 2
print(e)
f = torch.split(a, [1, 2], 1)  # split along dim 1 into pieces of sizes 1 and 2
print(f)
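Note that chunk takes the number of pieces while split takes the piece size (or an explicit list of sizes); for the 2x3 input above the resulting shapes are:
# b: (1, 3) and (1, 3)   -- 2 chunks along dim 0
# c: (2, 2) and (2, 1)   -- 2 chunks along dim 1
# d: (2, 3)              -- size-2 pieces along dim 0 fit in a single piece
# e: (2, 2) and (2, 1)   -- size-2 pieces along dim 1
# f: (2, 1) and (2, 2)   -- explicit sizes 1 and 2 along dim 1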
a = torch.Tensor([[0, 1], [2, 3]])
print(a[1])     # second row: tensor([2., 3.])
print(a[0, 1])  # element at row 0, column 1: tensor(1.)
print(a > 0)    # boolean mask
print(a[a > 0])                       # elements where the mask is True: tensor([1., 2., 3.])
print(torch.masked_select(a, a > 0))  # same as a[a > 0]
print(torch.nonzero(a))               # indices of the nonzero elements
print(torch.where(a > 1, torch.full_like(a, 1), a))  # where a > 1 take 1, otherwise keep a
print(a.clamp(1, 2))  # clip all values into the range [1, 2]
a = torch.arange(1, 5)
print(a)  # tensor([1, 2, 3, 4])
b = a.view(2, 2)    # view shares the underlying storage with a
print(b)
c = a.resize(4, 1)  # deprecated; prefer reshape or the in-place resize_
print(c)
d = a.reshape(4, 1)  # returns a view when possible
print(d)
b[0, 0] = 0
c[1, 0] = 0
d[2, 0] = 0
print(a)  # tensor([0, 0, 0, 4]): all three writes hit a's shared storage
# in-place variant: a.resize_(2, 3) changes a directly
a = torch.randn(2, 2, 2)
b = a.transpose(0, 1)   # swap dims 0 and 1
print(b)
c = a.permute(2, 1, 0)  # arbitrary reordering of all dims
print(c)
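With all dims equal to 2 the reordering is easy to miss; a quick sketch with distinct sizes shows it directly:
x = torch.randn(1, 2, 3)
print(x.transpose(0, 1).shape)   # torch.Size([2, 1, 3])
print(x.permute(2, 1, 0).shape)  # torch.Size([3, 2, 1])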
import torch
from torch.autograd import Variable
# create Variable
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)
# Build a computational graph.
y = w * x + b
# Compute gradients
y.backward() # same as y.backward(torch.Tensor([1]))
# Print out the gradients.
print(x.grad)  # dy/dx = w = 2
print(w.grad)  # dy/dw = x = 1
print(b.grad)  # dy/db = 1
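Variable was merged into Tensor in PyTorch 0.4, so recent versions can drop the wrapper entirely; the same graph with plain tensors:
x = torch.tensor([1.0], requires_grad=True)
w = torch.tensor([2.0], requires_grad=True)
b = torch.tensor([3.0], requires_grad=True)
y = w * x + b
y.backward()
print(x.grad, w.grad, b.grad)  # tensor([2.]) tensor([1.]) tensor([1.])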
import torch
from torch.autograd import Variable
x = torch.randn(3)
x = Variable(x, requires_grad=True)
y = x * 2
print(y)
# y is a vector, not a scalar, so backward() needs a gradient of the same shape
y.backward(torch.Tensor([1, 0.1, 0.01]))
print(x.grad)  # dy/dx = 2 element-wise, scaled by the gradient: [2, 0.2, 0.02]
torch.utils.data.Dataset is the abstract class that represents a dataset; a subclass only needs to implement the two methods __len__ and __getitem__.
import pandas as pd
from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self, csv_file, txt_file, root_dir, other_file):
        self.csv_data = pd.read_csv(csv_file)
        with open(txt_file, 'r') as f:
            data_list = f.readlines()
        self.txt_data = data_list
        self.root_dir = root_dir

    def __len__(self):
        return len(self.csv_data)

    def __getitem__(self, idx):
        # .iloc indexes the DataFrame by row position
        data = (self.csv_data.iloc[idx], self.txt_data[idx])
        return data
A new iterator over the dataset is then defined through torch.utils.data.DataLoader:
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate

# DataLoader expects a Dataset instance, not the class itself
dataset = MyDataset(csv_file, txt_file, root_dir, other_file)
dataiter = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=default_collate)
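Iterating over it then yields collated batches (a minimal sketch; the loop body is left to the model code):
for batch in dataiter:
    # each batch collates up to 32 samples returned by __getitem__
    pass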
import torch.nn as nn

class net_name(nn.Module):
    def __init__(self, other_arguments):
        super(net_name, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size)
        # other network layers

    def forward(self, x):
        x = self.conv1(x)
        return x
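A concrete, runnable version with the placeholders filled in (the channel and kernel sizes are assumed purely for illustration):
import torch
import torch.nn as nn

class ExampleNet(nn.Module):  # hypothetical concrete version of net_name
    def __init__(self):
        super(ExampleNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)  # in_channels=3, out_channels=16, kernel_size=3 (assumed)

    def forward(self, x):
        return self.conv1(x)

net = ExampleNet()
out = net(torch.randn(1, 3, 32, 32))  # dummy batch: N=1, C=3, H=W=32
print(out.shape)                      # torch.Size([1, 16, 30, 30])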
import torch.nn as nn
criterion = nn.CrossEntropyLoss()  # loss function
loss = criterion(output, target)   # output: model predictions, target: labels
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
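Put together, one optimization step follows the usual pattern (a minimal sketch, assuming model, output, and target from above):
optimizer.zero_grad()             # clear gradients from the previous step
loss = criterion(output, target)  # compute the loss
loss.backward()                   # backpropagate through the graph
optimizer.step()                  # update the parameters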
torch.save(model, './model.pth')               # save the entire model
torch.save(model.state_dict(), './model.pth')  # save only the parameters (recommended)
load_model = torch.load('model.pth')            # load an entire saved model
model.load_state_dict(torch.load('model.pth'))  # load parameters into an existing model