PyTorch Study Notes


  • I. Basic Operations
    • 1.Usage
    • 2.Tensor Properties
      • ①Device
      • ②Shape
    • 3.Indexing Tensors
    • 4.Initializing Tensors
    • 5.Basic Functions
  • II. PyTorch Neural Network Module (torch.nn)
    • 1.nn.Linear
    • 2.nn.ReLU
    • 3.nn.BatchNorm1d
    • 4.nn.Sequential
    • 5.Optimization
    • 6.Training Loop
    • 7.New nn Classes

I. Basic Operations

1.Usage

import torch

2.Tensor Properties

example_tensor = torch.Tensor(
    [
     [[1, 2], [3, 4]], 
     [[5, 6], [7, 8]], 
     [[9, 0], [1, 2]]
    ]
)
example_tensor

output:
tensor([[[1., 2.],
         [3., 4.]],
        [[5., 6.],
         [7., 8.]],
        [[9., 0.],
         [1., 2.]]])

①Device

example_tensor.device

output:
device(type='cpu')
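
A tensor can be moved between devices with .to() (or .cuda()/.cpu()); a minimal sketch, guarded so it also runs on a CPU-only machine (variable names here are just for illustration):

# Move to the GPU if one is available, otherwise stay on the CPU
new_device = 'cuda' if torch.cuda.is_available() else 'cpu'
moved_tensor = example_tensor.to(new_device)  # returns a copy on the target device
moved_tensor.device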

②Shape

example_tensor.shape

output:
torch.Size([3, 2, 2])
print("shape[0] =", example_tensor.shape[0])
print("size(1) =", example_tensor.size(1))

output:
shape[0] = 3
size(1) = 2
print("Rank =", len(example_tensor.shape))
print("Number of elements =", example_tensor.numel())

output:
Rank = 3
Number of elements = 12

3.Indexing Tensors

example_tensor[1]

out:
tensor([[5., 6.],
        [7., 8.]])
example_tensor[1, 1, 0]

out:
tensor(7.)
# Get the scalar value as a Python number
example_scalar = example_tensor[1, 1, 0]
example_scalar.item()

out:
7.0
# Top-left element of each matrix
example_tensor[:, 0, 0]

out:
tensor([1., 5., 9.])
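
Slicing works along any dimension; for example, the second column of the first matrix (expected value computed from example_tensor above):

# Second column of the first 2x2 matrix
example_tensor[0, :, 1]  # tensor([2., 4.])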

4.Initializing Tensors

torch.ones_like(example_tensor)

out:
tensor([[[1., 1.],
         [1., 1.]],

        [[1., 1.],
         [1., 1.]],

        [[1., 1.],
         [1., 1.]]])
torch.zeros_like(example_tensor)

out:
tensor([[[0., 0.],
         [0., 0.]],

        [[0., 0.],
         [0., 0.]],

        [[0., 0.],
         [0., 0.]]])
# Normal (Gaussian) distribution
torch.randn_like(example_tensor)

out:
tensor([[[-0.3675,  0.2242],
         [-0.3378, -1.0944]],

        [[ 1.5371,  0.7701],
         [-0.1490, -0.0928]],

        [[ 0.3270,  0.4642],
         [ 0.1494,  0.1283]]])
torch.randn(2, 2, device='cpu') # Alternatively, for a GPU tensor, you'd use device='cuda'

out:
tensor([[ 0.2235, -1.8912],
        [-1.2873,  0.7405]])
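
Besides the *_like variants, tensors can also be created directly from a shape; a few common constructors (the shapes and fill values here are just examples):

torch.ones(3, 2, 2)     # all-ones tensor of shape (3, 2, 2)
torch.zeros(2, 2)       # all-zeros tensor of shape (2, 2)
torch.arange(0, 10, 2)  # tensor([0, 2, 4, 6, 8])
torch.full((2, 2), 7.)  # 2x2 tensor filled with 7.0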

5.Basic Functions

(example_tensor - 5) * 2

out:
tensor([[[ -8.,  -6.],
         [ -4.,  -2.]],

        [[  0.,   2.],
         [  4.,   6.]],

        [[  8., -10.],
         [ -8.,  -6.]]])
print("Mean:", example_tensor.mean())
print("Stdev:", example_tensor.std())

out:
Mean: tensor(4.)
Stdev: tensor(2.9848)
example_tensor.mean(0)

# Equivalently, you could also write:
# example_tensor.mean(dim=0)
# example_tensor.mean(axis=0)
# torch.mean(example_tensor, 0)
# torch.mean(example_tensor, dim=0)
# torch.mean(example_tensor, axis=0)
out:
tensor([[5.0000, 2.6667],
        [3.6667, 4.6667]])
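
mean(0) averages over dimension 0, collapsing the (3, 2, 2) tensor to (2, 2); for example, the entry at [0, 0] is (1 + 5 + 9) / 3 = 5.0. The reduced shapes can be checked directly:

example_tensor.mean(0).shape  # torch.Size([2, 2]) -- dim 0 averaged away
example_tensor.mean(1).shape  # torch.Size([3, 2]) -- dim 1 averaged away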

II. PyTorch Neural Network Module (torch.nn)

1.nn.Linear

import torch.nn as nn

linear = nn.Linear(10, 2)
example_input = torch.randn(3, 10)
example_output = linear(example_input)
example_output

out:
tensor([[ 0.3909,  0.6160],
        [-0.1559,  0.2788],
        [ 0.4767,  0.3206]], grad_fn=<AddmmBackward>)
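
nn.Linear(10, 2) applies the affine map y = x W^T + b, so its learnable weight has shape (2, 10) and its bias has shape (2):

linear.weight.shape  # torch.Size([2, 10])
linear.bias.shape    # torch.Size([2])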

2.nn.ReLU

relu = nn.ReLU()
relu_output = relu(example_output)
relu_output

out:
tensor([[0.3909, 0.6160],
        [0.0000, 0.2788],
        [0.4767, 0.3206]], grad_fn=<ReluBackward0>)

3.nn.BatchNorm1d

batchnorm = nn.BatchNorm1d(2)
batchnorm_output = batchnorm(relu_output)
batchnorm_output

out:
tensor([[-1.3570, -0.7070],
        [ 0.3368,  1.4140],
        [ 1.0202, -0.7070]], grad_fn=<NativeBatchNormBackward>)
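
BatchNorm1d normalizes with the current batch's statistics in training mode (as above) and with its running estimates in eval mode; a minimal sketch of switching modes:

batchnorm.eval()                  # use running mean/variance instead of batch statistics
eval_output = batchnorm(relu_output)
batchnorm.train()                 # switch back to training mode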

4.nn.Sequential

mlp_layer = nn.Sequential(
    nn.Linear(5, 2),
    nn.BatchNorm1d(2),
    nn.ReLU()
)
test_example = torch.randn(5,5) + 1
print("input: ")
print(test_example)
print("output: ")
print(mlp_layer(test_example))

out:
input: 
tensor([[ 1.7690,  0.2864,  0.7925,  2.2849,  1.5226],
        [ 0.1877,  0.1367, -0.2833,  2.0905,  0.0454],
        [ 0.7825,  2.2969,  1.2144,  0.2526,  2.5709],
        [-0.4878,  1.9587,  1.6849,  0.5284,  1.9027],
        [ 0.5384,  1.1787,  0.4961, -1.6326,  1.4192]])
output: 
tensor([[0.0000, 1.1865],
        [1.5208, 0.0000],
        [0.0000, 1.1601],
        [0.0000, 0.0000],
        [0.7246, 0.0000]], grad_fn=<ReluBackward0>)
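
Layers inside an nn.Sequential can be accessed by index, which is handy for inspecting individual pieces:

mlp_layer[0]                # the nn.Linear(5, 2) layer
mlp_layer[0].weight.shape   # torch.Size([2, 5])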

5.Optimization

import torch.optim as optim
adam_opt = optim.Adam(mlp_layer.parameters(), lr=1e-1)
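
Other optimizers follow the same pattern: pass in the model's parameters plus hyperparameters. For example, plain SGD with momentum (the values here are only illustrative):

sgd_opt = optim.SGD(mlp_layer.parameters(), lr=1e-2, momentum=0.9)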

6.Training Loop

train_example = torch.randn(100,5) + 1
adam_opt.zero_grad()

# We'll use a simple loss function of mean distance from 1
# torch.abs takes the absolute value of a tensor
cur_loss = torch.abs(1 - mlp_layer(train_example)).mean()

cur_loss.backward()
adam_opt.step()
print(cur_loss)

out:
tensor(0.7625, grad_fn=<MeanBackward0>)
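
Putting the pieces together, a real training loop just repeats these steps; a minimal sketch using the same toy loss (fresh random data each step, so the printed losses will differ from run to run, and 10 steps is arbitrary):

for step in range(10):
    train_example = torch.randn(100, 5) + 1
    adam_opt.zero_grad()              # clear gradients from the previous step
    cur_loss = torch.abs(1 - mlp_layer(train_example)).mean()
    cur_loss.backward()               # compute gradients
    adam_opt.step()                   # update the parameters
    print(step, cur_loss.item())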

7.New nn Classes

class ExampleModule(nn.Module):
    def __init__(self, input_dims, output_dims):
        super(ExampleModule, self).__init__()
        self.linear = nn.Linear(input_dims, output_dims)
        self.exponent = nn.Parameter(torch.tensor(1.))

    def forward(self, x):
        x = self.linear(x)

        # This is the notation for element-wise exponentiation, 
        # which matches python in general
        x = x ** self.exponent 
        
        return x
example_model = ExampleModule(10, 2)
list(example_model.parameters())

out:
[Parameter containing:
 tensor(1., requires_grad=True),
 Parameter containing:
 tensor([[-0.2108, -0.0917, -0.0987, -0.1066, -0.2074, -0.0379, -0.0724,  0.2764,
          -0.0583,  0.2797],
         [-0.0687,  0.1698, -0.0337, -0.0082, -0.0894, -0.0422,  0.1552, -0.1489,
           0.1145, -0.0952]], requires_grad=True),
 Parameter containing:
 tensor([-0.1635,  0.3030], requires_grad=True)]
list(example_model.named_parameters())

out:
[('exponent',
  Parameter containing:
  tensor(1., requires_grad=True)),
 ('linear.weight',
  Parameter containing:
  tensor([[ 0.2789,  0.2618, -0.0678,  0.2766,  0.1436,  0.0917, -0.1669, -0.1887,
            0.0913, -0.1998],
          [-0.1757,  0.0361,  0.1140,  0.2152, -0.1200,  0.1712,  0.0944, -0.0447,
            0.1548,  0.2383]], requires_grad=True)),
 ('linear.bias',
  Parameter containing:
  tensor([ 0.1881, -0.0834], requires_grad=True))]
model_input = torch.randn(2, 10)  # avoid shadowing the Python built-in `input`
example_model(model_input)

out:
tensor([[-0.0567,  0.4562],
        [ 0.3780,  0.3452]], grad_fn=<PowBackward1>)
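
The parameters listed above amount to 23 scalars in total: the 2x10 weight matrix, the 2-element bias, and the scalar exponent. This can be checked directly:

sum(p.numel() for p in example_model.parameters())  # 23 = 20 + 2 + 1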
