Dive into Deep Learning v2 - PyTorch Neural Network Basics

Layers and blocks:

import torch
from torch import nn
from torch.nn import functional as F

net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))

X = torch.rand(2, 20)
net(X)

nn.Sequential defines a special kind of Module.

Any layer, and any neural network, should be a subclass of Module.
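
Since both single layers and whole networks are Module subclasses, they can be used interchangeably. A quick illustrative check (not part of the original note):

print(isinstance(nn.Linear(20, 256), nn.Module))  # True: a single layer is a Module
print(isinstance(net, nn.Module))                 # True: a whole network is also a Module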

Custom blocks:

class MLP(nn.Module):
    # Declare the layers of the model here
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)     # output layer

    # Define the forward propagation: how to compute the output from input X
    def forward(self, X):
        return self.out(F.relu(self.hidden(X)))

Instantiate the multilayer perceptron's layers in __init__, then invoke those layers on every call to the forward propagation function.

net = MLP()
net(X)

The sequential block:

class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, block in enumerate(args):
            # _modules is an OrderedDict; registering each block under a
            # string key lets PyTorch find its parameters (e.g. for state_dict)
            self._modules[str(idx)] = block

    def forward(self, X):
        # Call the blocks in the order they were registered
        for block in self._modules.values():
            X = block(X)
        return X

net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
        

When the Sequential class cannot meet our needs, we can perform custom computation in __init__ and forward.

Executing code in the forward propagation function:

class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # Fixed random weights: not a Parameter, so never updated during training
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        # Use the fixed weights plus a constant offset
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the same linear layer: its parameters are shared
        X = self.linear(X)
        # Arbitrary control flow is allowed in forward
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()

net = FixedHiddenMLP()
net(X)
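
Because every block is a Module, blocks compose freely; for example, a Sequential can nest the custom blocks defined above (an illustrative sketch, not from the original note):

chimera = nn.Sequential(MLP(), nn.Linear(10, 20), FixedHiddenMLP())
chimera(X)  # MLP maps 20 -> 10, the Linear maps 10 -> 20, FixedHiddenMLP returns a scalar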

Custom layers:

Construct a custom layer without any parameters, as sketched below.
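
The code for this step is missing from the note; a minimal sketch in the style of the book's CenteredLayer, which simply subtracts the mean from its input:

class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        # Center the input at zero; this layer has no trainable parameters
        return X - X.mean()

layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))  # tensor([-2., -1., 0., 1., 2.])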


 

Parameter management:

We first focus on a multilayer perceptron with a single hidden layer.

import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8,1))
X = torch.rand(size=(2, 4))
net(X)

Parameter access:

print(net[2].state_dict())

Targeted parameters:

print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)

net[2].weight.grad is None  # True: no backward pass has run yet, so no gradient exists

Accessing all parameters at once:

print(*[(name, param.shape) for name, param in net[0].named_parameters()])
print(*[(name, param.shape) for name, param in net.named_parameters()])

net.state_dict()['2.bias'].data

Built-in initialization:

def init_normal(m):  # m is a Module
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
net[0].weight.data[0], net[0].bias.data[0]
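
apply recurses through all submodules, so different blocks can be initialized differently by calling apply on a sub-block directly. A small sketch (init_xavier and init_constant are illustrative names, not from the original note):

def init_xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(init_xavier)    # Xavier-initialize only the first linear layer
net[2].apply(init_constant)  # constant-initialize only the output layer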
