P28 Batch Normalization

I don't fully understand the math yet, but the concise implementation approach is still worth a look.

1. Building it from scratch
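For reference (my own summary, not from the lecture notes themselves), the transform that the comments below call "the formula" is the standard batch normalization:

$$\hat{x} = \frac{x - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}, \qquad y = \gamma\hat{x} + \beta$$

where $\mu_B$ and $\sigma_B^2$ are the mean and variance of the current minibatch during training (and the running estimates moving_mean / moving_var at inference time), and the running estimates are updated as moving_mean <- momentum * moving_mean + (1 - momentum) * mu_B (and likewise for moving_var).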

# Batch Normalization

import torch
from torch import nn
from d2l import torch as d2l

def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    # X - the input to this layer; gamma, beta - the two learnable parameters (see the formula above)
    # moving_mean, moving_var - running estimates of the whole dataset's mean and variance,
    #   used at inference/test time in place of the current batch's statistics
    # eps - small constant added to the variance to avoid division by zero
    # momentum - typically 0.9, controls how moving_mean and moving_var are updated

    if not torch.is_grad_enabled():  # gradients are not being computed, i.e. inference/test mode
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
        # at inference time, normalize with the running (dataset-wide) mean and variance
    else:  # training mode
        assert len(X.shape) in (2, 4)  # 2 -> fully connected layer, 4 -> 2D convolutional layer
        if len(X.shape) == 2:
            mean = X.mean(dim=0)  # mean over the rows: a 1*n row vector
            var = ((X - mean) ** 2).mean(dim=0)  # variance is a row vector too, i.e. per feature
        else:  # the 2D convolution case
            mean = X.mean(dim=(0, 2, 3), keepdim=True)
            # dim=0 averages over the samples in the batch, dims 2 and 3 over height and width,
            # so the statistics are computed per channel, over all values of that channel
            var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
            # same for the variance; the result is a 4D tensor of shape 1*n*1*1
        X_hat = (X - mean) / torch.sqrt(var + eps)
        # update moving_mean and moving_var as exponential moving averages of the batch statistics
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var

    Y = gamma * X_hat + beta  # scale and shift
    return Y, moving_mean.data, moving_var.data
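A quick sanity check of batch_norm (my own toy example, not part of the original notes): normalizing a 2*4 matrix feature by feature should give roughly zero mean per column, and nudge the running statistics towards the batch statistics.

X = torch.arange(8.0).reshape(2, 4)
gamma, beta = torch.ones((1, 4)), torch.zeros((1, 4))
Y, new_mean, new_var = batch_norm(X, gamma, beta,
                                  moving_mean=torch.zeros((1, 4)),
                                  moving_var=torch.ones((1, 4)),
                                  eps=1e-5, momentum=0.9)
print(Y)         # roughly [[-1, -1, -1, -1], [1, 1, 1, 1]]
print(new_mean)  # 0.9 * 0 + 0.1 * [2, 3, 4, 5] = [0.2, 0.3, 0.4, 0.5]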

2. Creating a BatchNorm layer


class BatchNorm(nn.Module):
    def __init__(self, num_features, num_dims):
        super().__init__()
        if num_dims == 2:
            shape = (1, num_features)
            # the parameter shape is 1*n for a fully connected layer, 1*n*1*1 for a conv layer
        else:
            shape = (1, num_features, 1, 1)
        self.gamma = nn.Parameter(torch.ones(shape))  # gamma: the learnable scale
        self.beta = nn.Parameter(torch.zeros(shape))  # beta: the learnable shift
        # gamma and beta are updated by the optimizer; moving_mean and moving_var are not,
        # so only gamma and beta are wrapped in nn.Parameter
        self.moving_mean = torch.zeros(shape)  # initialized to 0
        self.moving_var = torch.ones(shape)    # initialized to 1

    def forward(self, X):
        if self.moving_mean.device != X.device:  # keep the running stats on the same device as X
            self.moving_mean = self.moving_mean.to(X.device)
            self.moving_var = self.moving_var.to(X.device)
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean, self.moving_var,
            eps=1e-5, momentum=0.9)
        # eps=1e-5 and momentum=0.9 are common default choices
        return Y
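A quick check (my own addition) that the layer runs and keeps the input shape unchanged:

layer = BatchNorm(3, num_dims=4)
X = torch.randn(2, 3, 8, 8)   # batch of 2, 3 channels, 8x8 feature maps
print(layer(X).shape)         # torch.Size([2, 3, 8, 8]) - same shape as the input
print(layer.gamma.shape)      # torch.Size([1, 3, 1, 1]) - one scale per channel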

3. Using it in LeNet

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),
    # 1 input channel, 6 output channels; the only change from plain LeNet is the
    # BatchNorm layer defined above, with num_dims=4 for convolutional outputs
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),
    # BatchNorm is placed before the Sigmoid activation
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),
    nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),
    # the fully connected layers use BatchNorm as well, with num_dims=2
    nn.Linear(84, 10))  # output layer
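To see how the shapes flow through this network (my own check, assuming Fashion-MNIST's 1*28*28 input):

X = torch.rand(size=(1, 1, 28, 28))
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
# Conv2d: 28 -> 24, AvgPool2d: 24 -> 12, Conv2d: 12 -> 8, AvgPool2d: 8 -> 4,
# which is why the first Linear layer takes 16*4*4 = 256 inputs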

4. Training the network on Fashion-MNIST


lr, num_epochs, batch_size = 1.0, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())

# inspect the scale and shift learned by the first BatchNorm layer
print(net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,)))
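The "concise implementation" mentioned at the top simply swaps the hand-written BatchNorm for PyTorch's built-in nn.BatchNorm2d / nn.BatchNorm1d (BatchNorm2d for convolutional outputs, BatchNorm1d for fully connected outputs). A sketch of that version, trained with the same call as above:

net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
    nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),
    nn.Linear(84, 10))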
