Normalization methods
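All four layers below perform the same core computation, (x - mean) / sqrt(var + eps), and differ only in which dimensions the statistics are reduced over. As a quick orientation, here is a minimal sketch (the helper name normalize_over is just for illustration) that reproduces the first three variants by choosing the reduction dims; the per-layer checks against the official PyTorch modules follow.

import torch

def normalize_over(x, dims, eps=0.0):
    # Mean/variance over the given dims; biased variance, matching nn.*Norm layers.
    mu = x.mean(dim=dims, keepdim=True)
    var = x.var(dim=dims, unbiased=False, keepdim=True)
    return (x - mu) / torch.sqrt(var + eps)

x = torch.rand(10, 3, 5, 5)
bn_like = normalize_over(x, dims=(0, 2, 3))   # BatchNorm: per channel, over (N, H, W)
ln_like = normalize_over(x, dims=(1, 2, 3))   # LayerNorm: per sample, over (C, H, W)
in_like = normalize_over(x, dims=(2, 3))      # InstanceNorm: per (sample, channel), over (H, W)
# GroupNorm first reshapes channels into groups, then normalizes per (sample, group);
# see the Group-Norm section below.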
Batch-Norm
import torch
from torch import nn

# eps=0, affine=False, track_running_stats=False so the layer outputs exactly
# the batch-normalized input, with no learnable gamma/beta and no running stats.
bn = nn.BatchNorm2d(num_features=3, eps=0, affine=False, track_running_stats=False)
x = torch.rand(10, 3, 5, 5) * 10000
official_bn = bn(x)

# BatchNorm statistics are per channel, over (N, H, W).
# permute() returns a non-contiguous tensor, so use reshape() instead of view().
x1 = x.permute(1, 0, 2, 3).reshape(3, -1)
mu = x1.mean(dim=1).view(1, 3, 1, 1)
std = x1.std(dim=1, unbiased=False).view(1, 3, 1, 1)
self_bn = (x - mu) / std

diff = (official_bn - self_bn).sum()
print('diff={}'.format(diff))
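The check above relies on track_running_stats=False, which makes the layer always normalize with the statistics of the current batch. With the default track_running_stats=True, the layer accumulates running estimates during training and uses those in eval mode, so the same comparison would no longer match. A small sketch of that behavior, reusing x and self_bn from above (the variable names are illustrative):

bn_rs = nn.BatchNorm2d(num_features=3, eps=0, affine=False)  # track_running_stats=True by default
bn_rs.train()
_ = bn_rs(x)          # training mode: uses batch stats and updates running_mean / running_var
bn_rs.eval()
eval_out = bn_rs(x)   # eval mode: normalizes with the running estimates, not the batch stats
print((eval_out - self_bn).abs().max())  # generally far from zero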
Layer-Norm
import torch
from torch import nn

x = torch.rand(10, 3, 5, 5) * 10000
# eps=0 so it does not affect the comparison.
# elementwise_affine=False: no learnable affine transform.
# Note: unlike BN and the IN below, LayerNorm's affine is elementwise,
# i.e. gamma and beta are not per-channel vectors but tensors whose shape
# equals normalized_shape.
ln = nn.LayerNorm(normalized_shape=[3, 5, 5], eps=0, elementwise_affine=False)
official_ln = ln(x)

# LayerNorm statistics are per sample, over (C, H, W).
x1 = x.view(10, -1)
mu = x1.mean(dim=1).view(10, 1, 1, 1)
std = x1.std(dim=1, unbiased=False).view(10, 1, 1, 1)
self_ln = (x - mu) / std

diff = (self_ln - official_ln).sum()
print('diff={}'.format(diff))
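The comment about the elementwise affine can be verified directly: with elementwise_affine=True, the learnable weight and bias have the full normalized_shape rather than a per-channel shape. A small check, reusing the imports above:

ln_affine = nn.LayerNorm(normalized_shape=[3, 5, 5], eps=0, elementwise_affine=True)
print(ln_affine.weight.shape)  # torch.Size([3, 5, 5]): gamma has the full normalized_shape
print(ln_affine.bias.shape)    # torch.Size([3, 5, 5]): beta likewise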
Instance-Norm
import torch
from torch import nn

x = torch.rand(10, 3, 5, 5) * 10000
# gamma, beta would broadcast as [1, num_channels, 1, 1]; affine=False disables them,
# and track_running_stats=False uses the statistics of the current batch.
In = nn.InstanceNorm2d(num_features=3, eps=0, affine=False, track_running_stats=False)
official_in = In(x)

# InstanceNorm statistics are per (sample, channel), over (H, W).
x1 = x.view(30, -1)
mu = x1.mean(dim=1).view(10, 3, 1, 1)
std = x1.std(dim=1, unbiased=False).view(10, 3, 1, 1)
self_in = (x - mu) / std

diff = (self_in - official_in).sum()
print('diff={}'.format(diff))
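InstanceNorm can also be seen as BatchNorm applied to each (sample, channel) pair independently: folding the batch dimension into the channel dimension and running BatchNorm2d reproduces the same output. A small sketch reusing x and official_in from above (the name bn_as_in is just illustrative):

bn_as_in = nn.BatchNorm2d(num_features=30, eps=0, affine=False, track_running_stats=False)
alt_in = bn_as_in(x.view(1, 30, 5, 5)).view(10, 3, 5, 5)
print((alt_in - official_in).abs().max())  # should be 0 (up to floating point)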
Group-Norm
import torch
from torch import nn

x = torch.rand(10, 20, 5, 5) * 10000
# gamma, beta would broadcast as [1, num_channels, 1, 1]; affine=False disables them.
gn = nn.GroupNorm(num_groups=4, num_channels=20, eps=0, affine=False)
official_gn = gn(x)

# GroupNorm statistics are per (sample, group): split the 20 channels into 4 groups of 5.
x1 = x.view(10, 4, -1)
mu = x1.mean(dim=-1).reshape(10, 4, -1)
# unbiased=False: the norm layers use the biased variance estimate.
std = x1.std(dim=-1, unbiased=False).reshape(10, 4, -1)
x1_norm = (x1 - mu) / std
self_gn = x1_norm.reshape(10, 20, 5, 5)

diff = (self_gn - official_gn).sum()
print('diff={}'.format(diff))
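GroupNorm contains the two previous layers as special cases: num_groups=1 normalizes each sample over the whole (C, H, W), like LayerNorm, and num_groups=num_channels normalizes each channel of each sample, like InstanceNorm. A small sketch of both cases, reusing x from this section:

gn_1 = nn.GroupNorm(num_groups=1, num_channels=20, eps=0, affine=False)
ln_like = nn.LayerNorm(normalized_shape=[20, 5, 5], eps=0, elementwise_affine=False)
print((gn_1(x) - ln_like(x)).abs().max())    # should be 0 (up to floating point)

gn_20 = nn.GroupNorm(num_groups=20, num_channels=20, eps=0, affine=False)
in_like = nn.InstanceNorm2d(num_features=20, eps=0, affine=False, track_running_stats=False)
print((gn_20(x) - in_like(x)).abs().max())   # should be 0 (up to floating point)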