PyTorch: Batch Normalization under Single-Machine Multi-GPU DataParallel
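nn.DataParallel splits each batch across GPUs and runs a replica of the model on each device, so every BatchNorm layer computes its statistics only over its own sub-batch, and only the running statistics from the replica on device 0 survive on the master module. The short script below makes this visible: a toy model prints running_mean and running_var before and after every forward pass, and a helper recomputes the expected updates by hand.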

import os

import torch
import torch.nn as nn


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()

        # A bias-free linear layer with all weights fixed to 1, so the pre-BN
        # activation is just the sum of the input features, followed by a
        # single-feature BatchNorm.
        self.l = nn.Linear(2, 1, bias=False)
        self.b = nn.BatchNorm1d(1)

        self.l.weight.data.fill_(1.0)

        # Identity affine transform: the BN output is exactly the normalized input.
        self.b.weight.data.fill_(1.0)
        self.b.bias.data.fill_(0.0)

    def forward(self, x):
        l = self.l(x)
        # Under DataParallel, each replica prints its own copy of the buffers.
        print('before bn')
        print(f'running_mean={self.b.running_mean}')
        print(f'running_var={self.b.running_var}')
        o = self.b(l)
        print('after bn')
        print(f'running_mean={self.b.running_mean}')
        print(f'running_var={self.b.running_var}')
        return o, l


def compute_mean_var(l, mean, var, momentum=0.9):
    # Reproduce PyTorch's running-stat update by hand. Note that `momentum`
    # here is 1 - PyTorch's BN momentum (BatchNorm1d defaults to momentum=0.1,
    # i.e. running = 0.9 * running + 0.1 * batch_stat), and that the running
    # variance is updated with the *unbiased* batch variance, while the
    # normalization itself uses the biased one (see vv below).
    mean = mean * momentum + torch.mean(l) * (1 - momentum)
    var = var * momentum + torch.var(l, unbiased=True) * (1 - momentum)
    print(f'mean={mean:.4f},var={var:.4f}')

    return mean, var


def vv(l):
    # Biased batch variance (divide by n), which BN uses to normalize.
    m = l.mean()
    v = torch.mean((l - m) ** 2)
    return v
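As a quick aside (my own addition, runnable on CPU with the imports above and independent of the multi-GPU run), a plain BatchNorm1d confirms this update rule and the 0.3 / 1.1 values that GPU 0 produces below:

bn = nn.BatchNorm1d(1)            # running_mean=0, running_var=1, momentum=0.1
bn.train()
bn(torch.tensor([[2.0], [4.0]]))  # the activations GPU 0's replica will see
print(bn.running_mean)            # 0.9 * 0 + 0.1 * mean([2, 4])          -> tensor([0.3000])
print(bn.running_var)             # 0.9 * 1 + 0.1 * var([2, 4], unbiased) -> tensor([1.1000])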

# Must be set before anything touches CUDA for the restriction to take effect.
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'

model = Model()
# DataParallel splits the input along dim 0, replicates the module on each
# device, runs the replicas in parallel, and gathers the outputs on device 0.
model = nn.DataParallel(model, device_ids=[0, 1]).cuda()

# With the weights fixed to 1, the linear layer maps these rows to 2, 4, 6, 8.
data0 = torch.tensor([[1.0, 1.0], [2.0, 2.0]]).cuda()
data1 = torch.tensor([[3.0, 3.0], [4.0, 4.0]]).cuda()

# GPU 0 will receive data0's rows and GPU 1 data1's rows.
data01 = torch.cat([data0, data1], 0)

model.train()

# First forward pass: each replica normalizes its own 2-sample sub-batch and
# updates its own copy of the running stats.
o, l = model(data01)
m1, v1 = compute_mean_var(l[:2], 0.0, 1.0)  # GPU 0's sub-batch: l = [2, 4]
m2, v2 = compute_mean_var(l[2:], 0.0, 1.0)  # GPU 1's sub-batch: l = [6, 8]

# Second forward pass: both replicas are re-created from the master module,
# whose buffers only kept GPU 0's update (0.3 / 1.1); GPU 1's update was lost.
o, l = model(data01)
m1, v1 = compute_mean_var(l[:2], 0.3, 1.1)
m2, v2 = compute_mean_var(l[2:], 0.3, 1.1)


# In train mode, each sub-batch is normalized with its own mean and biased
# variance, so the gathered output o matches the per-sub-batch normalization
# rather than the normalization of the full 4-sample batch.
print((l[:2] - l[:2].mean()) / vv(l[:2]).sqrt(), (l[2:] - l[2:].mean()) / vv(l[2:]).sqrt())
print(o)

print((l - l.mean()) / vv(l).sqrt())
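Worked numbers for GPU 0's sub-batch l = [2, 4]: the batch mean is 3 and the biased variance is 1, so BN outputs (2 - 3) / sqrt(1 + eps) ≈ -1 and (4 - 3) / sqrt(1 + eps) ≈ 1. Full-batch normalization of [2, 4, 6, 8] (mean 5, biased variance 5) would instead give ±1.3416 and ±0.4472, which is what the last print shows.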


model.eval()
# In eval mode, BN normalizes with the stored running stats on every device.
o, l = model(data01)
print(o)
# Use sqrt(), not the in-place sqrt_(), which would overwrite running_var.
print((l - model.module.b.running_mean) / model.module.b.running_var.sqrt())
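This manual check differs from o in the last decimal (6.8111 vs 6.8110 below) because BN adds eps inside the square root. A sketch of the exact eval-mode formula, assuming the default eps=1e-5 (this line is my addition and is not part of the logged run):

bn = model.module.b
# Eval-mode BN: (x - running_mean) / sqrt(running_var + eps), followed by the
# affine transform, which is the identity here.
print((l - bn.running_mean) / (bn.running_var + bn.eps).sqrt())

Running the script on two GPUs prints the following (lines from the two replicas interleave nondeterministically):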

 

before bn
running_mean=tensor([0.], device='cuda:0')
running_var=tensor([1.], device='cuda:0')
before bn
running_mean=tensor([0.], device='cuda:1')
running_var=tensor([1.], device='cuda:1')
after bn
running_mean=tensor([0.3000], device='cuda:0')
running_var=tensor([1.1000], device='cuda:0')
after bn
running_mean=tensor([0.7000], device='cuda:1')
running_var=tensor([1.1000], device='cuda:1')
mean=0.3000,var=1.1000
mean=0.7000,var=1.1000
before bn
before bn
running_mean=tensor([0.3000], device='cuda:1')
running_mean=tensor([0.3000], device='cuda:0')
running_var=tensor([1.1000], device='cuda:1')
running_var=tensor([1.1000], device='cuda:0')
after bn
after bn
running_mean=tensor([0.9700], device='cuda:1')
running_mean=tensor([0.5700], device='cuda:0')
running_var=tensor([1.1900], device='cuda:1')
running_var=tensor([1.1900], device='cuda:0')
mean=0.5700,var=1.1900
mean=0.9700,var=1.1900
tensor([[-1.],
        [ 1.]], device='cuda:0', grad_fn=<DivBackward0>) tensor([[-1.],
        [ 1.]], device='cuda:0', grad_fn=<DivBackward0>)
tensor([[-1.0000],
        [ 1.0000],
        [-1.0000],
        [ 1.0000]], device='cuda:0', grad_fn=<GatherBackward>)
tensor([[-1.3416],
        [-0.4472],
        [ 0.4472],
        [ 1.3416]], device='cuda:0', grad_fn=<DivBackward0>)
before bn
before bn
running_mean=tensor([0.5700], device='cuda:0')
running_mean=tensor([0.5700], device='cuda:1')
running_var=tensor([1.1900], device='cuda:1')
running_var=tensor([1.1900], device='cuda:0')
after bn
after bn
running_mean=tensor([0.5700], device='cuda:1')
running_mean=tensor([0.5700], device='cuda:0')
running_var=tensor([1.1900], device='cuda:1')
running_var=tensor([1.1900], device='cuda:0')
tensor([[1.3109],
        [3.1443],
        [4.9777],
        [6.8110]], device='cuda:0', grad_fn=<GatherBackward>)
tensor([[1.3109],
        [3.1443],
        [4.9777],
        [6.8111]], device='cuda:0', grad_fn=<DivBackward0>)
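Reading the log: in train mode each replica normalizes only its own 2-sample sub-batch (hence o is [-1, 1, -1, 1] rather than the full-batch result [-1.3416, -0.4472, 0.4472, 1.3416]), and after each forward pass only the stats from the replica on device 0 survive on the master module: 0.3 / 1.1 after the first pass and 0.57 / 1.19 after the second, while GPU 1's 0.7 and 0.97 are discarded. In eval mode both replicas share the same stored stats, so the gathered output matches the manual computation up to the eps term.

If you need BN statistics computed over the whole cross-GPU batch, the usual remedy (not exercised by this script) is nn.SyncBatchNorm, which synchronizes mean and variance across processes but only takes effect under DistributedDataParallel, not DataParallel. A minimal sketch, assuming a process group has already been initialized and rank is this process's GPU index:

sync_model = nn.SyncBatchNorm.convert_sync_batchnorm(Model())
# SyncBatchNorm only synchronizes under DistributedDataParallel, e.g.:
# sync_model = nn.parallel.DistributedDataParallel(sync_model.cuda(rank), device_ids=[rank])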

 
