Python nn.BatchNorm2d Method Code Examples

This article collects typical usage examples of the torch.nn.BatchNorm2d method in Python. If you are wondering what nn.BatchNorm2d does, how to use it, or where to find real-world examples of it, the curated snippets below may help. You can also explore further usage examples from its containing module, torch.nn.

The following presents 14 code examples of the nn.BatchNorm2d method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
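Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the channel and batch sizes are arbitrary) showing what nn.BatchNorm2d expects: it is constructed with the number of channels and applied to a 4D tensor of shape (N, C, H, W).

import torch
from torch import nn

# BatchNorm2d normalizes each channel over the batch and spatial dimensions;
# num_features must equal the channel dimension C of the input.
bn = nn.BatchNorm2d(num_features=16)

x = torch.randn(8, 16, 32, 32)   # (batch, channels, height, width)
y = bn(x)                        # same shape as x: (8, 16, 32, 32)

# In a typical block it sits between a convolution and the nonlinearity:
block = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),  # conv bias is redundant before BN
    nn.BatchNorm2d(16),
    nn.ReLU(inplace=True),
)
print(block(torch.randn(8, 3, 32, 32)).shape)  # torch.Size([8, 16, 32, 32])

Note that BatchNorm2d behaves differently under model.train() and model.eval(): during training it normalizes with batch statistics and updates running estimates, while in evaluation mode it uses the stored running mean and variance.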

Example 1: __init__
Likes: 7

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()
    ngf = ngf * (2 ** (n_layers - 2))
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]
    for i in range(1, n_layers - 1):
        layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                   nn.BatchNorm2d(ngf // 2),
                   nn.ReLU(True)]
        ngf = ngf // 2
    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    if activation == 'tanh':
        layers += [nn.Tanh()]
    elif activation == 'sigmoid':
        layers += [nn.Sigmoid()]
    else:
        raise NotImplementedError
    self.main = nn.Sequential(*layers)

Developer: jthsieh, Project: DDPAE-video-prediction, Lines of code: 25

Example 2: __init__
Likes: 7

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self):
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(True),
    )
    self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
    self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
    self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
    self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
    self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
    self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
    self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
    self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 26

Example 3: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self):
    super(Model, self).__init__()
    self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
    self.bn1 = nn.BatchNorm2d(16)
    self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
    self.bn2 = nn.BatchNorm2d(32)
    self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
    self.bn3 = nn.BatchNorm2d(64)
    self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
    self.bn4 = nn.BatchNorm2d(128)
    self.conv5 = nn.Conv2d(128, 128, 3, dilation=2, padding=2)
    self.bn5 = nn.BatchNorm2d(128)
    self.conv6 = nn.Conv2d(128, 128, 3, dilation=4, padding=4)
    self.bn6 = nn.BatchNorm2d(128)
    self.conv7 = nn.Conv2d(128, 1+9, 3, padding=1)

Developer: aleju, Project: cat-bbs, Lines of code: 23

Example 4: _make_layer
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )
    layers = []
    layers.append(block(self.inplanes, planes, stride, 1, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        # here with dilation
        layers.append(block(self.inplanes, planes, dilation=dilation))
    return nn.Sequential(*layers)

Developer: aleju, Project: cat-bbs, Lines of code: 19

Example 5: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    # maxpool different from pytorch-resnet, to match tf-faster-rcnn
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 24

Example 6: _make_layer
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )
    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)

Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 18

Example 7: init_weights
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def init_weights(self, pretrained=None):
    """Initialize the weights in the module.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Defaults to None.
    """
    if isinstance(pretrained, str):
        logger = get_root_logger()
        load_checkpoint(self, pretrained, strict=False, logger=logger)
    elif pretrained is None:
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                constant_init(m, 1)
    else:
        raise TypeError('pretrained must be a str or None')

Developer: open-mmlab, Project: mmdetection, Lines of code: 20

Example 8: fuse_module
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def fuse_module(m):
    last_conv = None
    last_conv_name = None
    for name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            if last_conv is None:  # only fuse BN that is after Conv
                continue
            fused_conv = fuse_conv_bn(last_conv, child)
            m._modules[last_conv_name] = fused_conv
            # To reduce changes, set BN as Identity instead of deleting it.
            m._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            last_conv = child
            last_conv_name = name
        else:
            fuse_module(child)
    return m

Developer: open-mmlab, Project: mmdetection, Lines of code: 21

Example 9: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self):
    super(CW2_Net, self).__init__()
    self.conv1 = nn.Conv2d(3, 32, 3)
    self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
    self.conv2 = nn.Conv2d(32, 64, 3)
    self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
    self.conv3 = nn.Conv2d(64, 128, 3)
    self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
    self.conv4 = nn.Conv2d(128, 128, 3)
    self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
    self.fc1 = nn.Linear(3200, 256)
    # self.dropout1 = nn.Dropout(p=0.35, inplace=False)
    self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
    self.fc2 = nn.Linear(256, 256)
    self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
    self.fc3 = nn.Linear(256, 10)
    # self.dropout2 = nn.Dropout(p=0.35, inplace=False)
    # self.dropout3 = nn.Dropout(p=0.35, inplace=False)

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 20

Example 10: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
    super(Block, self).__init__()
    group_width = cardinality * bottleneck_width
    self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm2d(group_width)
    self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
    self.bn2 = nn.BatchNorm2d(group_width)
    self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm2d(self.expansion*group_width)
    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != self.expansion*group_width:
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion*group_width)
        )

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 18

Example 11: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
    super(Bottleneck, self).__init__()
    self.out_planes = out_planes
    self.dense_depth = dense_depth
    self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
    self.bn2 = nn.BatchNorm2d(in_planes)
    self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
    self.shortcut = nn.Sequential()
    if first_layer:
        self.shortcut = nn.Sequential(
            nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_planes+dense_depth)
        )

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 20

Example 12: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, in_planes, planes, stride=1):
    super(BasicBlock, self).__init__()
    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(planes)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn2 = nn.BatchNorm2d(planes)
    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != planes:
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes)
        )
    # SE layers
    self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
    self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 19

Example 13: _make_layers
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 19

Example 14: __init__
Likes: 6

# Required module: from torch import nn [as alias]
# Or alternatively: from torch.nn import BatchNorm2d [as alias]
def __init__(self, in_planes, out_planes, stride, groups):
    super(Bottleneck, self).__init__()
    self.stride = stride
    # integer division; the original snippet used "/", which yields a float under Python 3
    mid_planes = out_planes // 4
    g = 1 if in_planes == 24 else groups
    self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
    self.bn1 = nn.BatchNorm2d(mid_planes)
    self.shuffle1 = ShuffleBlock(groups=g)
    self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
    self.bn2 = nn.BatchNorm2d(mid_planes)
    self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
    self.bn3 = nn.BatchNorm2d(out_planes)
    self.shortcut = nn.Sequential()
    if stride == 2:
        self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 19

Note: The torch.nn.BatchNorm2d examples above were collected from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.
