Python torch.nn.init.constant 方法代码示例

本文整理汇总了Python中torch.nn.init.constant方法的典型用法代码示例。如果您正苦于以下问题:Python init.constant方法的具体用法?Python init.constant怎么用?Python init.constant使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块torch.nn.init的用法示例。

在下文中一共展示了init.constant方法的25个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: conv

点赞 7

# 需要导入模块: from torch.nn import init [as 别名]

# 或者: from torch.nn.init import constant [as 别名]

def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False, transposed=False):
    """Build a 2D convolution or transposed convolution layer.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel side length.
        stride: convolution stride (also the upsampling factor when transposed).
        dilation: dilation rate.
        bias: if True, add a bias term (zero-initialised).
        transposed: if True, return an ``nn.ConvTranspose2d`` whose weights are
            initialised for bilinear interpolation; otherwise an ``nn.Conv2d``.

    Returns:
        The constructed ``nn.ConvTranspose2d`` or ``nn.Conv2d`` layer.
    """
    if transposed:
        layer = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                                   padding=1, output_padding=1, dilation=dilation, bias=bias)
        # Bilinear interpolation init of the upsampling weights.
        w = torch.Tensor(kernel_size, kernel_size)
        # Centre of the bilinear kernel. BUG FIX: the original used the
        # `cond and a or b` hack, which returns `b` whenever `a` is falsy —
        # for an odd kernel with stride == 1 it yielded 0.5 instead of 0.
        centre = stride - 1 if kernel_size % 2 == 1 else stride - 0.5
        for y in range(kernel_size):
            for x in range(kernel_size):
                w[y, x] = (1 - abs((x - centre) / stride)) * (1 - abs((y - centre) / stride))
        # Same bilinear kernel for every (in, out) channel pair, scaled by 1/in_planes.
        layer.weight.data.copy_(w.div(in_planes).repeat(in_planes, out_planes, 1, 1))
    else:
        # "Same"-style padding for odd kernels, adjusted for dilation.
        padding = (kernel_size + 2 * (dilation - 1)) // 2
        layer = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                          padding=padding, dilation=dilation, bias=bias)
    if bias:
        # constant_ is the non-deprecated spelling of init.constant.
        init.constant_(layer.bias, 0)
    return layer

# Returns 2D batch normalisation layer

开发者ID:Kaixhin,项目名称:FCN-semantic-segmentation,代码行数:21,

示例2: __init__

点赞 6

# 需要导入模块: from torch.nn import init [as 别名]

# 或者: from torch.nn.init import constant [as 别名]

def __init__(self, num_classes, pretrained_net):
    """FCN decoder head on top of a pretrained encoder.

    Args:
        num_classes: number of output segmentation classes.
        pretrained_net: encoder network providing the input features.
    """
    super().__init__()
    self.pretrained_net = pretrained_net
    self.relu = nn.ReLU(inplace=True)
    # Decoder: five stride-2 transposed convolutions, each paired with a
    # batch-norm layer, progressively halving the channel count.
    self.conv5, self.bn5 = conv(512, 256, stride=2, transposed=True), bn(256)
    self.conv6, self.bn6 = conv(256, 128, stride=2, transposed=True), bn(128)
    self.conv7, self.bn7 = conv(128, 64, stride=2, transposed=True), bn(64)
    self.conv8, self.bn8 = conv(64, 64, stride=2, transposed=True), bn(64)
    self.conv9, self.bn9 = conv(64, 32, stride=2, transposed=True), bn(32)
    # Final per-pixel classifier, zero-initialised.
    self.conv10 = conv(32, num_classes, kernel_size=7)
    init.constant(self.conv10.weight, 0)

开发者ID:Kaixhin,项目名称:FCN-semantic-segmentation,代码行数:18,

示例3: init_params

点赞 6

# 需要导入模块: from torch.nn import init [as 别名]

# 或者: from torch.nn.init import constant [as 别名]

def init_params(net):
    """Initialise the parameters of every layer in *net* in place.

    Conv2d weights get Kaiming-normal init (fan_out), BatchNorm2d is set to
    identity (weight 1, bias 0), Linear weights get a small normal init
    (std 1e-3); all biases are zeroed.

    Args:
        net: an ``nn.Module`` whose submodules are initialised.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            # BUG FIX: `if m.bias:` called bool() on a multi-element tensor,
            # which raises "Bool value of Tensor ... is ambiguous". The correct
            # presence check is `is not None`.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)

#_, term_width = os.popen('stty size', 'r').read().split()

# term_width = int(term_width)

开发者ID:leehomyc,项目名称:mixup_pytorch,代码行数:20,

示例4: reset_parameters

点赞 6

# 需要导入模块: from torch.nn import init [as 别名]

# 或者: from torch.nn.init import constant [as 别名]

def reset_parameters(self):

"""

Initialize parameters following the way proposed in the paper.

"""

# The input-to-hidden weight matrix is initialized orthogonally.

init.orthogonal(self.weight_ih.data)

# The hidden-to-hidden weight matrix is initialized as an identity

# matrix.

weight_hh_data = torch.eye(self.hidden_size)

weight_hh_data = weight_hh_data.repeat(1, 4)

self.weight_hh.data.set_(weight_hh_data)

# The bias is just set to zero vectors.

init.constant(self.bias.data, val=0)

# Initialization of BN parameters.

self.bn_ih.reset_parameters()

self.bn_hh.reset_parameters()

self.bn_c.reset_parameters()

self.bn_ih.bias.data.fill_(0)

self.bn_hh.bias.data.fill_(0)

self.bn_ih.weight.data.fill_(0.1)

self.bn_h

你可能感兴趣的:(python,constant)