This article collects typical usage examples of the Python method tensorflow.truncated_normal_initializer. If you have been wondering what tensorflow.truncated_normal_initializer does, how to use it, or what real-world code using it looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow module.
The following presents 22 code examples of tensorflow.truncated_normal_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
Example 1: cifarnet_arg_scope
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
Developer: ringringyi | Project: DOTA_models | Lines of code: 22
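Note that `trunc_normal` is not defined in the snippet itself; in the TF-Slim nets code this example is drawn from, it is shorthand for `lambda stddev: tf.truncated_normal_initializer(0.0, stddev)`. A minimal usage sketch follows (the placeholder shape and layer sizes are assumptions for illustration):

import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
with slim.arg_scope(cifarnet_arg_scope(weight_decay=0.004)):
    # conv2d picks up the truncated-normal weights initializer from the scope
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    net = slim.flatten(net)
    logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')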
Example 2: _variable_with_weight_decay
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Developer: ringringyi | Project: DOTA_models | Lines of code: 27
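`_variable_on_cpu` is not shown above; in the TensorFlow CIFAR-10 tutorial code this helper pattern comes from, it creates the variable under the CPU device so it can be shared across GPU towers. A minimal sketch, assuming that convention:

def _variable_on_cpu(name, shape, initializer):
    """Create a variable pinned to CPU memory (shared across GPU towers)."""
    with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape, initializer=initializer)
    return var

# Example call: a 5x5x3 conv kernel with 64 output channels and zero weight decay.
with tf.variable_scope('conv1'):
    kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                         stddev=5e-2, wd=0.0)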
Example 3: _variable_with_weight_decay
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Developer: ringringyi | Project: DOTA_models | Lines of code: 24
Example 4: _Deconv
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _Deconv(self, net, out_filters, kernel_size, stride):
  shape = net.get_shape().as_list()
  in_filters = shape[3]
  kernel_shape = [kernel_size, kernel_size, out_filters, in_filters]
  weights = tf.get_variable(
      name='weights',
      shape=kernel_shape,
      dtype=tf.float32,
      initializer=tf.truncated_normal_initializer(stddev=0.01))
  out_height = shape[1] * stride
  out_width = shape[2] * stride
  batch_size = shape[0]
  output_shape = [batch_size, out_height, out_width, out_filters]
  net = tf.nn.conv2d_transpose(net, weights, output_shape,
                               [1, stride, stride, 1], padding='SAME')
  net = slim.batch_norm(net)
  return net
Developer: ringringyi | Project: DOTA_models | Lines of code: 23
Example 5: _deconvolutional_layer
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _deconvolutional_layer(input, is_training, filters):
    # Two stride-2 transposed convolutional layers: returns data with 4x the
    # spatial dimensions of the input.
    output = tf.layers.conv2d_transpose(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    # output = tf.layers.batch_normalization(output, training=is_training)
    output = tf.layers.conv2d_transpose(
        output,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    # output = tf.layers.batch_normalization(output, training=is_training)
    return output
Developer: MaxSobolMark | Project: HardRLWithYoutube | Lines of code: 27
Example 6: _convolutional_layer
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _convolutional_layer(input, filters, strides, is_training):
    """Constructs a conv2d layer followed by batch normalization and max pooling."""
    x = tf.layers.conv2d(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=strides,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    x = tf.layers.batch_normalization(x, training=is_training)
    output = tf.layers.max_pooling2d(x, 2, 2)
    return output
Developer: MaxSobolMark | Project: HardRLWithYoutube | Lines of code: 20
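A minimal sketch of how the two helpers above (Examples 5 and 6) might be wired together; the placeholder shapes are assumptions for illustration:

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
is_training = tf.placeholder(tf.bool)

# Encoder step: conv + batch norm, then 2x2 max pooling halves the spatial dims (64 -> 32).
h = _convolutional_layer(x, filters=32, strides=1, is_training=is_training)

# Decoder step: two stride-2 transposed convolutions quadruple them (32 -> 128).
y = _deconvolutional_layer(h, is_training, filters=3)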
Example 7: M_step
Likes: 6
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    R_shape = tf.shape(log_R)
    log_R = log_R + log_activation

    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)

    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)), axis=-3, keepdims=True)

    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)

    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)

    return (pose, log_var, log_activation)
Developer: naturomics | Project: CapsLayer | Lines of code: 25
Example 8: create_initializer
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
Developer: Socialbird-AILab | Project: BERT-Classification-Tutorial | Lines of code: 5
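In the BERT code this helper comes from, the returned initializer is passed straight to variable creation. A minimal sketch (the shape values are assumptions, matching BERT-Base's vocabulary and hidden sizes):

embedding_table = tf.get_variable(
    name='word_embeddings',
    shape=[30522, 768],  # [vocab_size, hidden_size]
    initializer=create_initializer(initializer_range=0.02))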
Example 9: embed
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def embed(inputs, vocab_size, num_units, zero_pad=True, scope="embedding", reuse=None):
    '''Embeds a given tensor.

    Args:
      inputs: A `Tensor` with type `int32` or `int64` containing the ids
        to be looked up in `lookup table`.
      vocab_size: An int. Vocabulary size.
      num_units: An int. Number of embedding hidden units.
      zero_pad: A boolean. If True, all the values of the first row (id 0)
        should be constant zeros.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A `Tensor` with one more rank than the input's. The last dimension
        is `num_units`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        lookup_table = tf.get_variable('lookup_table',
                                       dtype=tf.float32,
                                       shape=[vocab_size, num_units],
                                       initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
        if zero_pad:
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, inputs)
    return outputs
Developer: Kyubyong | Project: dc_tts | Lines of code: 32
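A minimal usage sketch (the sequence length and sizes are assumptions for illustration):

token_ids = tf.placeholder(tf.int32, shape=[None, 50])  # a batch of token-id sequences
embedded = embed(token_ids, vocab_size=32, num_units=256, zero_pad=True)
# embedded has shape [batch, 50, 256]; with zero_pad=True, row 0 of the lookup
# table is forced to zeros, so id 0 can act as a padding token.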
Example 10: get_weight_variable
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weigths", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer function is provided, add its output to the 'losses' collection
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# Define the forward pass
Developer: wdxtub | Project: deep-learning-note | Lines of code: 13
Example 11: conv_relu
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def conv_relu(inputs, filters, k_size, stride, padding, scope_name):
    '''
    A method that does convolution + relu on inputs
    '''
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE) as scope:
        in_channels = inputs.shape[-1]
        kernel = tf.compat.v1.get_variable('kernel',
                                           [k_size, k_size, in_channels, filters],
                                           initializer=tf.truncated_normal_initializer())
        biases = tf.compat.v1.get_variable('biases',
                                           [filters],
                                           initializer=tf.random_normal_initializer())
        conv = tf.nn.conv2d(inputs, kernel, strides=[1, stride, stride, 1], padding=padding)
        return tf.nn.relu(conv + biases, name=scope.name)
Developer: wdxtub | Project: deep-learning-note | Lines of code: 16
Example 12: fully_connected
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def fully_connected(inputs, out_dim, scope_name='fc'):
    '''
    A fully connected linear layer on inputs
    '''
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE) as scope:
        in_dim = inputs.shape[-1]
        w = tf.compat.v1.get_variable('weights', [in_dim, out_dim],
                                      initializer=tf.truncated_normal_initializer())
        b = tf.compat.v1.get_variable('biases', [out_dim],
                                      initializer=tf.constant_initializer(0.0))
        out = tf.matmul(inputs, w) + b
        return out
Developer: wdxtub | Project: deep-learning-note | Lines of code: 14
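A minimal sketch chaining Examples 11 and 12 into a tiny classifier (the input size and layer widths are assumptions for illustration):

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
conv1 = conv_relu(images, filters=32, k_size=5, stride=1, padding='SAME', scope_name='conv1')
# With stride 1 and SAME padding the spatial dims stay 28x28.
flat = tf.reshape(conv1, [-1, 28 * 28 * 32])
logits = fully_connected(flat, out_dim=10, scope_name='fc1')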
Example 13: mobilenet_v1_arg_scope
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
  """Defines the default MobilenetV1 arg scope.

  Args:
    is_training: Whether or not we're training the model.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not to apply regularization on depthwise.

  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
  batch_norm_params = {
      'is_training': is_training,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
  }

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc
Developer: ringringyi | Project: DOTA_models | Lines of code: 40
Example 14: lenet_arg_scope
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def lenet_arg_scope(weight_decay=0.0):
  """Defines the default lenet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the lenet model.
  """
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
      activation_fn=tf.nn.relu) as sc:
    return sc
Developer: ringringyi | Project: DOTA_models | Lines of code: 17
Example 15: __init__
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def __init__(self, image_size, num_channels, hidden_dim):
  self.image_size = image_size
  self.num_channels = num_channels
  self.hidden_dim = hidden_dim

  self.matrix_init = tf.truncated_normal_initializer(stddev=0.1)
  self.vector_init = tf.constant_initializer(0.0)
Developer: ringringyi | Project: DOTA_models | Lines of code: 8
Example 16: __init__
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
# Also requires: import numpy as np; from six.moves import xrange
def __init__(self, key_dim, memory_size, vocab_size,
             choose_k=256, alpha=0.1, correct_in_top=1, age_noise=8.0,
             var_cache_device='', nn_device='',
             num_hashes=None, num_libraries=None):
  super(LSHMemory, self).__init__(
      key_dim, memory_size, vocab_size,
      choose_k=choose_k, alpha=alpha, correct_in_top=1, age_noise=age_noise,
      var_cache_device=var_cache_device, nn_device=nn_device)

  self.num_libraries = num_libraries or int(self.choose_k ** 0.5)
  self.num_per_hash_slot = max(1, self.choose_k // self.num_libraries)
  self.num_hashes = (num_hashes or
                     int(np.log2(self.memory_size / self.num_per_hash_slot)))
  self.num_hashes = min(max(self.num_hashes, 1), 20)
  self.num_hash_slots = 2 ** self.num_hashes

  # hashing vectors
  self.hash_vecs = [
      tf.get_variable(
          'hash_vecs%d' % i, [self.num_hashes, self.key_dim],
          dtype=tf.float32, trainable=False,
          initializer=tf.truncated_normal_initializer(0, 1))
      for i in xrange(self.num_libraries)]

  # map representing which hash slots map to which mem keys
  self.hash_slots = [
      tf.get_variable(
          'hash_slots%d' % i, [self.num_hash_slots, self.num_per_hash_slot],
          dtype=tf.int32, trainable=False,
          initializer=tf.random_uniform_initializer(maxval=self.memory_size,
                                                    dtype=tf.int32))
      for i in xrange(self.num_libraries)]
Developer: ringringyi | Project: DOTA_models | Lines of code: 34
Example 17: _build_initializer
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _build_initializer(initializer):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    return slim.variance_scaling_initializer(
        factor=initializer.variance_scaling_initializer.factor,
        mode=mode,
        uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))
Developer: ringringyi | Project: DOTA_models | Lines of code: 31
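This builder comes from the TF Object Detection API, where hyperparameters arrive as text-format protos. A sketch of how such a config might be parsed and passed in, assuming the `Initializer` message from object_detection's hyperparams.proto is available:

from google.protobuf import text_format
from object_detection.protos import hyperparams_pb2

initializer_proto = hyperparams_pb2.Initializer()
text_format.Merge("""
  truncated_normal_initializer {
    mean: 0.0
    stddev: 0.01
  }
""", initializer_proto)
init = _build_initializer(initializer_proto)  # -> tf.truncated_normal_initializer(mean=0.0, stddev=0.01)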
Example 18: __init__
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def __init__(self, env_spec, internal_dim,
             fixed_std=True, recurrent=True,
             input_prev_actions=True):
  self.env_spec = env_spec
  self.internal_dim = internal_dim
  self.rnn_state_dim = self.internal_dim
  self.fixed_std = fixed_std
  self.recurrent = recurrent
  self.input_prev_actions = input_prev_actions

  self.matrix_init = tf.truncated_normal_initializer(stddev=0.01)
  self.vector_init = tf.constant_initializer(0.0)
Developer: ringringyi | Project: DOTA_models | Lines of code: 14
Example 19: weightVariable
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
# Also requires: import math
def weightVariable(shape, std=1.0, name=None):
    # Create a set of weights initialized with truncated normal random values
    name = 'weights' if name is None else name
    return tf.get_variable(name, shape,
                           initializer=tf.truncated_normal_initializer(stddev=std / math.sqrt(shape[0])))
Developer: robb-brown | Project: IntroToDeepLearning | Lines of code: 6
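The stddev here is divided by sqrt(shape[0]), i.e. by the layer's fan-in, which is the classic heuristic for keeping activation variance roughly constant across layers. A minimal usage sketch (the layer sizes are assumptions):

W1 = weightVariable([784, 128], name='fc1_weights')          # stddev = 1.0 / sqrt(784)
W2 = weightVariable([128, 10], std=1.0, name='fc2_weights')  # stddev = 1.0 / sqrt(128)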
Example 20: deconv2d
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def deconv2d(input, output_shape, is_train, info=False, k=3, s=2, stddev=0.01,
             activation_fn=tf.nn.relu, norm='batch', name='deconv2d'):
    with tf.variable_scope(name):
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            activation_fn=None,
            kernel_size=[k, k], stride=[s, s], padding='SAME'
        )
        _ = norm_and_act(_, is_train, norm=norm, activation_fn=activation_fn)
        if info: print_info(name, _.get_shape().as_list(), activation_fn)
    return _
Developer: clvrai | Project: SSGAN-Tensorflow | Lines of code: 16
Example 21: conv2d
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def conv2d(input, output_shape, is_train, k_h=4, k_w=4, s=2,
           stddev=0.02, name="conv2d", activation_fn=lrelu, batch_norm=True):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input.get_shape()[-1], output_shape],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        _ = tf.nn.conv2d(input, w, strides=[1, s, s, 1], padding='SAME')
        biases = tf.get_variable('biases', [output_shape],
                                 initializer=tf.constant_initializer(0.0))
        _ = tf.reshape(tf.nn.bias_add(_, biases), _.get_shape())
    return bn_act(_, is_train, batch_norm=batch_norm, activation_fn=activation_fn)
Developer: clvrai | Project: Generative-Latent-Optimization-Tensorflow | Lines of code: 15
Example 22: deconv2d
Likes: 5
# Required module: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def deconv2d(input, deconv_info, is_train, name="deconv2d",
             stddev=0.02, activation_fn=tf.nn.relu, batch_norm=True):
    with tf.variable_scope(name):
        output_shape = deconv_info[0]
        k = deconv_info[1]
        s = deconv_info[2]
        _ = layers.conv2d_transpose(
            input,
            num_outputs=output_shape,
            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
            biases_initializer=tf.zeros_initializer(),
            kernel_size=[k, k], stride=[s, s], padding='SAME'
        )
    return bn_act(_, is_train, batch_norm=batch_norm, activation_fn=activation_fn)
Developer: clvrai | Project: Generative-Latent-Optimization-Tensorflow | Lines of code: 17
Note: The tensorflow.truncated_normal_initializer examples above were collected from open-source projects hosted on GitHub, MSDocs, and similar source-code and documentation platforms. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution and use should follow each project's license. Do not reproduce without permission.