A Comprehensive Guide to TensorFlow's scope() Functions

Contents

  • get_model_variables() and get_variables()

  • tf.variable_scope()

  • tf.name_scope() and its name-overriding mechanism

  • From slim to tf.contrib.framework.arg_scope()/tf.contrib.layers


get_model_variables() and get_variables()

import tensorflow as tf
import tensorflow.contrib.slim as slim
tf.reset_default_graph()
  
# Model Variables
weights = slim.model_variable('weights',
                              shape=[5, 2],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05))
model_variables = slim.get_model_variables()
 
 
# Regular variables
my_var = slim.variable('my_var',
                       shape=[10, 1],
                       initializer=tf.zeros_initializer())

regular_variables_and_model_variables = slim.get_variables()
 
print(weights.name)
print(my_var.name)
 
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    a = sess.run(model_variables)
    b = sess.run(regular_variables_and_model_variables)
    
print(a)
print(b)

Output

weights:0
my_var:0
[array([[ 0.07020444,  0.02122229],
       [-0.03895606, -0.05004985],
       [ 0.0740179 ,  0.09496083],
       [-0.09135272,  0.03352658],
       [ 0.00048627,  0.02789292]], dtype=float32)]
[array([[ 0.07020444,  0.02122229],
       [-0.03895606, -0.05004985],
       [ 0.0740179 ,  0.09496083],
       [-0.09135272,  0.03352658],
       [ 0.00048627,  0.02789292]], dtype=float32), array([[0.],
       [0.],
       [0.],
       [0.],
       [0.],
       [0.],
       [0.],
       [0.],
       [0.],
       [0.]], dtype=float32)]
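
In the output, get_model_variables() returns only weights, because slim.model_variable() registers it in the model-variables collection, while slim.variable() creates an ordinary variable that only get_variables() sees. Both getters also accept a scope filter; a minimal sketch, continuing the graph above (the scope name 'head' is made up for illustration):

# A hedged sketch: both getters accept an optional scope filter,
# so variables created under a variable_scope can be queried by prefix.
with tf.variable_scope('head'):
    h = slim.model_variable('h', shape=[3],
                            initializer=tf.zeros_initializer())

print([v.name for v in slim.get_model_variables('head')])  # ['head/h:0']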


tf.variable_scope()

import tensorflow as tf

with tf.variable_scope('a') as a:
    print(a.name)
    print(a.original_name_scope)

with tf.variable_scope('a') as b:
    print(b.name)
    print(b.original_name_scope)

Output

a
a/
a
a_1/

Entering tf.variable_scope('a') a second time yields a scope with the same name 'a', but its underlying name scope is the fresh 'a_1/'.

import tensorflow as tf

a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1)) 

with tf.variable_scope('V1'):     
    a2 = tf.Variable(tf.random_normal(shape=[2,3], mean=0, stddev=1), name='a2') 
        
with tf.variable_scope('V2'): 
    a3 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(80)) 
    
print(a1.name)
print(a2.name)
print(a3.name)

with tf.Session() as sess: 
    sess.run(tf.global_variables_initializer())    
    print(sess.run(a1))
    print(sess.run(a2))
    print(sess.run(a3))

Output

a1:0
V1/a2:0
V2/a1:0
[ 1.]
[[ 0.42836785 -1.23881435  0.5126422 ]
 [-0.49294728 -1.00942028  0.16770303]]
[ 80.]

From the output, V1 and V2 are scopes: a2 lives under V1, and a3 (named a1) lives under V2. The printed values confirm that this mechanism is mainly for name management: the a1 inside V2 is a different variable from the top-level a1, despite the shared short name. (In everyday code we usually create variables with tf.Variable.)
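
A minimal sketch of how these scopes nest (the scope names 'outer' and 'inner' are made up for illustration); tf.get_variable_scope() returns the scope currently in effect:

import tensorflow as tf
tf.reset_default_graph()

# Nested variable scopes compose their names with '/'.
with tf.variable_scope('outer'):
    with tf.variable_scope('inner'):
        w = tf.get_variable('w', shape=[1])
        print(tf.get_variable_scope().name)  # outer/inner
        print(w.name)                        # outer/inner/w:0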

type(a1)
Out[2]: tensorflow.python.ops.variables.Variable

type(a3)
Out[3]: tensorflow.python.ops.variables.Variable


import tensorflow as tf
tf.reset_default_graph()
 
with tf.variable_scope("foo"):
    c = tf.get_variable('constant1', initializer=([1.,2.],[3.,4.]))
 
with tf.variable_scope("foo",reuse=True):
    v = tf.get_variable('constant1')
 
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(c.eval())
    print(v.eval())

Output

[[ 1.  2.]
 [ 3.  4.]]
[[ 1.  2.]
 [ 3.  4.]]

This is a typical example of retrieving a variable's value by name. Note that the dtype must be float here, not int. Without reuse=True, an error is raised: ValueError: Variable foo/constant1 already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?
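
As the error message suggests, reuse=tf.AUTO_REUSE is an alternative that creates the variable on first use and reuses it afterwards. A minimal sketch, assuming TensorFlow 1.4+ (the scope name 'shared' and the helper shared_weights are made up for illustration):

import tensorflow as tf
tf.reset_default_graph()

def shared_weights():
    # First call creates 'shared/w'; later calls return the same
    # variable instead of raising ValueError.
    with tf.variable_scope('shared', reuse=tf.AUTO_REUSE):
        return tf.get_variable('w', shape=[2, 2],
                               initializer=tf.ones_initializer())

w1 = shared_weights()
w2 = shared_weights()
print(w1 is w2)  # True: both names are 'shared/w:0'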


import tensorflow as tf
tf.reset_default_graph()

with tf.name_scope('name_scope_test'):
    v1 = tf.get_variable('v', shape=[1], initializer=tf.constant_initializer(1.0))
    v2 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')
    v3 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')

init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    print('the name of v1:', v1.name)
    print('the name of v2:', v2.name)
    print('the name of v3:', v3.name)

Output

the name of v1: v:0
the name of v2: name_scope_test/v:0
the name of v3: name_scope_test/v_1:0

This example shows how duplicate variable names are automatically renamed by the system, and that tf.name_scope() has no effect on tf.get_variable().


import tensorflow as tf
tf.reset_default_graph()

with tf.variable_scope('variable_scope_test'):
    v1 = tf.get_variable('v', shape=[1], initializer=tf.constant_initializer(1.0))
    v2 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')
    v3 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')

print('the name of v1:', v1.name)
print('the name of v2:', v2.name)
print('the name of v3:', v3.name)

Output

the name of v1: variable_scope_test/v:0
the name of v2: variable_scope_test/v_1:0
the name of v3: variable_scope_test/v_2:0


import tensorflow as tf
tf.reset_default_graph()

with tf.variable_scope('variable_scope_test'):
    v1 = tf.get_variable('v1', shape=[1], initializer=tf.constant_initializer(1.0))
    v2 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')
    v3 = tf.Variable(tf.constant(1.0, shape=[1]), name='v')
    v4 = tf.get_variable('v', shape=[1], initializer=tf.constant_initializer(1.0))
    
with tf.variable_scope('variable_scope_test', reuse=True):
    v5 = tf.get_variable('v1')
    v6 = tf.get_variable('v')
  
print('v1:',v1.name)
print('v2:',v2.name)
print('v3:',v3.name)
print('v4:',v4.name)
print('v5:',v5.name)
print('v6:',v6.name)

Output

v1: variable_scope_test/v1:0
v2: variable_scope_test/v:0
v3: variable_scope_test/v_1:0
v4: variable_scope_test/v_2:0
v5: variable_scope_test/v1:0
v6: variable_scope_test/v_2:0

This example shows how reuse=True retrieves existing variables by name: v5 resolves to the same variable as v1, and v6 to the same variable as v4.
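
A quick check (not part of the original run) confirms that reuse returns the very same Variable objects rather than copies:

print(v5 is v1)  # True
print(v6 is v4)  # True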


import numpy as np
import tensorflow as tf
tf.reset_default_graph()

x = np.arange(25).reshape(5,5)
x = tf.cast(x,tf.float32)
x = tf.reshape(x, [1, 5, 5, 1])

f=tf.Variable(tf.random_uniform([3,3,1,1],-1,1))

with tf.variable_scope('encoder'):
    valid_pad = tf.nn.conv2d(x, f, [1,1,1,1], padding='VALID')
    
with tf.variable_scope('decoder'):
    y = valid_pad+100

same_pad = tf.nn.conv2d(x, f, [1,1,1,1], padding='SAME')


print(valid_pad.name)
print(valid_pad.get_shape())

print(y.name)
print(y.get_shape())

print(same_pad.name)
print(same_pad.get_shape())

Output

encoder/Conv2D:0
(1, 3, 3, 1)
decoder/add:0
(1, 3, 3, 1)
Conv2D:0
(1, 5, 5, 1)


In this example the scopes wrap the convolution ops directly, names follow the overriding (prefixing) mechanism, and tensors flow between scopes regardless of their names. So-called name management, then, merely groups related names under a common prefix.
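
One practical use of these centralized names is looking tensors up from the graph; a minimal sketch, continuing the example above:

# Recover the conv output by its full op name instead of a Python reference.
g = tf.get_default_graph()
t = g.get_tensor_by_name('encoder/Conv2D:0')
print(t is valid_pad)  # True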


import numpy as np
import tensorflow as tf
tf.reset_default_graph()
 
y,x = np.mgrid[1:5,60:63]
 
z = x + 1j*y
 
xs = tf.constant(z.astype('complex64'))
 
zs = tf.Variable(xs,name='zs')
 
get1 = tf.get_variable("zs",[2,2])
 
print('xs name:',xs.name)
print('zs name:',zs.name)
print('get1 name',get1.name)
 
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('zs:')
    print(sess.run(zs))
    print('get1:')
    print(sess.run(get1))

Output

xs name: Const:0
zs name: zs:0
get1 name zs_1:0
zs:
[[60.+1.j 61.+1.j 62.+1.j]
 [60.+2.j 61.+2.j 62.+2.j]
 [60.+3.j 61.+3.j 62.+3.j]
 [60.+4.j 61.+4.j 62.+4.j]]
get1:
[[-0.45420784  1.0448159 ]
 [-1.1204109   0.3832376 ]]

Gets an existing variable with these parameters or create a new one.

Used on its own (outside a reusing scope), tf.get_variable() only creates new variables; it cannot retrieve a variable previously defined with tf.Variable. It merely triggers the automatic renaming mechanism, hence zs_1:0.
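
To recover a variable that was created with tf.Variable, search the graph's collections by name instead; a minimal sketch, continuing the example above:

# tf.global_variables() lists every variable in the graph, so we can
# match the full name 'zs:0' that tf.get_variable() could not retrieve.
zs_again = [v for v in tf.global_variables() if v.name == 'zs:0'][0]
print(zs_again is zs)  # True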


tf.name_scope() and its name-overriding mechanism

import tensorflow as tf
tf.reset_default_graph()
 
name = 'a1'
 
x = tf.Variable(tf.random_normal(shape=[5,5], mean=0, stddev=1), name='a2') 
x = tf.reshape(x,[1,5,5,1])
 
with tf.name_scope(name): 
    x = tf.nn.relu(x, name=name+'_relu')
    x = tf.layers.conv2d(x,
                         filters=14,
                         kernel_size=[3, 3],
                         strides=[1, 1],
                         padding='SAME',
                         dilation_rate=[1, 1],
                         use_bias=True,
                         activation=None,
                         kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                         name=name+'_conv3x3')
 
print(x.name)

with tf.name_scope(name): 
    x = tf.nn.relu(x, name=name+'_relu')
 
print(x.name)

Output

a1/a1_conv3x3/BiasAdd:0
a1_1/a1_relu:0

If use_bias=False is set instead, the first name becomes a1/a1_conv3x3/Conv2D:0, since no bias-add op is appended.

tf.layers.conv2d(
    inputs,
    filters,
    kernel_size,
    strides=(1, 1),
    padding='valid',
    data_format='channels_last',
    dilation_rate=(1, 1),
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=tf.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None
)

From slim to tf.contrib.framework.arg_scope()/tf.contrib.layers

import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
tf.reset_default_graph()

inputs = np.ones((256,256)).reshape(1,256,256,1)
with tf.contrib.framework.arg_scope([slim.conv2d], padding='SAME',
                      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):
    net = tf.contrib.layers.conv2d(inputs, 64, [11, 11], scope='conv1')
    net = tf.contrib.layers.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
    net = tf.contrib.layers.conv2d(net, 256, [11, 11], scope='conv3')
    
print(net.name)

Output

conv3/Relu:0

tf.contrib.layers.conv2d(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    data_format=None,
    rate=1,
    activation_fn=tf.nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=tf.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None
)
  • activation_fn: Activation function. The default value is a ReLU function. Explicitly set it to None to skip it and maintain a linear activation.
  • outputs_collections: Collection to add the outputs.

As the signature shows, activation_fn defaults to ReLU, which is why the output tensor is conv3/Relu:0. Note that slim.conv2d is the same function as tf.contrib.layers.conv2d, which is why the defaults declared in arg_scope for slim.conv2d apply to these calls.
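
Conversely, overriding activation_fn=None leaves the bias add as the last op; a minimal sketch (the scope name 'conv_lin' is made up for illustration):

tf.reset_default_graph()
# With no activation, the output tensor comes from the BiasAdd op.
net = tf.contrib.layers.conv2d(tf.ones((1, 8, 8, 1)), 4, [3, 3],
                               activation_fn=None, scope='conv_lin')
print(net.name)  # expected: conv_lin/BiasAdd:0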
