Reference material:
tf.clip_by_value(t, clip_value_min, clip_value_max, name=None)
tf.ones(shape, dtype=tf.float32, name=None)
tf.ones_like(tensor, dtype=None, name=None)
tf.zeros(shape, dtype=tf.float32, name=None)
tf.zeros_like(tensor, dtype=None, name=None)
tf.fill(shape, value, name=None)
tf.constant(value, dtype=None, shape=None, name='Const')
tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, partitioner=None, validate_shape=True, custom_getter=None)
tf.shape(Tensor)
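A minimal runnable sketch tying the creation ops above together (TF 1.x style; the shapes and the variable name are made up for illustration):
import tensorflow as tf

a = tf.ones([2, 3])                                  # 2x3 tensor of ones
b = tf.zeros_like(a)                                 # zeros with a's shape and dtype
c = tf.fill([2, 3], 7.0)                             # 2x3 tensor filled with 7.0
d = tf.constant([[1.0, 2.0, 3.0]])                   # constant built from a Python list
e = tf.random_normal([2, 3], mean=0.0, stddev=1.0)   # samples from N(0, 1)
f = tf.clip_by_value(e, -0.5, 0.5)                   # clamp every entry into [-0.5, 0.5]
w = tf.get_variable('w', shape=[3, 4], initializer=tf.random_uniform_initializer(0, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.shape(f)))                     # ==> [2 3]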
tf.expand_dims(Tensor, axis)
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
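The shapes above can be checked at runtime; a small sketch with a made-up tensor:
import tensorflow as tf

t = tf.constant([7, 9])                                   # shape [2]
with tf.Session() as sess:
    print(sess.run(tf.shape(tf.expand_dims(t, 0))))       # ==> [1 2]
    print(sess.run(tf.shape(tf.expand_dims(t, -1))))      # ==> [2 1]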
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
tf.shape(tf.concat([t3, t4], 1)) ==> [2, 6]
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
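Note that pack was renamed tf.stack in TensorFlow 1.0; a runnable sketch of concat and stack using the tensors above:
import tensorflow as tf

t1 = tf.constant([[1, 2, 3], [4, 5, 6]])
t2 = tf.constant([[7, 8, 9], [10, 11, 12]])
x, y, z = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])

with tf.Session() as sess:
    print(sess.run(tf.concat([t1, t2], 0)).shape)    # (4, 3)
    print(sess.run(tf.concat([t1, t2], 1)).shape)    # (2, 6)
    print(sess.run(tf.stack([x, y, z])))             # [[1 4] [2 5] [3 6]]
    print(sess.run(tf.stack([x, y, z], axis=1)))     # [[1 2 3] [4 5 6]]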
tf.random_shuffle(value, seed=None, name=None)
tf.argmax(input, axis=None, name=None)
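A quick sketch of these two ops (the values are made up):
import tensorflow as tf

logits = tf.constant([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])
with tf.Session() as sess:
    print(sess.run(tf.random_shuffle(logits)))      # rows shuffled along the first dimension
    print(sess.run(tf.argmax(logits, axis=1)))      # index of the largest entry per row ==> [1 0]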
# Before this come the graph-building ops (matrix multiplications, sigmoid, etc.)
saver = tf.train.Saver()  # create the saver
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize the model first
    # then feed the data in and train
    # after training, save the model with saver.save
    saver.save(sess, "save_path/file_name")  # file_name is created automatically if it does not exist
saver = tf.train.Saver()
with tf.Session() as sess:
    # Initializing the variables is optional here; even if you do, the
    # initialized values are overwritten by the restored ones.
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, "save_path/file_name")  # restores the saved values into the variables
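Note that in TF 1.x saver.save writes several files using "save_path/file_name" as a prefix (a .meta graph file, .index and .data files, plus a checkpoint file), and saver.restore should be given that same prefix rather than one of the individual file names.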
# 1. The loss for this batch, computed by the ops defined earlier
loss = ...
# 2. Use tf.summary.scalar to record the variable you want to track, named 'loss'
tf.summary.scalar('loss', loss)
# 3. Define a summary op that merges everything recorded by the scalar summaries
merged_summary_op = tf.summary.merge_all()
# 4. Create a summary writer; it needs a log directory, here /tmp/logdir
sess = tf.Session()
summary_writer = tf.summary.FileWriter('/tmp/logdir', sess.graph)
# Start training, feeding the data batch by batch
for i in range(batch_num):
    # 5. Use sess.run to evaluate merged_summary_op (train_op is the training op defined earlier)
    _, loss_val, summary_str = sess.run([train_op, loss, merged_summary_op], feed_dict={})
    # 6. Use the summary writer to write the loss for this step
    summary_writer.add_summary(summary_str, i)
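To view the curves, point TensorBoard at the same directory, e.g. tensorboard --logdir=/tmp/logdir, and open the URL it prints (http://localhost:6006 by default).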
def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None):
input: the data to convolve, a 4-D tensor of shape [batch, in_height, in_width, in_channels], i.e. batch size, image height, image width, and number of input channels.
filter: the convolution kernel, of shape [filter_height, filter_width, in_channels, out_channels], i.e. kernel height, kernel width, input channels, output channels.
strides: a list of length 4 giving how far the convolution window slides over input in each dimension per step.
padding: either SAME or VALID; controls whether the border region that the kernel cannot cover completely is kept. SAME keeps it (pads the input).
use_cudnn_on_gpu: whether to use cuDNN acceleration; defaults to True.
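A minimal tf.nn.conv2d sketch (random input and kernel; the shapes are chosen only for illustration):
import tensorflow as tf

x = tf.random_normal([8, 28, 28, 1])     # batch of 8 single-channel 28x28 images
w = tf.random_normal([5, 5, 1, 32])      # 5x5 kernel, 1 input channel, 32 output channels
conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

with tf.Session() as sess:
    print(sess.run(tf.shape(conv)))      # ==> [8 28 28 32] (SAME padding keeps 28x28)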
- tf.nn.max_pool
- Performs max pooling, while avg_pool performs average pooling. The function is defined as:
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
value: a 4-D tensor in [batch, height, width, channels] format, the same layout as input in conv2d.
ksize: a list of length 4 giving the size of the pooling window.
strides: the stride of the pooling window, same meaning as in conv2d.
padding: same usage as in conv2d.
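And a matching tf.nn.max_pool sketch that halves the spatial size (again with made-up shapes):
import tensorflow as tf

x = tf.random_normal([8, 28, 28, 32])
pool = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

with tf.Session() as sess:
    print(sess.run(tf.shape(pool)))      # ==> [8 14 14 32]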