1. TensorFlow arithmetic operations
tf.add(x, y, name=None)   # addition
tf.sub(x, y, name=None)   # subtraction (renamed tf.subtract in TF 1.0)
tf.mul(x, y, name=None)   # multiplication (renamed tf.multiply in TF 1.0)
tf.div(x, y, name=None)   # division
tf.mod(x, y, name=None)   # modulo
tf.abs(x, name=None)      # absolute value
tf.neg(x, name=None)      # negation: y = -x (renamed tf.negative in TF 1.0)
tf.sign(x, name=None)     # sign: y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0
tf.inv(x, name=None)      # reciprocal: y = 1 / x (renamed tf.reciprocal in TF 1.0)
tf.square(x, name=None)   # square: y = x * x = x^2
tf.round(x, name=None)    # round to the nearest integer
# 'a' is [0.9, 2.5, 2.3, -4.4]; tf.round(a) ==> [1.0, 3.0, 2.0, -4.0]
tf.sqrt(x, name=None)     # square root: y = sqrt(x) = x^(1/2)
tf.pow(x, y, name=None)   # element-wise power
# tensor 'x' is [[2, 2], [3, 3]], tensor 'y' is [[8, 16], [2, 3]]; tf.pow(x, y) ==> [[256, 65536], [9, 27]]
tf.exp(x, name=None)      # e raised to the power x
tf.log(x, name=None)      # natural logarithm ln(x)
tf.maximum(x, y, name=None)  # element-wise maximum (x > y ? x : y)
tf.minimum(x, y, name=None)  # element-wise minimum (x < y ? x : y)
tf.cos(x, name=None)      # cosine
tf.sin(x, name=None)      # sine
tf.tan(x, name=None)      # tangent
tf.atan(x, name=None)     # arctangent
tf.multiply(x, y, name=None)  # element-wise multiplication
tf.matmul(a, b)               # matrix multiplication
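For instance, a minimal sketch (TF 1.x names; the tensors are made up) contrasting element-wise tf.multiply with matrix tf.matmul:
a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])
elementwise = tf.multiply(a, b)  # ==> [[5., 12.], [21., 32.]]
matrix = tf.matmul(a, b)         # ==> [[19., 22.], [43., 50.]]
with tf.Session() as sess:
    print(sess.run([elementwise, matrix]))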
2. Tensor Transformations
tf.string_to_number(string_tensor, out_type=None, name=None)  # convert a string to a number
tf.to_double(x, name='ToDouble')  # cast to 64-bit float (float64)
tf.to_float(x, name='ToFloat')    # cast to 32-bit float (float32)
tf.to_int32(x, name='ToInt32')    # cast to 32-bit integer (int32)
tf.to_int64(x, name='ToInt64')    # cast to 64-bit integer (int64)
tf.cast(x, dtype, name=None)      # cast x (or x.values) to dtype
# tensor 'a' is [1.8, 2.2] with dtype=tf.float32; tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
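A quick sketch of these conversions (TF 1.x assumed; the values are illustrative):
s = tf.constant(["1.5", "2.5"])
n = tf.string_to_number(s, out_type=tf.float32)  # ==> [1.5, 2.5]
i = tf.cast(n, tf.int32)                         # ==> [1, 2] (truncates toward zero)
d = tf.to_double(i)                              # ==> [1.0, 2.0] as float64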
3. Shapes and Shaping
# Return the shape of a tensor
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape(t) ==> [2, 2, 3]
tf.shape(input, name=None)
# Return the number of elements
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# size(t) ==> 12
tf.size(input, name=None)
# Return the rank of a tensor
# Note: this is not the same as matrix rank;
# a tensor's rank is the number of indices needed to uniquely address any element,
# also known as "order", "degree" or "ndims"
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
# rank(t) ==> 3
tf.rank(input, name=None)
# Change the shape of a tensor
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
# reshape(t, [3, 3]) ==>
# [[1, 2, 3],
#  [4, 5, 6],
#  [7, 8, 9]]
# A shape component of -1 means that dimension's size is inferred automatically
# (in this example 't' is a different tensor with 18 elements; -1 is inferred to be 9):
# reshape(t, [2, -1]) ==>
# [[1, 1, 1, 2, 2, 2, 3, 3, 3],
#  [4, 4, 4, 5, 5, 5, 6, 6, 6]]
tf.reshape(tensor, shape, name=None)
# Insert a dimension of size 1 into a tensor's shape
# The operation requires -1 - input.dims() <= dim <= input.dims()
# 't' is a tensor of shape [2]
# shape(expand_dims(t, 0)) ==> [1, 2]
# shape(expand_dims(t, 1)) ==> [2, 1]
# shape(expand_dims(t, -1)) ==> [2, 1]
tf.expand_dims(input, dim, name=None)
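A small sketch tying the shape ops together (TF 1.x assumed):
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
with tf.Session() as sess:
    print(sess.run(tf.shape(t)))  # [2 2 3]
    print(sess.run(tf.size(t)))   # 12
    print(sess.run(tf.rank(t)))   # 3
    print(sess.run(tf.shape(tf.reshape(t, [3, -1]))))  # [3 4], -1 inferred to be 4
    print(sess.run(tf.shape(tf.expand_dims(t, 0))))    # [1 2 2 3]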
4. Slicing and Joining
tf.slice(input_, begin, size, name=None)  # extract a slice
tf.split(split_dim, num_split, value, name='split')  # split along one dimension (TF 1.0: tf.split(value, num_or_size_splits, axis))
tf.concat(concat_dim, values, name='concat')  # concatenate along one dimension (TF 1.0: tf.concat(values, axis))
tf.pack(values, axis=0, name='pack')  # stack N rank-R tensors into one rank-(R+1) tensor (renamed tf.stack in TF 1.0)
tf.reverse(tensor, dims, name=None)   # reverse along the given dimensions
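A hedged sketch of these ops using the TF 1.x signatures noted above (the tensor is made up):
t = tf.constant([[1, 2, 3], [4, 5, 6]])
s = tf.slice(t, begin=[0, 1], size=[2, 2])        # ==> [[2, 3], [5, 6]]
a, b = tf.split(t, num_or_size_splits=2, axis=0)  # two tensors of shape [1, 3]
c = tf.concat([t, t], axis=0)                     # shape [4, 3]
p = tf.stack([t, t], axis=0)                      # shape [2, 2, 3] (tf.pack in TF 0.x)
r = tf.reverse(t, axis=[1])                       # ==> [[3, 2, 1], [6, 5, 4]]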
5. Reduction
tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None)  # mean of elements
tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None)   # sum of elements
tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None)  # product of elements
tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None)   # minimum
tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None)   # maximum
tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None)   # logical AND across elements
tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None)   # logical OR across elements
tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)  # element-wise sum of a list of tensors
tf.cumsum(x, axis=0, exclusive=False, reverse=False, name=None)    # cumulative sum along an axis
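For example (reduction_indices is the pre-1.0 name of what later became axis):
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_mean(x)                      # ==> 2.5 (over all elements)
tf.reduce_sum(x, reduction_indices=0)  # ==> [4., 6.] (column sums)
tf.reduce_max(x, reduction_indices=1)  # ==> [2., 4.] (row maxima)
tf.cumsum(tf.constant([1, 2, 3]))      # ==> [1, 3, 6]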
6. Matrix operations
tf.diag(diagonal, name=None)    # build a diagonal matrix from a vector
tf.diag_part(input, name=None)  # extract the diagonal
tf.trace(x, name=None)          # sum of the diagonal
tf.transpose(a, perm=None, name='transpose')  # permute dimensions
tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None)  # matrix multiplication
tf.matrix_determinant(input, name=None)  # determinant
tf.matrix_inverse(input, adjoint=None, name=None)  # matrix inverse
tf.cholesky(input, name=None)   # Cholesky decomposition
tf.matrix_solve(matrix, rhs, adjoint=None, name=None)  # solve matrix @ x = rhs
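A small numeric sketch (the values in comments are what a Session would return):
m = tf.constant([[2., 0.], [0., 4.]])
tf.diag_part(m)                   # ==> [2., 4.]
tf.trace(m)                       # ==> 6.0
tf.matrix_determinant(m)          # ==> 8.0
tf.matrix_inverse(m)              # ==> [[0.5, 0.], [0., 0.25]]
tf.matrix_solve(m, [[2.], [4.]])  # ==> [[1.], [1.]], solving m @ x = rhs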
7. Complex number operations
tf.complex(real, imag, name=None)  # build a complex tensor from real and imaginary parts
tf.complex_abs(x, name=None)       # complex magnitude (tf.abs in TF 1.x)
tf.conj(input, name=None)          # complex conjugate
tf.imag(input, name=None)          # imaginary part
tf.real(input, name=None)          # real part
tf.fft(input, name=None)           # 1-D discrete Fourier transform
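For instance (the magnitude op is tf.complex_abs in the pre-1.0 API listed above; tf.abs replaces it in TF 1.x):
z = tf.complex(3., 4.)           # 3 + 4j
mag = tf.abs(z)                  # ==> 5.0
cj = tf.conj(z)                  # ==> 3 - 4j
re, im = tf.real(z), tf.imag(z)  # ==> 3.0, 4.0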
8. Activation Functions
tf.sigmoid(x, name=None)  # y = 1 / (1 + exp(-x))
tf.tanh(x, name=None)     # hyperbolic tangent
tf.nn.relu(features, name=None)   # rectifier: max(features, 0)
tf.nn.relu6(features, name=None)  # rectifier capped at 6: min(max(features, 0), 6)
tf.nn.elu(features, name=None)    # Exponential Linear Unit (ELU): exp(features) - 1 if features < 0, else features
tf.nn.softplus(features, name=None)  # softplus: log(exp(features) + 1)
tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)  # dropout; keep_prob is the keep probability, noise_shape the shape of the random mask
tf.nn.bias_add(value, bias, data_format=None, name=None)  # add a 1-D bias to value; a special case of tf.add in which bias is broadcast against value; bias may differ in (quantized) type, the result has value's type
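A short sketch of the activations on a concrete vector (TF 1.x assumed):
x = tf.constant([-2., -0.5, 0., 3., 8.])
tf.nn.relu(x)   # ==> [0., 0., 0., 3., 8.]
tf.nn.relu6(x)  # ==> [0., 0., 0., 3., 6.]
h = tf.nn.bias_add(tf.constant([[1., 2.], [3., 4.]]),
                   tf.constant([10., 20.]))
# h ==> [[11., 22.], [13., 24.]]  (1-D bias broadcast over the batch)
drop = tf.nn.dropout(x, keep_prob=0.5)  # kept entries are scaled by 1/keep_prob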
9. Pooling
tf.nn.avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None)  # average pooling
tf.nn.max_pool(value, ksize, strides, padding, data_format='NHWC', name=None)  # max pooling
tf.nn.max_pool_with_argmax(input, ksize, strides, padding, Targmax=None, name=None)  # max pooling that returns the tuple (output, argmax): the max values and their flattened indices
tf.nn.avg_pool3d(input, ksize, strides, padding, name=None)  # 3-D average pooling
tf.nn.max_pool3d(input, ksize, strides, padding, name=None)  # 3-D max pooling
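A minimal max-pooling sketch (NHWC layout; the 4x4 single-channel image is made up):
img = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])  # one 4x4 image, 1 channel
pooled = tf.nn.max_pool(img,
                        ksize=[1, 2, 2, 1],    # 2x2 window
                        strides=[1, 2, 2, 1],  # non-overlapping
                        padding='SAME')
# pooled has shape [1, 2, 2, 1]; each value is the max of one 2x2 window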
10. Normalization
tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)  # L2-normalize along dimension dim: output = x / sqrt(max(sum(x**2), epsilon))
tf.nn.sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None)  # sufficient statistics for mean and variance; returns a 4-tuple: element count, sum, sum of squares, shift
tf.nn.normalize_moments(counts, mean_ss, variance_ss, shift, name=None)  # compute mean and variance from sufficient statistics
tf.nn.moments(x, axes, shift=None, name=None, keep_dims=False)  # compute mean and variance directly
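For example, per-column moments and row-wise L2 normalization:
x = tf.constant([[1., 2.], [3., 4.]])
mean, variance = tf.nn.moments(x, axes=[0])  # mean ==> [2., 3.], variance ==> [1., 1.]
normed = tf.nn.l2_normalize(x, dim=1)        # each row rescaled to unit L2 norm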
11. Losses
tf.nn.l2_loss(t, name=None)  # output = sum(t ** 2) / 2
tf.nn.sigmoid_cross_entropy_with_logits()         # per-label sigmoid cross-entropy
tf.nn.softmax_cross_entropy_with_logits()         # softmax cross-entropy with one-hot labels
tf.nn.sparse_softmax_cross_entropy_with_logits()  # softmax cross-entropy with integer class labels
tf.nn.weighted_cross_entropy_with_logits()        # sigmoid cross-entropy with a positive-class weight
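A hedged sketch of a typical classification loss (TF 1.x keyword form; logits and labels are made up):
logits = tf.constant([[2.0, 0.5, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])  # one-hot
xent = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
loss = tf.reduce_mean(xent)              # scalar training loss
penalty = tf.nn.l2_loss(tf.constant([1., 2.]))  # ==> (1 + 4) / 2 = 2.5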
12. Regularization # L1, L2, dropout, early stopping
regularizer = tf.contrib.layers.l1_regularizer(scale, scope=None)
regularizer = tf.contrib.layers.l2_regularizer(scale, scope=None)
tf.contrib.layers.apply_regularization(regularizer, weights_list=None)
tf.add_to_collection('losses', regularizer(fc1_weights))
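Putting the pieces together, a minimal sketch (the variable w and the collection name 'losses' are illustrative):
w = tf.get_variable("w", shape=[10, 5])
l2 = tf.contrib.layers.l2_regularizer(scale=0.001)
tf.add_to_collection('losses', l2(w))             # collect the L2 penalty for w
reg_loss = tf.add_n(tf.get_collection('losses'))  # sum all collected penalties
# total loss = data loss (e.g. cross-entropy) + reg_loss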
13. Dropout
# dropout
tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)
tf.nn.rnn_cell.DropoutWrapper(rnn_cell, input_keep_prob=1.0, output_keep_prob=1.0)
x = tf.nn.dropout(x, keep_prob=0.5)
# Plain dropout on a feed-forward layer
# signature: dropout(x, keep_prob, noise_shape=None, seed=None, name=None)
w = tf.get_variable("w1", shape=[size, out_size])
x = tf.placeholder(tf.float32, shape=[batch_size, size])
x = tf.nn.dropout(x, keep_prob=0.5)
y = tf.matmul(x, w)
# Dropout inside an RNN
# signature: DropoutWrapper(rnn_cell, input_keep_prob=1.0, output_keep_prob=1.0)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.5)
# Early stopping: stop training once the loss stops improving
14. Learning rate
# Exponential decay
learning_rate = tf.train.exponential_decay(0.1, global_step, 100, 0.96, staircase=True)
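A sketch of wiring the decayed rate into an optimizer (loss is assumed to be defined elsewhere):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    0.1,           # initial learning rate
    global_step,
    100,           # decay every 100 steps (with staircase=True)
    0.96,          # decay rate
    staircase=True)
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)  # passing global_step makes it advance each update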
15. Gradient descent optimizers
# SGD, Momentum, Nesterov Momentum, AdaGrad, RMSProp, Adam
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
train = tf.train.AdadeltaOptimizer(0.01).minimize(loss)
train = tf.train.AdagradOptimizer(0.01).minimize(loss)
train = tf.train.MomentumOptimizer(0.01, momentum=0.9).minimize(loss)
train = tf.train.AdamOptimizer(0.001).minimize(loss)
16. Hidden layers
# Hidden layers
tf.layers.dense()          # fully connected layer
tf.layers.conv2d()         # 2-D convolution layer
tf.layers.max_pooling2d()  # 2-D max-pooling layer
17. Classification
tf.nn.sigmoid_cross_entropy_with_logits(logits, targets, name=None)  # sigmoid cross-entropy between logits and targets (multi-label)
tf.nn.softmax(logits, name=None)  # softmax: softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
tf.nn.log_softmax(logits, name=None)  # logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
tf.nn.softmax_cross_entropy_with_logits(logits, labels, name=None)  # softmax cross-entropy; logits and labels must share shape and dtype; gradients flow into logits only
tf.nn.softmax_cross_entropy_with_logits_v2(logits, labels, name=None)  # same, but gradients also flow into labels (useful for soft labels)
tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name=None)  # softmax cross-entropy where labels are integer class indices
tf.nn.weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None)  # like sigmoid_cross_entropy_with_logits(), but weights the positive-class loss by pos_weight
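For contrast, a sketch of the sparse variant, which takes integer class indices instead of one-hot rows (values are made up):
logits = tf.constant([[2.0, 0.5, 0.1], [0.1, 0.2, 3.0]])
sparse_labels = tf.constant([0, 2])  # class indices, shape [batch]
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sparse_labels,
                                                      logits=logits)
probs = tf.nn.softmax(logits)        # each row sums to 1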
18. Saving and restoring variables
tf.train.Saver()  # saving and restoring variables
tf.train.Saver.save(sess, save_path, global_step=None, latest_filename=None, meta_graph_suffix='meta', write_meta_graph=True)  # save variables
tf.train.Saver.restore(sess, save_path)  # restore variables
tf.train.Saver.last_checkpoints  # list the most recent, not-yet-deleted checkpoint filenames
tf.train.Saver.set_last_checkpoints(last_checkpoints)  # set the list of checkpoint filenames
tf.train.Saver.set_last_checkpoints_with_time(last_checkpoints_with_time)  # set the list of checkpoint filenames with timestamps
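A minimal save/restore round trip (the checkpoint path is illustrative):
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, "/tmp/model.ckpt", global_step=100)  # writes model.ckpt-100
with tf.Session() as sess:
    saver.restore(sess, "/tmp/model.ckpt-100")  # restored variables need no initializer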
19. TF Learn
tf.contrib.learn  # high-level machine learning API
20. Initialization
init = tf.global_variables_initializer()
init = tf.initialize_all_variables()  # deprecated pre-TF-1.0 name for the same op
21. Running a session
with tf.Session() as sess:
    sess.run(init)
22. Basic operations for building a neural network
tf.Variable()        # create a variable (note: tf.Variable, not tf.Variables)
tf.get_variable()    # create or reuse a named variable
tf.placeholder()     # input fed in at run time
sess.run(y, feed_dict={x1: X_1, x2: X_2})  # feed values into placeholders
tf.name_scope()      # name scope for grouping ops
tf.variable_scope()  # variable scope for sharing variables
tf.constant([1, 2, 3])  # constant tensor
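A small end-to-end sketch of these primitives (shapes, names, and data are made up):
import numpy as np
x1 = tf.placeholder(tf.float32, shape=[None, 2])
x2 = tf.placeholder(tf.float32, shape=[None, 2])
w = tf.get_variable("w_demo", shape=[2, 1])
y = tf.matmul(x1 + x2, w)
X_1 = np.ones((3, 2), dtype=np.float32)
X_2 = np.ones((3, 2), dtype=np.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x1: X_1, x2: X_2}))  # shape (3, 1)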
23. Convolutional layer
# Convolutional layer
conv4 = tf.layers.conv2d(
    inputs=pool3,
    filters=128,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
24. Pooling layer
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
25. Fully connected (output) layer
# pool2 is usually flattened first (e.g. with tf.layers.flatten) before the dense layer
logits = tf.layers.dense(inputs=pool2,
                         units=5,
                         activation=None,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.nn.l2_loss)
26. Tunable parameters
'''Parameters to tune'''
Network structure:
    activation function
    loss function
    gradient descent method (optimizer)
    learning rate
    regularization term
Training process:
    batch_size
    num_epoch
    train/test split