import tensorflow as tf
import numpy as np

# Add a layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    # add one more layer and return the output of this layer
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# 1. Training data
# Make up some real data: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# 2. Define placeholders that will receive the input data
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# 3. Define the network layers: a hidden layer and a prediction layer
# add hidden layer: the input is xs, and the hidden layer has 10 neurons
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# add output layer: the input is the hidden layer l1, and the prediction layer outputs 1 value
prediction = add_layer(l1, 10, 1, activation_function=None)
# 4. Define the loss expression
# the error between prediction and real data
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
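Note that reduction_indices is the older name for the axis argument of tf.reduce_sum; in later TF 1.x releases the same loss is usually written as:

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))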
# 5. Choose an optimizer to minimize the loss
# this line defines how the loss is reduced; the learning rate is 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# important step: initialize all variables
# (tf.initialize_all_variables() is the deprecated old name for this)
init = tf.global_variables_initializer()
sess = tf.Session()
# nothing defined above is actually computed until sess.run is called
sess.run(init)
# train for 1000 iterations, running the optimizer with sess.run
for i in range(1000):
    # train_step and loss are both built on placeholders, so the data must be fed in here
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # to see the step improvement
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
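After training, the fitted values can be read out the same way by running the prediction op (a minimal sketch, not part of the original script):

y_pred = sess.run(prediction, feed_dict={xs: x_data})  # network output for each x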
Reference: https://www.jianshu.com/p/e112012a4b2d
There are 3 levels of API for building layers: tf.nn, tf.layers, and tf.contrib,
in increasing order of encapsulation, e.g.:
tf.layers.dense
tf.layers.conv1d
tf.layers.conv2d_transpose()  # transposed ("de-") convolution
tf.contrib.rnn.LSTMCell
tf.contrib.rnn.GRUCell
tf.nn.dynamic_rnn(cell, inputs, sequence_length)
Note: some code is written in the form tf.compat.v1.nn.conv1d. tf.compat.v1 marks the TF 1.x API:
after 2.x came out, the 1.x versions were no longer maintained, and this prefix was added so that 1.x-style code can still run.
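To illustrate the different levels of encapsulation, the hidden layer from the script above can be written at the low tf.nn level or as a single tf.layers call (a sketch, assuming TF 1.x):

# low level (tf.nn): you manage the weight and bias variables yourself
W = tf.Variable(tf.random_normal([1, 10]))
b = tf.Variable(tf.zeros([1, 10]) + 0.1)
h_nn = tf.nn.relu(tf.matmul(xs, W) + b)

# high level (tf.layers): the variables are created for you
h_layers = tf.layers.dense(xs, units=10, activation=tf.nn.relu)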
tf.placeholder(dtype, shape=None, name=None)
# Create a variable.
w = tf.Variable(initial_value, name=name)
If two variables are created with the same name, e.g. 'v', TensorFlow automatically numbers the second one 'v_1'.
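A minimal sketch of this auto-numbering:

v1 = tf.Variable(0.0, name='v')
v2 = tf.Variable(0.0, name='v')
print(v1.name)  # v:0
print(v2.name)  # v_1:0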
W = tf.get_variable(name, shape=None, dtype=tf.float32, initializer=None,
                    regularizer=None, trainable=True, collections=None)
with tf.variable_scope("one"):
    a = tf.get_variable("v", [1])  # a.name == "one/v:0"
with tf.variable_scope("one"):
    b = tf.get_variable("v", [1])  # creating two variables with the same name fails: ValueError: Variable one/v already exists
with tf.variable_scope("one", reuse=True):  # note the effect of reuse
    c = tf.get_variable("v", [1])  # c.name == "one/v:0"; sharing succeeds because reuse is set
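Since TF 1.4 there is also reuse=tf.AUTO_REUSE, which creates the variable on the first call and reuses it afterwards (a sketch):

with tf.variable_scope("one", reuse=tf.AUTO_REUSE):
    d = tf.get_variable("v", [1])  # creates one/v if absent, otherwise reuses it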
trainable_vars = tf.trainable_variables()
print('trainable_vars', trainable_vars)
# keep only the variables whose name contains 'ln_layer'
var_list = [t for t in trainable_vars if 'ln_layer' in t.name]
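A filtered list like this is typically passed to an optimizer so that only those variables are updated (a sketch; 'ln_layer' is just the name filter used above):

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss, var_list=var_list)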
There are three ways to get data into a TF 1.x graph (each is sketched below):
1. preloaded data (constants baked into the graph)
2. feeding from Python (placeholders + feed_dict)
3. reading from files (input pipelines)
References: https://blog.csdn.net/lujiandong1/article/details/53376802
https://blog.csdn.net/u014061630/article/details/80712635
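A minimal sketch of the three approaches (the file-based one uses tf.data, available from TF 1.4; 'data.csv' is a hypothetical file):

# 1. preloaded: the data is stored as a constant inside the graph
x_const = tf.constant([[1.0], [2.0], [3.0]])

# 2. feeding from Python: a placeholder filled at run time via feed_dict
x_ph = tf.placeholder(tf.float32, [None, 1])
# sess.run(some_op, feed_dict={x_ph: ...})

# 3. reading from files: an input pipeline, e.g. with tf.data
dataset = tf.data.TextLineDataset('data.csv').batch(32)
iterator = dataset.make_one_shot_iterator()
next_batch = iterator.get_next()  # one batch of raw text lines per sess.run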
1. tf.constant_initializer()  constant initialization
2. tf.ones_initializer()  all-ones initialization
3. tf.zeros_initializer()  all-zeros initialization
4. tf.random_uniform_initializer()  uniform-distribution initialization
5. tf.random_normal_initializer()  normal-distribution initialization
6. tf.truncated_normal_initializer()  truncated-normal initialization
7. tf.uniform_unit_scaling_initializer()  keeps the variance of the input constant
8. tf.variance_scaling_initializer()  adaptive (variance-scaling) initialization
9. tf.orthogonal_initializer()  generates an orthogonal matrix
Among these, tf.truncated_normal_initializer() (truncated normal) is used the most.
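For example, with tf.get_variable (a minimal sketch):

W = tf.get_variable("W", shape=[1, 10],
                    initializer=tf.truncated_normal_initializer(stddev=0.1))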
Reference: https://blog.csdn.net/qq_30815237/article/details/96975332