1. demo1
import tensorflow as tf
# seed fixes the random seed so that every run produces the same result
w1 = tf.Variable(tf.random_normal((2, 3), stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal((3, 1), stddev=1, seed=1))
x = tf.constant([[0.7, 0.9]])
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
with tf.Session() as sess:
    # variables can be initialized one by one:
    # sess.run(w1.initializer)
    # sess.run(w2.initializer)
    # or all at once:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w1))
    print(sess.run(w2))
    print(sess.run(y))
"""
[[-0.8113182 1.4845988 0.06532937]
[-2.4427042 0.0992484 0.5912243 ]]
[[-0.8113182 ]
[ 1.4845988 ]
[ 0.06532937]]
[[3.957578]]
"""
2. demo2
import tensorflow as tf
# seed fixes the random seed so that every run produces the same result
w1 = tf.Variable(tf.random_normal((2, 3), stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal((3, 1), stddev=1, seed=1))
#x = tf.constant([[0.7, 0.9]])
x = tf.placeholder(dtype=tf.float32, shape=(3, 2))
# a placeholder reserves a slot for input data; the actual values are supplied at run time
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w1))
    print(sess.run(w2))
    # feed_dict supplies three example inputs
    print(sess.run(y, feed_dict={x: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))
"""
[[-0.8113182 1.4845988 0.06532937]
[-2.4427042 0.0992484 0.5912243 ]]
[[-0.8113182 ]
[ 1.4845988 ]
[ 0.06532937]]
[[3.957578 ]
[1.1537654]
[3.1674924]]
"""
3. demo3
import tensorflow as tf
# use the NumPy package to generate a simulated dataset
from numpy.random import RandomState
# size of one training batch
batch_size = 8
# define the neural network parameters
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
# using None for one dimension of the shape makes it easy to use different batch sizes
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
# define the forward propagation process
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# define the loss function and the backpropagation algorithm
y = tf.sigmoid(y)
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)
# generate a simulated dataset with random numbers
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]  # label a sample as positive (1) when x1 + x2 < 1
# create a session
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print(sess.run(w1))
    print(sess.run(w2))
    # set the number of training iterations
    STEPS = 5000
    for i in range(STEPS):
        # select batch_size samples for each training step
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        # train the network on the selected samples and update the parameters
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            # periodically compute and print the cross entropy on the whole dataset
            total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})
            print("After %d training step(s), cross entropy on all data is %g" % (i, total_cross_entropy))
    print(sess.run(w1))
    print(sess.run(w2))
"""
[[-0.8113182 1.4845988 0.06532937]
[-2.4427042 0.0992484 0.5912243 ]]
[[-0.8113182 ]
[ 1.4845988 ]
[ 0.06532937]]
After 0 training step(s), cross entropy on all data is 0.314006
After 1000 training step(s), cross entropy on all data is 0.0684551
After 2000 training step(s), cross entropy on all data is 0.033715
After 3000 training step(s), cross entropy on all data is 0.020558
After 4000 training step(s), cross entropy on all data is 0.0136867
[[-2.548655 3.0793087 2.8951712]
[-4.1112747 1.6259071 3.3972702]]
[[-2.3230937]
[ 3.3011687]
[ 2.4632082]]
"""