The focus here is a first pass at how TensorFlow thinks: the most important step is defining the network model; once that is done, tf can run the training for you.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Function that adds one fully connected layer: it takes the input tensor,
# the input/output sizes, and an optional activation function.
def add_layer(inputs, in_size, out_size, activation_function=None):
    # Randomly initialize the weights; start the biases at a small
    # non-zero value (0.1) rather than at zero.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Compute W*x + b
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # Apply the activation function, if any: A(W*x + b)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
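# Each add_layer call computes activation(inputs @ Weights + biases):
# inputs is [batch, in_size], Weights is [in_size, out_size], and the
# [1, out_size] biases broadcast across the batch dimension.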
# Generate training data: 300 points in [-1, 1] with shape [300, 1],
# and noisy targets y = x^2 - 0.5 + Gaussian noise.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# Define the inputs and outputs as placeholders.
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# Hidden layer l1: 1 input, 10 outputs, ReLU activation.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer: 10 inputs, 1 output, no activation (direct prediction).
prediction = add_layer(l1, 10, 1, activation_function=None)
# Define the loss: mean squared error (sum the squared error over the
# output dimension, then average over the batch).
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# Training op: gradient descent (learning rate 0.1), minimizing the loss.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
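# Any other tf.train optimizer can be dropped in here, e.g. Adam
# (the 0.01 learning rate below is just an illustrative choice):
# train_step = tf.train.AdamOptimizer(0.01).minimize(loss)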
# Initialize all variables (tf.global_variables_initializer() replaces
# the deprecated tf.initialize_all_variables()).
init = tf.global_variables_initializer()
# Create a session and run the initializer.
sess = tf.Session()
sess.run(init)
# Plotting: scatter the raw data, then update the fitted curve in place.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode, so plt.show() does not block
plt.show()
# Train for 1000 steps.
for i in range(1000):
    # Every op that depends on a placeholder must be fed via feed_dict={...}.
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        # Remove the previous fitted line before drawing the new one
        # (the first pass has no line yet, hence the try/except).
        try:
            ax.lines.remove(lines[0])
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        # Pause briefly so the figure refreshes.
        plt.pause(0.1)
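Once training finishes, the same placeholder can be fed fresh inputs to query the fitted curve. A minimal sketch, reusing the sess, xs, and prediction defined above (test_x is a hypothetical new batch):
test_x = np.array([[-0.5], [0.0], [0.5]], dtype=np.float32)  # hypothetical inputs, shape [3, 1]
test_y = sess.run(prediction, feed_dict={xs: test_x})
print(test_y)   # each value should be close to x^2 - 0.5
sess.close()    # release the session's resources when done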
The second version adds TensorBoard support, covering the computation graph (graphs), the loss curve (scalars), and parameter histograms (histograms):
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Same layer-adding function, extended with name scopes (for the graph view)
# and histogram summaries for the weights, biases, and outputs.
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]))
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
            tf.summary.histogram(layer_name + '/biases', biases)
        # Compute W*x + b
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        # Apply the activation function, if any: A(W*x + b)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# Generate training data (same quadratic dataset as above).
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# Define the inputs under a name scope so they group nicely in the graph.
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
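# In TensorBoard's GRAPHS tab, each name_scope becomes a collapsible box,
# and the name= arguments label the individual placeholder nodes.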
# Hidden layer l1: 1 input, 10 outputs, ReLU activation.
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
# Output layer: 10 inputs, 1 output, no activation (direct prediction);
# note it must get its own n_layer so its summaries are not mixed with l1's.
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)
# Define the loss (mean squared error) and record it as a scalar summary.
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
    tf.summary.scalar('loss', loss)
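# The scalar summary shows up as a loss-vs-step curve in the SCALARS tab;
# the histogram summaries appear under HISTOGRAMS.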
# Training op: gradient descent (learning rate 0.1), minimizing the loss.
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Initialize all variables (tf.global_variables_initializer() replaces
# the deprecated tf.initialize_all_variables()).
init = tf.global_variables_initializer()
# Create the session, merge all summaries into one op, and open a writer
# that dumps the graph and summary events into the logs/ directory.
sess = tf.Session()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("logs/", sess.graph)
sess.run(init)
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # Evaluate all merged summaries and write them out at step i.
        result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(result, i)
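To inspect the results, run tensorboard --logdir=logs from the directory that contains the logs/ folder, then open http://localhost:6006 (TensorBoard's default port) in a browser.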
Reference video: https://www.bilibili.com/video/av16001891/?p=16