def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
layer_name = 'layer%s' % n_layer  # yields the string 'layer1', 'layer2', ...
tf.summary.histogram(layer_name + '/weights', Weights)  # the y-axis shows the values of Weights
tf.summary.histogram(layer_name + '/biases', biases)
tf.summary.histogram(layer_name + '/outputs', outputs)
l1=add_layer(xs,1,10,n_layer=1,activation_function=tf.nn.relu)
prediction=add_layer(l1,10,1,n_layer=2,activation_function=None)
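For intuition, the matrix shapes through the two layers work out as follows (N is the number of samples; this trace is mine, not part of the original notes):
# xs (N, 1) x Weights (1, 10) -> l1 (N, 10)
# l1 (N, 10) x Weights (10, 1) -> prediction (N, 1)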
tf.summary.scalar('loss', loss)  # a scalar; viewed under the EVENTS tab
merged = tf.summary.merge_all()  # merge all summaries
result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})  # merged has to be run as well
writer.add_summary(result, i)  # records one point in the events file every 50 steps
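To actually look at the recorded curves, launch TensorBoard against the log directory used below (the path comes from the full code; the port may differ on your machine) and open the URL it prints, typically http://localhost:6006:

tensorboard --logdir=/pycharmfiles/tensorboard/logs/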
Full code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# add a neural-network layer
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    layer_name = 'layer%s' % n_layer  # yields the string 'layer1', 'layer2', ...
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')  # random init works better than zeros
            tf.summary.histogram(layer_name + '/weights', Weights)  # the y-axis shows the values of Weights
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # a small non-zero bias is recommended
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# create the data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
# x_data ranges over (-1, 1); [:, np.newaxis] adds a dimension, giving 300 rows (one column); [np.newaxis, :] would instead give one row of 300 columns
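# quick shape check (illustrative, not in the original notes):
# np.linspace(-1, 1, 3)                 -> shape (3,)
# np.linspace(-1, 1, 3)[:, np.newaxis]  -> shape (3, 1), a column
# np.linspace(-1, 1, 3)[np.newaxis, :]  -> shape (1, 3), a row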
noise = np.random.normal(0, 0.05, x_data.shape)
# add noise with standard deviation 0.05 (np.random.normal takes the std, not the variance), same shape as x_data
y_data = np.square(x_data) - 0.5 + noise
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')  # None accepts any number of samples
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')
# one input neuron, a hidden layer with 10 neurons, one output neuron
#add hidden layer
l1=add_layer(xs,1,10,n_layer=1,activation_function=tf.nn.relu)
#add output layer
prediction=add_layer(l1,10,1,n_layer=2,activation_function=None)
# compute the loss function
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
    # square the errors, sum along axis 1 (reduction_indices=[1]), then average over the samples; see the numpy sketch after the full listing
    tf.summary.scalar('loss', loss)  # a scalar; viewed under the EVENTS tab
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)  # the learning rate is usually less than 1
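    # (illustrative) each step applies w <- w - 0.1 * dL/dw to every trainable variable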
init = tf.global_variables_initializer()  # initialize all variables (tf.initialize_all_variables is deprecated)
sess = tf.Session()
merged = tf.summary.merge_all()  # merge all summaries
writer = tf.summary.FileWriter('/pycharmfiles/tensorboard/logs/', sess.graph)  # viewed in the browser via TensorBoard
sess.run(init)  # nothing above actually executes until this point
fig = plt.figure()  # create the figure
ax = fig.add_subplot(1, 1, 1)  # one subplot
ax.scatter(x_data, y_data)  # plot the real data
plt.ion()  # interactive mode, so plotting does not block
plt.show()  # without plt.ion(), the whole program would pause here
for i in range(1000):  # train for 1000 steps
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    # placeholders are used because gradient descent might only use part of x_data
    if i % 50 == 0:  # report the loss every 50 steps
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})  # merged has to be run as well
        writer.add_summary(result, i)  # records one point in the events file every 50 steps
        try:  # remove the previous line first so each frame follows the last closely
            ax.lines.remove(lines[0])  # remove the previously plotted line
        except Exception:
            pass
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        # plot prediction_value against x_data as a red line of width 5
        # for a continuous animation, the old line has to be erased each time
        plt.pause(0.1)  # pause for 0.1 s
plt.pause(0)  # without this, the figure closes as soon as drawing finishes
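For intuition about the loss line above, here is a minimal numpy sketch of the same computation (my example values, not from the original notes); for (n, 1) arrays it is just the ordinary mean squared error:

import numpy as np

y = np.array([[1.0], [2.0], [3.0]])       # stand-in for ys
pred = np.array([[1.5], [1.5], [2.0]])    # stand-in for prediction
loss = np.mean(np.sum((y - pred) ** 2, axis=1))  # sum over axis 1, then average
print(loss)  # 0.5, same as np.mean((y - pred) ** 2) for column vectors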