Saving and Visualizing the Model

The example below fits a linear regression model, logs summaries for TensorBoard with tf.summary, and writes checkpoints with tf.train.Saver.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Generate training data: y = 2x plus Gaussian noise
train_X = np.float32(np.linspace(-1, 1, 100))
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3
tf.reset_default_graph()  # start from a clean graph so reruns do not accumulate ops
# Show the raw training data
plt.plot(train_X, train_Y, 'ro')
plt.show()
# X = tf.placeholder('float')
# Y = tf.placeholder('float')
inputdict = {
    'X': tf.placeholder('float'),
    'Y': tf.placeholder('float')
}
# W = tf.Variable(tf.random_normal([1]), name='weight')
# b = tf.Variable(tf.zeros([1]), name='bias')
paradict = {
    'W': tf.Variable(tf.random_normal([1])),
    'b': tf.Variable(tf.zeros([1]))
}
# Forward model: Z = X * W + b
Z = tf.multiply(inputdict['X'], paradict['W']) + paradict['b']
# tf.summary ops record tensors so they can be visualized in TensorBoard
tf.summary.histogram('Z', Z)  # log the predictions as a histogram
cost = tf.reduce_mean(tf.square(inputdict['Y'] - Z))
tf.summary.scalar('loss_function', cost)  # log the loss as a scalar
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
training_epochs = 20
display_step = 2
# Create the Saver; max_to_keep=1 keeps only the most recent checkpoint file
saver = tf.train.Saver(max_to_keep=1)
# saver = tf.train.Saver({'weight': paradict['W'], 'bias': paradict['b']})
savedir = 'log/'
plotdata = {"batchsize": [], "loss": []}
def moving_average(a, w=10):
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx-w):idx]) / w for idx, val in enumerate(a)]
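As a quick sanity check of the smoother (hypothetical values, not part of the training run): from index w onward each element is replaced by the mean of the previous w values, while earlier elements pass through unchanged.

print(moving_average([1, 2, 3, 4, 5], w=3))  # -> [1, 2, 3, 2.0, 3.0]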
with tf.Session() as sess:
    sess.run(init)
    # Merge all summary ops into a single op
    merged_summary_op = tf.summary.merge_all()
    # Create a summary writer for the event files
    summary_writer = tf.summary.FileWriter('log/mnist_with_summaries', sess.graph)
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={inputdict['X']: x, inputdict['Y']: y})
            # Generate the merged summary for this sample
            summary_str = sess.run(merged_summary_op, {inputdict['X']: x, inputdict['Y']: y})
            # Write the summary to the event file (all samples of an epoch share the same step)
            summary_writer.add_summary(summary_str, epoch)
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={inputdict['X']: train_X, inputdict['Y']: train_Y})
            print('Epoch:', epoch + 1, ' cost=', loss, ' W=', sess.run(paradict['W']), ' b=', sess.run(paradict['b']))
            if not np.isnan(loss):  # guard against a diverged run
                plotdata['batchsize'].append(epoch)
                plotdata['loss'].append(loss)
            # Save a checkpoint; with max_to_keep=1 only the newest file survives
            saver.save(sess, savedir + 'linermodel.ckpt', global_step=epoch)
    print('Finished!')
    # saver.save(sess, savedir + 'linermodel.ckpt')
    # print('cost=', sess.run(cost, feed_dict={X: train_X, Y: train_Y}), ' W=', sess.run(W), ' b=', sess.run(b))
    print('cost:', cost.eval({inputdict['X']: train_X, inputdict['Y']: train_Y}))
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(paradict['W']) * train_X + sess.run(paradict['b']), label='Fitted line')
    plt.legend()
    plt.show()
    # Smooth the recorded losses with the moving average before plotting
    plotdata['avgloss'] = moving_average(plotdata['loss'])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(plotdata['batchsize'], plotdata['avgloss'], 'b--')
    plt.xlabel('Minibatch number')
    plt.ylabel('Loss')
    plt.title('Minibatch run vs. Training loss')
    plt.show()
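Once the event files have been written to log/mnist_with_summaries, the Z histogram and the loss_function scalar can be inspected by launching TensorBoard, e.g. tensorboard --logdir log/mnist_with_summaries, and opening the address it prints (by default http://localhost:6006) in a browser.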
# Restore the saved model
# load_epoch = 19
# with tf.Session() as sess2:
#     sess2.run(tf.global_variables_initializer())
#     # saver.restore(sess2, savedir + "linermodel.ckpt-" + str(load_epoch))
#     # ckpt = tf.train.get_checkpoint_state(savedir)
#     # if ckpt and ckpt.model_checkpoint_path:
#     #     saver.restore(sess2, ckpt.model_checkpoint_path)
#     kpt = tf.train.latest_checkpoint(savedir)
#     if kpt is not None:
#         saver.restore(sess2, kpt)
#     print('z=', sess2.run(Z, {inputdict['X']: 0.2}))
# Inspect the variables stored in a checkpoint file
# import tensorflow as tf
# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
# print_tensors_in_checkpoint_file(savedir + 'linermodel.ckpt', None, True)
# with tf.Session() as sess2:
#     sess2.run(tf.global_variables_initializer())
#     saver.restore(sess2, savedir + "linermodel.ckpt")
#     print('Z=', sess2.run(Z, {inputdict['X']: 0.2}))
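Putting the restore path together: a minimal sketch, assuming the checkpoint written by the training loop above exists under log/ and that the graph, saver, inputdict, and Z from this script are still defined. No initializer run is needed, because restore assigns a value to every saved variable.

with tf.Session() as sess2:
    # Locate the newest checkpoint under savedir
    kpt = tf.train.latest_checkpoint(savedir)
    if kpt is not None:
        # Restore W and b from the checkpoint into the live graph
        saver.restore(sess2, kpt)
        # Evaluate the restored model for a single input
        print('Z for x = 0.2:', sess2.run(Z, feed_dict={inputdict['X']: 0.2}))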