tensorflow的图操作比较重要
通过对图的操作可以让我们对计算图有更进一步的了解
先上一段简单的训练代码
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28*28 images and 10-class one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A minimal network: 784 input units fully connected to 10 output units.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y-prediction))
# BUG FIX: softmax_cross_entropy_with_logits expects *raw* logits; the
# original passed `prediction` (already soft-maxed), i.e. a double
# softmax, which flattens the gradient signal.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

# Plain gradient descent with learning rate 0.2.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer op.
init = tf.global_variables_initializer()

# Boolean vector of per-example correctness; argmax returns the index of
# the largest value along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy = mean of the boolean vector cast to float.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(11):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    # Save the trained variables to a checkpoint.
    saver.save(sess, 'net/my_net.ckpt')
结果是:
0.8241
再把图restore
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# Dump every tensor stored in the checkpoint (names, shapes and values).
print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True, True)

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# The graph must be rebuilt exactly as it was when the checkpoint was
# saved, so the same definitions follow.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y-prediction))
# BUG FIX: pass the raw logits, not the soft-maxed `prediction`
# (double softmax) — see the training script.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# Per-example correctness; argmax returns the index of the largest value
# along axis 1.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    # Accuracy with freshly initialized (all-zero) weights — ~0.098.
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # Restore the trained weights and evaluate again.
    saver.restore(sess, 'net/my_net.ckpt')
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
结果是:
0.8241
高潮来了
上面的restore方法需要把图重新定义一遍(也就是 with tf.Session() as sess: 之前的全部代码);下面这种方法则不需要:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# List every variable name and shape stored in the checkpoint.
reader = tf.train.NewCheckpointReader('net/my_net.ckpt').get_variable_to_shape_map()
for variable in reader:
    print(variable, reader[variable])
# Also dump the stored tensor values (the weights).
print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True, True)

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# import_meta_graph rebuilds the whole graph from the .meta file, so the
# network does NOT have to be re-defined in code.
saver = tf.train.import_meta_graph('net/my_net.ckpt.meta')
with tf.Session() as sess:
    saver.restore(sess, 'net/my_net.ckpt')
    g = tf.get_default_graph()
    # Inspect the operations to discover the auto-generated tensor names
    # (needed because nothing was given an explicit name at build time).
    op = g.get_operations()
    # print(g, op)
    # BUG FIX: in a fresh process the ops keep their original names —
    # 'Softmax_5' only appears after building the same graph repeatedly
    # in one interactive session (e.g. re-running a notebook cell).
    sofmax = g.get_tensor_by_name('Softmax:0')
    x = g.get_tensor_by_name('Placeholder:0')
    # 'Mean' is the loss node (the first reduce_mean built); accuracy is
    # the second one, hence 'Mean_1'.
    accuracy = g.get_tensor_by_name('Mean_1:0')
    y = g.get_tensor_by_name('Placeholder_1:0')
    print(sofmax, x)
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
结果是:
0.8241
你需要通过op找到sofmax、x、accuracy、y这几个tensor的名称,再用 tf.get_default_graph().get_tensor_by_name('Mean_1:0') 得到tensor实体,
这句saver = tf.train.import_meta_graph('net/my_net.ckpt.meta')直接加载图,这样极大简化了restore编写。
当然你在编写代码时给sofmax ,x,accuracy,y取一个方便的名字会更加方便。
这句print_tensors_in_checkpoint_file('net/my_net.ckpt', None, True,True)是查看checkpoint的参数,也就是weight。
还有一个save成一个文件的方法,保存为pb文件:
看了(公输睚信)的博客改成了下面的格式
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)

# 100 images per batch.
batch_size = 100
# Number of batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28*28 images and 10-class one-hot labels.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# A minimal network: 784 input units fully connected to 10 output units.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
logits = tf.matmul(x, W) + b
prediction = tf.nn.softmax(logits)

# Quadratic cost (kept for reference):
# loss = tf.reduce_mean(tf.square(y-prediction))
# BUG FIX: pass the raw logits, not the soft-maxed `prediction`
# (double softmax) — see the training script.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

init = tf.global_variables_initializer()

# Per-example correctness and accuracy, as in the training script.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# No Saver needed: the graph is frozen into a single .pb file instead.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(1):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
    # Freeze the graph: fold the current variable values into constants
    # and keep only the subgraph feeding the listed output nodes.
    graph_def = tf.get_default_graph().as_graph_def()
    # NOTE(review): 'Mean' is the *loss* node (the first reduce_mean
    # built); the accuracy node is 'Mean_1'. Both are kept so either can
    # be fetched from the frozen graph; 'Placeholder_1' (labels) is
    # listed explicitly as well.
    output_graph_def = graph_util.convert_variables_to_constants(
        sess, graph_def,
        ['Softmax', 'Placeholder', 'Placeholder_1', 'Mean', 'Mean_1'])
    with tf.gfile.GFile('net/my_net.pb', 'wb') as fid:
        serialized_graph = output_graph_def.SerializeToString()
        fid.write(serialized_graph)
restore已经保存的pb文件:
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# Load the MNIST dataset with one-hot encoded labels.
mnist = input_data.read_data_sets(r'E:\python\mnist_data', one_hot=True)


def load_model(path_to_model):
    """Load a frozen GraphDef (.pb) file and return it as a new tf.Graph.

    Raises:
        ValueError: if `path_to_model` does not exist on disk.
    """
    if not os.path.exists(path_to_model):
        # BUG FIX: report the path that was actually checked; the
        # original message was a hard-coded, ungrammatical placeholder.
        raise ValueError("Model file '%s' does not exist." % path_to_model)
    model_graph = tf.Graph()
    with model_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(path_to_model, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            # name='' keeps the original node names (no import prefix).
            tf.import_graph_def(od_graph_def, name='')
    return model_graph


model_graph = load_model('net/my_net.pb')
with model_graph.as_default():
    with tf.Session(graph=model_graph) as sess:
        # Tensor names correspond to the node names passed to
        # convert_variables_to_constants when the .pb was written.
        sofmax = model_graph.get_tensor_by_name('Softmax:0')
        x = model_graph.get_tensor_by_name('Placeholder:0')
        # NOTE(review): 'Mean' is most likely the *loss* node (the first
        # reduce_mean built in the training graph); the accuracy node is
        # probably 'Mean_1' — confirm against the ops of the saved graph.
        accuracy = model_graph.get_tensor_by_name('Mean:0')
        y = model_graph.get_tensor_by_name('Placeholder_1:0')
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))