The Complete Guide to Saving and Restoring TensorFlow Models, with Examples

I. How to Save and Restore TensorFlow Models

1. TensorFlow saves and restores neural-network models through the tf.train.Saver class. Calling the save method of a Saver object writes the model to the given path, e.g. saver.save(sess, 'model/model.ckpt'). Saving actually produces four files in that directory:

[Figure 1: the four files created in the model directory]

The checkpoint file keeps a list of all the model files in the directory; model.ckpt.meta stores the structure of the TensorFlow computation graph; the model.ckpt data files store the value of every variable. The exact filenames depend on the parameters used when saving, but the path used at restore time is determined by the 'model_checkpoint_path' entry in the checkpoint file.
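
For reference, checkpoint is a small text file; after the save call above its contents would look roughly like this (the exact paths depend on how save was called):

model_checkpoint_path: "model.ckpt"
all_model_checkpoint_paths: "model.ckpt"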

2. To load the saved TensorFlow model, call saver.restore(sess, './model/model.ckpt'). The loading code must also define every operation of the computation graph and declare a tf.train.Saver; the difference is that no variable initialization is needed, because the variable values are loaded from the saved model. If you do not want to re-define the operations on the graph, you can load the persisted graph directly: saver = tf.train.import_meta_graph('model/model.ckpt.meta') (see Part 3 below).

3. tf.train.Saver also supports renaming variables at save or load time: construct the Saver with a dict of the form {name of the saved variable: variable to load into}. For example, saver = tf.train.Saver({"v1": u1, "v2": u2}) loads the variable originally named v1 into the variable u1 (see Part 4 below).

4. One purpose of point 3 is to make it easy to use a variable's moving average. If the shadow variable is mapped directly onto the variable itself when the model is loaded, code that uses the trained model no longer needs an extra call to fetch the moving average. At load time, declare the Saver with a dict that loads the moving average straight into the new variable, e.g. saver = tf.train.Saver({"v/ExponentialMovingAverage": v}). Alternatively, the variables_to_restore() function of tf.train.ExponentialMovingAverage builds this renaming dict for you (Part 7 below). Finally, the convert_variables_to_constants function saves the variables in a computation graph, together with their values, as constants in a single file (Parts 8 and 9 below).

TensorFlow implementation

# Part 1: Saving a TensorFlow model

import tensorflow as tf  
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    saver.save(sess, "Model/model.ckpt")  
  
  
# Part 2: Loading a saved TensorFlow model
  
import tensorflow as tf  
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt") # 注意此处路径前添加"./"  
    print(sess.run(result)) # [ 3.]  
  
  
# Part 3: Load the persisted graph directly instead of re-defining the operations
  
import tensorflow as tf  
  
saver = tf.train.import_meta_graph("Model/model.ckpt.meta")  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt") # 注意路径写法  
    print(sess.run(tf.get_default_graph().get_tensor_by_name("add:0"))) # [ 3.]  
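
The tensor name "add:0" follows the op_name:output_index convention, i.e. output 0 of the op named "add" (the node that computes v1 + v2). If you are unsure of the names in an imported graph, a minimal standalone sketch to list them (same Model directory as above):

import tensorflow as tf

saver = tf.train.import_meta_graph("Model/model.ckpt.meta")
# print every operation name in the imported graph
for op in tf.get_default_graph().get_operations():
    print(op.name)  # e.g. v1, v2, add, save/... among others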
  
  
# Part 4: tf.train.Saver also supports renaming variables at save/load time
  
import tensorflow as tf  
  
# The variable names declared here differ from the names in the saved model
u1 = tf.Variable(tf.constant(1.0, shape=[1]), name="other-v1")  
u2 = tf.Variable(tf.constant(2.0, shape=[1]), name="other-v2")  
result = u1 + u2  
  
# Declaring the Saver directly would raise a variable-not-found error
# Passing a dict renames the variables: {"name in the saved model": variable to load into}
# The variable saved under the name v1 is loaded into u1 (whose name is other-v1)
saver = tf.train.Saver({"v1": u1, "v2": u2})  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model.ckpt")  
    print(sess.run(result)) # [ 3.]  
  
  
# Part 5: Saving a moving-average (EMA) model
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
for variables in tf.global_variables():  
    print(variables.name) # v:0  
  
ema = tf.train.ExponentialMovingAverage(0.99)  
maintain_averages_op = ema.apply(tf.global_variables())  
for variables in tf.global_variables():  
    print(variables.name) # v:0  
                          # v/ExponentialMovingAverage:0  
  
saver = tf.train.Saver()  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    sess.run(tf.assign(v, 10))  
    sess.run(maintain_averages_op)  
    saver.save(sess, "Model/model_ema.ckpt")  
    print(sess.run([v, ema.average(v)])) # [10.0, 0.099999905]  
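    # The shadow value equals decay*shadow + (1-decay)*v = 0.99*0 + 0.01*10 = 0.1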
  
  
# Part 6: Read a variable's moving average directly via renaming
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
saver = tf.train.Saver({"v/ExponentialMovingAverage": v})  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model_ema.ckpt")  
    print(sess.run(v)) # 0.0999999  
  
  
# Part 7: Build the renaming dict with ExponentialMovingAverage's variables_to_restore()
  
import tensorflow as tf  
  
v = tf.Variable(0, dtype=tf.float32, name="v")  
# The variable name here must match the name used in the saved model
ema = tf.train.ExponentialMovingAverage(0.99)  
print(ema.variables_to_restore())  
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}
# the key is built from the variable's name above, name="v"
  
saver = tf.train.Saver(ema.variables_to_restore())  
  
with tf.Session() as sess:  
    saver.restore(sess, "./Model/model_ema.ckpt")  
    print(sess.run(v)) # 0.0999999  
  
  
# Part 8: Save the graph's variables and their values as constants in one file with convert_variables_to_constants
  
import tensorflow as tf  
from tensorflow.python.framework import graph_util  
  
v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")  
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")  
result = v1 + v2  
  
with tf.Session() as sess:  
    sess.run(tf.global_variables_initializer())  
    # Export the GraphDef portion of the current graph, i.e. the computation from input to output
    graph_def = tf.get_default_graph().as_graph_def()  
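    # Note: convert_variables_to_constants takes op names ('add'), while
    # return_elements in import_graph_def (Part 9 below) takes tensor names ('add:0')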
    output_graph_def = graph_util.convert_variables_to_constants(sess,  
                                                        graph_def, ['add'])  
  
    with tf.gfile.GFile("Model/combined_model.pb", 'wb') as f:  
        f.write(output_graph_def.SerializeToString())  
  
  
# Part 9: Load the model that contains the variables and their values
  
import tensorflow as tf  
from tensorflow.python.platform import gfile  
  
with tf.Session() as sess:  
    model_filename = "Model/combined_model.pb"  
    with gfile.FastGFile(model_filename, 'rb') as f:  
        graph_def = tf.GraphDef()  
        graph_def.ParseFromString(f.read())  
  
    result = tf.import_graph_def(graph_def, return_elements=["add:0"])  
    print(sess.run(result)) # [array([ 3.], dtype=float32)]  
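
The nine snippets above cover the basic mechanics. What follows is a complete end-to-end example (by Aymeric Damien, as credited in its docstring) that trains an MNIST classifier, saves the weights, and then restores them in a second session to resume training: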
'''
Save and Restore a model using TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''

from __future__ import print_function

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

import tensorflow as tf

# Parameters
learning_rate = 0.001
batch_size = 100
display_step = 1
model_path = "/tmp/model.ckpt"

# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])


# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()

# Running first session
print("Starting 1st session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)

    # Training cycle
    for epoch in range(3):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                "{:.9f}".format(avg_cost))
    print("First Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)

# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(init)

    # Restore model weights from previously saved model
    saver.restore(sess, model_path)
    print("Model restored from file: %s" % save_path)

    # Resume training
    for epoch in range(7):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", \
                "{:.9f}".format(avg_cost))
    print("Second Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval(
        {x: mnist.test.images, y: mnist.test.labels}))
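
The next standalone script builds an MNIST classifier with tf.layers and shows how saving inside the training loop interacts with the max_to_keep argument of tf.train.Saver: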
# -*- coding: utf-8 -*-
"""
Created on Sun Jun  4 10:29:48 2017

@author: Administrator
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

x = tf.placeholder(tf.float32, [None, 784])
y_=tf.placeholder(tf.int32,[None,])

dense1 = tf.layers.dense(inputs=x, 
                      units=1024, 
                      activation=tf.nn.relu,
                      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                      kernel_regularizer=tf.nn.l2_loss)
dense2= tf.layers.dense(inputs=dense1, 
                      units=512, 
                      activation=tf.nn.relu,
                      kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                      kernel_regularizer=tf.nn.l2_loss)
logits= tf.layers.dense(inputs=dense2, 
                        units=10, 
                        activation=None,
                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        kernel_regularizer=tf.nn.l2_loss)

loss=tf.losses.sparse_softmax_cross_entropy(labels=y_,logits=logits)
train_op=tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_)    
acc= tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess=tf.InteractiveSession()  
sess.run(tf.global_variables_initializer())

saver=tf.train.Saver(max_to_keep=1)
for i in range(100):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
  val_loss,val_acc=sess.run([loss,acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  print('epoch:%d, val_loss:%f, val_acc:%f'%(i,val_loss,val_acc))
  saver.save(sess,'ckpt/mnist.ckpt',global_step=i+1)
sess.close()

The saver.save call inside the loop is what saves the model. Although the code saves after every iteration, each save overwrites the previous one (max_to_keep=1), so only the last checkpoint survives. We can therefore save time by moving the save call outside the loop, as sketched below (this shortcut only works with max_to_keep=1; otherwise the call has to stay inside the loop).
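
A minimal sketch with the save moved outside the loop (reusing the variables defined in the script above):

saver = tf.train.Saver(max_to_keep=1)
for i in range(100):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
# a single save once training has finished
saver.save(sess, 'ckpt/mnist.ckpt')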

In practice, the last epoch is not necessarily the one with the highest validation accuracy, so instead of always keeping the last checkpoint we may prefer to keep the best one; an extra variable and an if statement are all that is needed:

saver=tf.train.Saver(max_to_keep=1)
max_acc=0
for i in range(100):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
  val_loss,val_acc=sess.run([loss,acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  print('epoch:%d, val_loss:%f, val_acc:%f'%(i,val_loss,val_acc))
  if val_acc>max_acc:
      max_acc=val_acc
      saver.save(sess,'ckpt/mnist.ckpt',global_step=i+1)
sess.close()
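
If we also want to keep the three most recent checkpoints and log each epoch's validation accuracy to a text file, we can raise max_to_keep to 3: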


saver=tf.train.Saver(max_to_keep=3)
max_acc=0
f=open('ckpt/acc.txt','w')
for i in range(100):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
  val_loss,val_acc=sess.run([loss,acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
  print('epoch:%d, val_loss:%f, val_acc:%f'%(i,val_loss,val_acc))
  f.write(str(i+1)+', val_acc: '+str(val_acc)+'\n')
  if val_acc>max_acc:
      max_acc=val_acc
      saver.save(sess,'ckpt/mnist.ckpt',global_step=i+1)
f.close()

sess.close()
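
Finally, the script can be split into a training phase and an evaluation phase with a flag; at evaluation time, tf.train.latest_checkpoint finds the most recent checkpoint in the directory: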

sess=tf.InteractiveSession()  
sess.run(tf.global_variables_initializer())

is_train=False
saver=tf.train.Saver(max_to_keep=3)

# training phase
if is_train:
    max_acc=0
    f=open('ckpt/acc.txt','w')
    for i in range(100):
      batch_xs, batch_ys = mnist.train.next_batch(100)
      sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})
      val_loss,val_acc=sess.run([loss,acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
      print('epoch:%d, val_loss:%f, val_acc:%f'%(i,val_loss,val_acc))
      f.write(str(i+1)+', val_acc: '+str(val_acc)+'\n')
      if val_acc>max_acc:
          max_acc=val_acc
          saver.save(sess,'ckpt/mnist.ckpt',global_step=i+1)
    f.close()

# evaluation phase
else:
    model_file=tf.train.latest_checkpoint('ckpt/')
    saver.restore(sess,model_file)
    val_loss,val_acc=sess.run([loss,acc], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
    print('val_loss:%f, val_acc:%f'%(val_loss,val_acc))
sess.close()


