tf_serving: Model Training, Export, and Deployment (Walkthrough)

References:
1、https://tensorflow.google.cn/
2、https://www.tensorflow.org/
3、https://zhuanlan.zhihu.com/p/23361413


Following the official Serving a TensorFlow Model tutorial and the TensorFlow Serving 尝尝鲜 post, this article walks through Serving a TensorFlow Model step by step.


Model Training

This section walks through a simplified version of the official mnist_saved_model.py.

Code:

cd ~/serving/tensorflow_serving
mkdir test
cd test 
vim mnist_saved_model.py
# write the following content
# -*- coding: UTF-8 -*-

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import os


tf.app.flags.DEFINE_integer('training_iteration', 100,
                            'number of training iterations.')
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp/model', 'Working directory.')
FLAGS = tf.app.flags.FLAGS

mnist = input_data.read_data_sets("/tmp/MNIST_data", one_hot=True)
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, 28 * 28 * 1], name='x')
y_ = tf.placeholder(tf.float32, [None, 10], name='y_')

with tf.variable_scope('test'):
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(x, w) + b
    y = tf.nn.softmax(logits, name='y')

    # Feed the raw logits here: softmax_cross_entropy_with_logits applies
    # softmax internally, so passing y would apply softmax twice.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    # values, indices = tf.nn.top_k(y, 10)
    # table = tf.contrib.lookup.index_to_string_table_from_tensor(
    #       tf.constant([str(i) for i in range(10)]))
    # prediction_classes = table.lookup(tf.to_int64(indices))

saver = tf.train.Saver()

# Check whether a checkpoint was saved previously
ckpt = tf.train.get_checkpoint_state(FLAGS.work_dir)
if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    tf.global_variables_initializer().run()


for step in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    if step % 20 == 0:
        saver.save(sess, os.path.join(FLAGS.work_dir,'test.ckpt'), global_step=step)

print('training accuracy %g' % sess.run(
      accuracy, feed_dict={x: mnist.test.images,
                           y_: mnist.test.labels}))
print('Done training!')

Model Export

# The code above trains the model and saves its variables as checkpoints.

# Next, export the trained model to a SavedModel (.pb file).
# Export model
# WARNING(break-tutorial-inline-code): The following code snippet is
# in-lined in tutorials, please update tutorial documents accordingly
# whenever code changes.
# export_path_base = sys.argv[-1]
export_path_base = os.path.join('/tmp','test')
export_path = os.path.join(
  tf.compat.as_bytes(export_path_base),
  tf.compat.as_bytes(str(FLAGS.model_version)))
print('Exporting trained model to', export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)

"""
# ----------------- the block between the dashed lines can be dropped -----------------
# Build the signature_def_map.
classification_inputs = tf.saved_model.utils.build_tensor_info(
  serialized_tf_example)
classification_outputs_classes = tf.saved_model.utils.build_tensor_info(
  prediction_classes)
classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)

classification_signature = (
  tf.saved_model.signature_def_utils.build_signature_def(
      inputs={
          tf.saved_model.signature_constants.CLASSIFY_INPUTS:
              classification_inputs
      },
      outputs={
          tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
              classification_outputs_classes,
          tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
              classification_outputs_scores
      },
      method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))
# --------------------------------------------------------------------------
"""


tensor_info_x = tf.saved_model.utils.build_tensor_info(x)  # input
tensor_info_y = tf.saved_model.utils.build_tensor_info(y)  # output

prediction_signature = (
  tf.saved_model.signature_def_utils.build_signature_def(
      inputs={'x': tensor_info_x},
      outputs={'y': tensor_info_y},
      method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

# legacy_init_op runs table initializers when the server loads the model.
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
builder.add_meta_graph_and_variables(
  sess, [tf.saved_model.tag_constants.SERVING],
  signature_def_map={
      'predict_images':
          prediction_signature,
      # tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
      #     classification_signature,
  },
  legacy_init_op=legacy_init_op)

builder.save()

print('Done exporting!')
# Run directly
cd ~/serving
python tensorflow_serving/test/mnist_saved_model.py

# or
bazel build -c opt //tensorflow_serving/test:mnist_saved_model
bazel-bin/tensorflow_serving/test/mnist_saved_model /tmp/test

Export result: /tmp/test/1/ now contains saved_model.pb and a variables/ subdirectory.
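
To sanity-check the export before deploying, the SavedModel can be reloaded in a fresh session (a minimal sketch; it assumes the export landed in /tmp/test/1 as above):

import tensorflow as tf

export_dir = '/tmp/test/1'  # model_version 1 from the export step above

with tf.Session(graph=tf.Graph()) as sess:
    # Load the SavedModel under the 'serve' tag and inspect its signature.
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    sig = meta_graph.signature_def['predict_images']
    print('input tensor :', sig.inputs['x'].name)
    print('output tensor:', sig.outputs['y'].name)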

Model Deployment

# Build the TensorFlow Serving model server
bazel build -c opt //tensorflow_serving/model_servers:tensorflow_model_server


# Serve the model we just exported
bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server --port=9000 --model_name=test --model_base_path=/tmp/test
# or
tensorflow_model_server --port=9000 --model_name=test --model_base_path=/tmp/test
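# The server watches model_base_path for numeric version subdirectories
# (here /tmp/test/1) and by default serves the highest version it finds.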

Alternatively, if the tensorflow-model-server package is installed (e.g. via apt), the tensorflow_model_server command above is available directly on the PATH.
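
Before writing the client, a quick way to confirm the server is up is to check that the gRPC port accepts connections (a minimal sketch; it assumes the server was started on localhost:9000 as above):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
try:
    sock.connect(('localhost', 9000))
    print('tensorflow_model_server is listening on port 9000')
except socket.error as err:
    print('cannot reach the server: %s' % err)
finally:
    sock.close()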

Client

Next we write a simple client to call the deployed model, based on the official mnist_client.py.

cd ~/serving/tensorflow_serving/test
vim test_client.py
# write the following content

from grpc.beta import implementations
import numpy as np
import tensorflow as tf

from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/MNIST_data", one_hot=True)
tf.app.flags.DEFINE_string('server', 'localhost:9000',
                           'PredictionService host:port')
FLAGS = tf.app.flags.FLAGS

n_samples = 100

host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

# Use the first n_samples test images as request data
x_data = mnist.test.images[:n_samples]
y_data = mnist.test.labels[:n_samples]


# Send request
request = predict_pb2.PredictRequest()
request.model_spec.name = 'test'
# The export registered the signature under 'predict_images'; without
# this the server cannot resolve the signature and the call fails.
request.model_spec.signature_name = 'predict_images'
request.inputs['x'].CopyFrom(
    tf.contrib.util.make_tensor_proto(x_data, shape=[n_samples, 28 * 28 * 1]))
result = stub.Predict(request, 10.0)  # 10 secs timeout

print(result)
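
The PredictResponse stores the output as a TensorProto. A minimal sketch for turning it into a numpy array and scoring it client-side (it continues the script above, so result, y_data, and np are already defined):

# Convert the returned TensorProto to a numpy array of class scores.
scores = tf.contrib.util.make_ndarray(result.outputs['y'])  # shape (n_samples, 10)
predictions = np.argmax(scores, axis=1)
labels = np.argmax(y_data, axis=1)
print('client-side accuracy: %g' % np.mean(predictions == labels))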
cd ~/serving/tensorflow_serving/test
vim BUILD

# add the following target
py_binary(
    name = "test_client",
    srcs = [
        "test_client.py",
    ],
    deps = [
        "//tensorflow_serving/apis:predict_proto_py_pb2",
        "//tensorflow_serving/apis:prediction_service_proto_py_pb2",
        "@org_tensorflow//tensorflow:tensorflow_py",
    ],
)


Finally, build and run the client to see the online prediction results:

cd ~/serving
bazel build //tensorflow_serving/test:test_client && ./bazel-bin/tensorflow_serving/test/test_client

# or
bazel build -c opt //tensorflow_serving/test:test_client
bazel-bin/tensorflow_serving/test/test_client #--num_tests=1000 --server=localhost:9000

# or
python tensorflow_serving/test/test_client.py #--num_tests=1000 --server=localhost:9000
