Getting Started with TensorFlow Serving, Part 2: Serving an h5 Model Trained with keras-retinanet

  • Prerequisite: a trained keras-retinanet model file (.h5)
  • Code to convert the Keras h5 model to a SavedModel (pb):
from keras import backend as K
from tensorflow.python import saved_model
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
from keras_retinanet import models
import shutil
import os

# TensorFlow Serving looks for numeric version subdirectories under the model
# base path, so export into a version directory such as .../1
export_path = 'retinanet_savedmodel/1'
# change this to the num_classes your model was trained with
num_classes = 80
# build the inference graph (adds NMS and class-specific filtering on top of the backbone)
model = models.convert_model(
    model=models.backbone(backbone_name='resnet50').retinanet(num_classes=num_classes),
    nms=True,
    class_specific_filter=True,
    anchor_params=None
)

# load the trained weights; replace the placeholder with your .h5 path
model.load_weights('your keras-retinanet model path')

# strip the trailing ':0' from the tensor names for readability
print('Output layers', [o.name[:-2] for o in model.outputs])
print('Input layer', model.inputs[0].name[:-2])
# remove any previous export before writing a new one
if os.path.isdir(export_path):
    shutil.rmtree(export_path)
builder = saved_model.builder.SavedModelBuilder(export_path)

# keras-retinanet inference models output boxes, scores and labels, in that order
signature = predict_signature_def(
    inputs={'images': model.input},
    outputs={
        'output1': model.outputs[0],  # boxes
        'output2': model.outputs[1],  # scores
        'output3': model.outputs[2]   # labels
    }
)

sess = K.get_session()
builder.add_meta_graph_and_variables(
    sess=sess,
    tags=[saved_model.tag_constants.SERVING],
    # 'predict' is the signature_name clients must send in their requests
    signature_def_map={'predict': signature}
)
builder.save()
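
Before starting the server it is worth sanity-checking the export. Below is a minimal sketch, assuming the export path above; the saved_model_cli tool that ships with TensorFlow (saved_model_cli show --dir retinanet_savedmodel/1 --all) prints the same information from the command line:

# reload the export in a fresh session and print the 'predict' signature (TF 1.x API)
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], 'retinanet_savedmodel/1')
    print(meta.signature_def['predict'].inputs)   # should list the 'images' input
    print(meta.signature_def['predict'].outputs)  # should list output1/output2/output3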
  • Serve the model with TensorFlow Serving and start the gRPC service (note: if you use the tensorflow/serving 1.14.0-gpu image, it depends on CUDA 10.0). gRPC listens on port 8500 and REST on 8501, so publish both:
    docker run --runtime=nvidia -p 8500:8500 -p 8501:8501 \
    --mount type=bind,\
    source={directory containing the exported SavedModel},\
    target=/models/{model_name} \
    -e MODEL_NAME={model_name} -t tensorflow/serving:1.12.0-gpu &
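
    A quick liveness check, assuming the container above is running locally and MODEL_NAME was set to retinanet (a placeholder name), via the model-status REST endpoint on port 8501:

    # minimal sketch: query TensorFlow Serving's model-status REST API
    import requests

    resp = requests.get('http://localhost:8501/v1/models/retinanet')
    print(resp.json())  # the model state should be 'AVAILABLE' once loaded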
  • Client code (to be continued):
    from tensorflow_serving.apis import predict_pb2
    from tensorflow_serving.apis import prediction_service_pb2_grpc
    import tensorflow as tf
    import numpy as np
    import grpc
    
    def request_server(img_np,
                       server_url,
                       model_name,
                       signature_name,
                       input_name,
                       output_name):
        """
        :param img_np: preprocessed image, numpy.ndarray of shape [h, w, c] or [1, h, w, c]
        :param server_url: TensorFlow Serving gRPC address, str, e.g. '0.0.0.0:8500'
        :param model_name: must match tensorflow_model_server --model_name (MODEL_NAME in the docker command)
        :param signature_name: must match the key used in signature_def_map ('predict' above)
        :param input_name: input key of the signature ('images' above)
        :param output_name: output key of the signature, e.g. 'output1'
        :return: numpy array holding the requested output
        """
        # the model expects a batch dimension
        if img_np.ndim == 3:
            img_np = np.expand_dims(img_np, axis=0)
        # open the gRPC channel and create a prediction stub
        channel = grpc.insecure_channel(server_url)
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        # build the request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = model_name
        # request.model_spec.version.value = 1  # optionally pin a model version
        request.model_spec.signature_name = signature_name
        request.inputs[input_name].CopyFrom(
            tf.contrib.util.make_tensor_proto(img_np, shape=img_np.shape))
        # call the server with a 10-second timeout and decode the response
        response = stub.Predict(request, 10.0)
        return tf.make_ndarray(response.outputs[output_name])
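
    A sketch of how request_server might be called, assuming the server and model name from the docker command above (here 'retinanet', a placeholder) and keras-retinanet's own preprocessing utilities; the image path is also a placeholder:

    from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image

    # preprocess exactly as keras-retinanet does at inference time
    image = read_image_bgr('test.jpg')  # placeholder path
    image = preprocess_image(image)
    image, scale = resize_image(image)

    # 'output1' is the boxes tensor of the signature exported above
    boxes = request_server(image, '0.0.0.0:8500', 'retinanet',
                           'predict', 'images', 'output1')
    boxes /= scale  # rescale boxes back to the original image size
    print(boxes)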