Deploying a trained PaddlePaddle model to a server

In paddlepaddle训练自己的数据集 we trained a model on our own dataset. Since the saved checkpoint also carries information needed only for training, it can be fairly large; save_inference_model lets us save an inference-only copy of the model, shrinking it to roughly a quarter of the size.

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    model_dir=args.pretrained_model_dir+"/"+args.model+"-"+str(args.scale)+"-"+str(args.resize_h)
    def if_exist(var):
        return os.path.exists(os.path.join(model_dir, var.name))
    test_prog = fluid.default_main_program().clone(for_test=True)
    fluid.io.load_vars(exe, model_dir, main_program=test_prog, predicate=if_exist)
    # image and fetech are the network's input and output variables defined in the training script
    fluid.io.save_inference_model(model_dir+"-fluid", [image.name], [fetech.name], exe, test_prog,
        model_filename=None, params_filename="params")

We can write a short script to verify that the model saved above predicts correctly. With model_filename=None and params_filename="params", save_inference_model writes the program to a file named __model__ and merges all parameters into a single file named params; those are exactly the file names the loading code below looks for.

import os
import numpy as np
import paddle.fluid as fluid

imgfile="test.jpg"

def get_shape(fluid, program, name):
    # Return the input variable's shape without the batch dimension.
    for var in program.list_vars():
        if var.name == name:
            return list(var.shape[1:])

    raise ValueError('not found shape for input layer[%s], '
                     'you can specify it yourself' % (name))

def load_inference_model(dirname, exe):
    """ load fluid's inference model
    """
    model_fn = '__model__'
    params_fn = 'params'
    if os.path.exists(os.path.join(dirname, model_fn)) \
            and os.path.exists(os.path.join(dirname, params_fn)):
        program, feed_names, fetch_targets = fluid.io.load_inference_model(\
                dirname, exe, model_fn, params_fn)
    else:
        raise ValueError('not found model files in directory[%s]' % (dirname))
    input_shape = get_shape(fluid, program, feed_names[0])
    feed_shapes = [input_shape]

    return program, feed_names, fetch_targets, feed_shapes

mean = np.array([104., 117., 124.], dtype=np.float32).reshape([3, 1, 1])

def load_data(imgfile, shape):
    h, w = shape[1:]
    from PIL import Image
    im = Image.open(imgfile)

    im = im.resize((w, h), Image.ANTIALIAS)
    # np.array(im) stores the image as H(height), W(width), C(channel);
    # PaddlePaddle requires the CHW order, so transpose.
    im = np.array(im).astype(np.float32)
    im = im.transpose((2, 0, 1))  # HWC -> CHW
    im = im[(2, 1, 0), :, :]  # RGB -> BGR

    # The mean to be subtracted from each image.
    # By default, the per-channel ImageNet mean.
    im = im - mean
    return im.reshape([1] + shape)

def test_infer_model():
    model_path="fluid_model"
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    try:
        ret = load_inference_model(model_path, exe)
        test_program, feed_names, fetch_targets, feed_shapes = ret
        print('load model success')
        image = feed_names[0]
        input_shape = feed_shapes[0]
        np_images = load_data(imgfile, input_shape)
        results = exe.run(program=test_program,
                        feed={image: np_images},
                        fetch_list=fetch_targets)
        result = results[0]
        print('infer succeeded, predicted class: %d' % (np.argmax(result)))
    except ValueError as e:
        print(e)
    
def main():
    test_infer_model()

if __name__=="__main__":
    main()

With that verified, we can now build the server on top of this code.

Create an index.htm file with a form that uploads the chosen image to the server. A minimal version could look like the following (the file field must be named img, because server.py below reads request.files['img'], and the action URL assumes the server listens locally on port 8090):

    <html>
    <head>
        <meta charset="utf-8">
        <title>Predict Image</title>
    </head>
    <body>
    <form action="http://127.0.0.1:8090/infer" method="post" enctype="multipart/form-data">
        Choose an image to predict: <input type="file" name="img">
        <input type="submit" value="Predict">
    </form>
    </body>
    </html>

Open it in a browser, pick an image, and submit; the response is the predicted class id.

Next, write a server.py. In 把预测模型部署到Android手机上, the server writes the request payload to a file and then reads that file back in for inference. File I/O is a comparatively expensive operation, and we already have the image in hand, just encoded as a byte string. Fortunately OpenCV provides the imdecode interface, which parses an image directly from such a buffer, so efficiency improves considerably. A second thing worth improving is that the original reloads the model files from disk on every prediction, which is also slow; we can write a singleton so the model is loaded only once, when the first request arrives.
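To illustrate the decoding step on its own, here is a minimal sketch; test.jpg merely stands in for the bytes read from the request:

    import cv2
    import numpy as np

    # Instead of writing the upload to disk and calling cv2.imread on it,
    # decode the image straight from the in-memory byte buffer.
    raw = open('test.jpg', 'rb').read()  # stands in for request.files['img'].read()
    img = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
    print(img.shape)  # (H, W, 3), BGR channel order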

import os
import numpy as np
import cv2
import paddle.fluid as fluid
from flask import Flask, request
app = Flask(__name__)

def get_shape(fluid, program, name):
    # Return the input variable's shape without the batch dimension.
    for var in program.list_vars():
        if var.name == name:
            return list(var.shape[1:])
    raise ValueError('not found shape for input layer[%s], '
                     'you can specify it yourself' % (name))

def load_inference_model(dirname, exe):
    model_fn = '__model__'
    params_fn = 'params'
    if os.path.exists(os.path.join(dirname, model_fn)) \
            and os.path.exists(os.path.join(dirname, params_fn)):
        program, feed_names, fetch_targets = fluid.io.load_inference_model(\
                dirname, exe, model_fn, params_fn)
    else:
        raise ValueError('not found model files in directory[%s]' % (dirname))
    input_shape = get_shape(fluid, program, feed_names[0])
    feed_shapes = [input_shape]
    return program, feed_names, fetch_targets, feed_shapes

means = np.array([104., 117., 124.], dtype=np.float32).reshape([3, 1, 1])

def preprocess(img, shape):
    # shape is [C, H, W]; cv2.resize expects (width, height)
    img = cv2.resize(img, (shape[2], shape[1]))
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img.astype(np.float32) - means
    return img.reshape([1] + shape)

class ImageClassifier:
    __instance=None
    def __init__(self):
        place=fluid.CPUPlace()
        self.exe = fluid.Executor(place)
        self.exe.run(fluid.default_startup_program())
        model_path="fluid_model"
        self.test_program, self.feed_names, self.fetch_targets, self.feed_shapes = load_inference_model(model_path, self.exe)
    @classmethod
    def getInstance(cls):
        if not cls.__instance:
            cls.__instance=ImageClassifier()
        return cls.__instance
    def predict(self,img):
        img=preprocess(img,self.feed_shapes[0])
        results = self.exe.run(program=self.test_program,
                     feed={self.feed_names[0]: img},
                     fetch_list=self.fetch_targets)
        result = results[0]
        pred = int(np.argmax(result))
        print('infer succeeded, predicted class: %d' % pred)
        return str(pred)

@app.route('/infer', methods=['POST'])
def infer():
    # Decode the uploaded bytes in memory; IMREAD_COLOR guarantees 3 BGR channels
    img = cv2.imdecode(np.frombuffer(request.files['img'].read(), np.uint8), cv2.IMREAD_COLOR)
    r=ImageClassifier.getInstance().predict(img)
    return r
if __name__ == '__main__':
    app.run(port=8090)
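
With the server running (python server.py), the endpoint can also be exercised without the HTML page. Here is a small sketch using the requests library, assuming the server is listening locally on port 8090 and a test.jpg is at hand:

    import requests

    with open('test.jpg', 'rb') as f:
        resp = requests.post('http://127.0.0.1:8090/infer', files={'img': f})
    print(resp.text)  # the predicted class id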

If you would rather not use the singleton and keep the original per-request style, the version below works as well; the helper functions are identical, only infer() changes, and every request now pays the cost of reloading the model:

import os
import numpy as np
import cv2
import paddle.fluid as fluid
from flask import Flask, request
app = Flask(__name__)

def get_shape(fluid, program, name):
    # Return the input variable's shape without the batch dimension.
    for var in program.list_vars():
        if var.name == name:
            return list(var.shape[1:])
    raise ValueError('not found shape for input layer[%s], '
                     'you can specify it yourself' % (name))

def load_inference_model(dirname, exe):
    model_fn = '__model__'
    params_fn = 'params'
    if os.path.exists(os.path.join(dirname, model_fn)) \
            and os.path.exists(os.path.join(dirname, params_fn)):
        program, feed_names, fetch_targets = fluid.io.load_inference_model(\
                dirname, exe, model_fn, params_fn)
    else:
        raise ValueError('not found model files in directory[%s]' % (dirname))
    input_shape = get_shape(fluid, program, feed_names[0])
    feed_shapes = [input_shape]
    return program, feed_names, fetch_targets, feed_shapes

means = np.array([104., 117., 124.], dtype=np.float32).reshape([3, 1, 1])

def preprocess(img, shape):
    # shape is [C, H, W]; cv2.resize expects (width, height)
    img = cv2.resize(img, (shape[2], shape[1]))
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img.astype(np.float32) - means
    return img.reshape([1] + shape)

@app.route('/infer', methods=['POST'])
def infer():
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    model_path = 'fluid_model/'
    test_program, feed_names, fetch_targets, feed_shapes = load_inference_model(model_path, exe)
    # Decode the uploaded bytes in memory; IMREAD_COLOR guarantees 3 BGR channels
    img = cv2.imdecode(np.frombuffer(request.files['img'].read(), np.uint8), cv2.IMREAD_COLOR)
    img=preprocess(img,feed_shapes[0])
    results = exe.run(program=test_program,
                     feed={feed_names[0]: img},
                     fetch_list=fetch_targets)
    result = results[0]
    pred = int(np.argmax(result))
    print('infer succeeded, predicted class: %d' % pred)
    return str(pred)
if __name__ == '__main__':
    app.run(port=8090)

References:

把预测模型部署到Android手机上 (deploying the inference model to an Android phone)
