Paddle Inference: Deploying a PaddleSeg Model on Jetson with the Python API

Table of Contents

    • Environment Setup
    • Deployment
      • Main Program
      • Prediction Results

Environment Setup

  • paddlepaddle-gpu
  • paddleseg
pip3 install numpy==1.13.3
pip3 install paddleseg --no-dependencies
pip3 install filelock pyyaml "visualdl>=2.0.0" yapf==0.26.0 tqdm prettytable pre-commit flake8
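
To verify that the GPU build of PaddlePaddle is installed correctly before deploying, you can run Paddle's built-in self check (a quick sanity test; not part of the original setup steps):

python3 -c "import paddle; paddle.utils.run_check()"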

Deployment

The full deployment script is below; it wraps Paddle Inference's Config/Predictor API together with the preprocessing transforms that were exported alongside the model.

import cv2
import numpy as np
import yaml
import random
import os
import codecs
from paddle.inference import Config
from paddle.inference import PrecisionType
from paddle.inference import create_predictor

import paddleseg.transforms as T
from paddleseg.cvlibs import manager
from paddleseg.utils import get_sys_env, logger, get_image_list
from paddleseg.utils.visualize import get_pseudo_color_map

class DeployYmlConfig:
    def __init__(self, path):
        yml_path = os.path.join(path, "deploy.yaml")
        with codecs.open(yml_path, 'r', 'utf-8') as file:
            # an explicit Loader avoids the PyYAML load() deprecation warning/error
            self.dic = yaml.load(file, Loader=yaml.FullLoader)

        self._transforms = self._load_transforms(
            self.dic['Deploy']['transforms'])
        self._dir = path

    @property
    def transforms(self):
        return self._transforms

    @property
    def model_file(self):
        return os.path.join(self._dir, self.dic['Deploy']['model'])

    @property
    def params_file(self):
        return os.path.join(self._dir, self.dic['Deploy']['params'])

    def _load_transforms(self, t_list):
        com = manager.TRANSFORMS
        transforms = []
        for t in t_list:
            ctype = t.pop('type')
            transforms.append(com[ctype](**t))

        return T.Compose(transforms)
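
# For reference, the deploy.yaml exported by PaddleSeg typically looks like the
# following (illustrative example; the exact transform list depends on how the
# model was exported):
#
#   Deploy:
#     model: model.pdmodel
#     params: model.pdiparams
#     transforms:
#       - type: Normalize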

class Paddle_Seg:
    def __init__(self, model_folder_dir, infer_img_size=224, use_gpu=False, 
                 gpu_memory=500, use_tensorrt=False, precision_mode="fp32"):
        self.model_folder_dir = model_folder_dir
        self.infer_img_size = infer_img_size       # input image size the model expects
        self.use_gpu = use_gpu                     # whether to run on the GPU, default False
        self.gpu_memory = gpu_memory               # initial GPU memory pool size in MB, default 500
        self.use_tensorrt = use_tensorrt           # whether to enable TensorRT, default False
        self.precision = precision_mode            # TensorRT precision mode: "fp32", "fp16" or "int8"
    
    def init(self, camera_width=640, camera_height=480):
        # read the model paths and preprocessing transforms from deploy.yaml
        self.cfg = DeployYmlConfig(self.model_folder_dir)
        # build the predictor
        self.predictor = self.predict_config()
        self.color_list = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in range(10)]
        # warm-up on a blank camera-sized frame, so graph optimization and the
        # (optional) TensorRT engine build happen here rather than on the first real frame
        img = np.zeros(shape=(int(camera_height), int(camera_width), 3), dtype="uint8")
        self.infer(img)

    def predict_config(self):
        # ------------- build the inference Config ------------- #
        config = Config()
        # load the model structure and weights
        config.set_prog_file(self.cfg.model_file)
        config.set_params_file(self.cfg.params_file)
        precision_map = {
                    "int8": PrecisionType.Int8,
                    "fp16": PrecisionType.Half,
                    "fp32": PrecisionType.Float32}
        if self.use_gpu:
            config.enable_use_gpu(self.gpu_memory, 0)
            if self.use_tensorrt:
                # calibration is only needed for int8; use_static=True caches the
                # serialized TensorRT engine so it is not rebuilt on every run
                use_calib_mode = (self.precision == "int8")
                use_static = True
                config.enable_tensorrt_engine(workspace_size=1 << 30, precision_mode=precision_map[self.precision],
                                            max_batch_size=1, min_subgraph_size=50,
                                            use_static=use_static, use_calib_mode=use_calib_mode)
        print("----------------------------------------------")
        print("                 RUNNING CONFIG                 ")
        print("----------------------------------------------")
        print("Model input size: {}".format([self.infer_img_size, self.infer_img_size, 3]))
        print("Use GPU is: {}".format(config.use_gpu()))
        print("GPU device id is: {}".format(config.gpu_device_id()))
        print("Init mem size is: {}".format(config.memory_pool_init_size_mb()))
        print("Use TensorRT: {}".format(self.use_tensorrt))
        print("Precision mode: {}".format(precision_map[self.precision]))
        print("----------------------------------------------")
        # enable IR graph optimization and memory reuse
        config.switch_ir_optim()
        config.enable_memory_optim()
        predictor = create_predictor(config)
        return predictor

    def preprocess(self, img):
        # apply the exported transforms and add a batch dimension -> (1, C, H, W)
        data = np.array([self.cfg.transforms(img)[0]])
        return data

    def predict(self, predictor, data):
        input_names = predictor.get_input_names()
        input_handle = predictor.get_input_handle(input_names[0])
        output_names = predictor.get_output_names()
        output_handle = predictor.get_output_handle(output_names[0])
        input_handle.reshape(data.shape)
        input_handle.copy_from_cpu(data)
        # run the predictor
        predictor.run()
        # fetch the output: per-pixel class ids of shape (N, H, W)
        results = output_handle.copy_to_cpu()
        return results

    def infer(self, img):
        data = self.preprocess(img)
        result = self.predict(self.predictor, data)
        return result

    def draw_img(self, res, img):
        # res[0] holds per-pixel class ids; cast to uint8 so that res * 255 does
        # not overflow and the mask dtype matches img for addWeighted
        res = res[0].astype("uint8")
        mask = cv2.merge([res, res * 255, res])
        img = cv2.addWeighted(img, 1, mask, 0.5, 0)
        return img
        
    def save_img(self, result, save_name='res.png'):
        result_img = get_pseudo_color_map(result[0])
        result_img.save(save_name)

Main Program

if __name__ == "__main__":
    ###################
    model_folder_dir="../../../big_model/deeplab1.4"
    infer_img_size=512
    use_gpu=True
    gpu_memory=500
    use_tensorrt=True
    precision_mode="fp16"
    ###################
    paddle_seg = Paddle_Seg(model_folder_dir=model_folder_dir, 
                            infer_img_size=infer_img_size, use_gpu=use_gpu, 
                            gpu_memory=gpu_memory, use_tensorrt=use_tensorrt, 
                            precision_mode=precision_mode)
    paddle_seg.init(928, 800)
    img = cv2.imread("61.jpg")
    
    from color_dist import *
    color="chin_yellow_blind_path"
    # HSV thresholds for the target color
    color_lower = np.array(color_dist[color]["Lower"], np.uint8)
    color_upper = np.array(color_dist[color]["Upper"], np.uint8)
    dilate_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))  # rectangular structuring element
    erode_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # rectangular structuring element

    # color-mask and threshold preprocessing (prepared here; not used by the segmentation call below)
    hsvFrame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    color_mask = cv2.inRange(hsvFrame, color_lower, color_upper) 
    print(color_mask.shape)
    mask = np.zeros((800, 928))
    imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 
    ret,binary = cv2.threshold(imgray,125,255,cv2.THRESH_BINARY)
    
    res = paddle_seg.infer(img)
    
    res = paddle_seg.draw_img(res, img)
    cv2.imwrite("res.jpg", res)

Prediction Results

(Figure 1: prediction result, the segmentation mask overlaid on the input image)

Deployment takes real effort; if you found this useful, please give it a like~
