How to convert MMYOLO models into blob format for OAK cameras?

Editor: OAK China
First published at: oakchina.cn
This content may be updated from time to time; the official site always has the latest version, so please check the first-publication link.

▌Introduction

Hello everyone, this is OAK China, and I'm your assistant.

Recently, a few friends in our community were a bit unclear about the process of converting MMYOLO models to blob, so I wrote this post. (Please tell me how thoughtful I am! Our principle: every reasonable request gets fulfilled!)

1. For conversion and usage tutorials for other YOLO models, please refer to our other tutorials.
2. For detection-type YOLO models, we recommend the online converter (link); if the online conversion fails, use this tutorial to convert locally.

.pt to .onnx

Use the following script (place it in the MMYOLO root directory) to convert a PyTorch model to an ONNX model; if openvino_dev is installed, it can then be converted further to an OpenVINO model:

Example usage:

# python export_yolo.py config checkpoint --img-size 320
# e.g.
python export_yolo.py \
ppyoloe_plus_s_fast_8xb8-80e_coco.py \
ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052-9fee7619.pth \
--work-dir work_dir --img-size 320 

python export_onnx.py -m work_dir/ppyoloe_plus_s_fast_8xb8-80e_coco.onnx -v ppyoloe

export_yolo.py:

usage: export_yolo.py [-h] [--work-dir WORK_DIR]
                      [-imgsz IMG_SIZE [IMG_SIZE ...]] [-op OPSET]
                      config checkpoint

positional arguments:
  config                Config file
  checkpoint            Checkpoint file

options:
  -h, --help            show this help message and exit
  --work-dir WORK_DIR   Path to save export model (default: work_dir)
  -imgsz IMG_SIZE [IMG_SIZE ...], --img-size IMG_SIZE [IMG_SIZE ...]
                        Image size of height and width (default: [640, 640])
  -op OPSET, --opset OPSET
                        ONNX opset version (default: 12)
# coding=utf-8
import argparse
import json
import warnings
from io import BytesIO
from argparse import ArgumentDefaultsHelpFormatter
from pathlib import Path

import onnx
import torch
import torch.nn as nn
from mmdet.apis import init_detector
from mmdet.models.backbones.csp_darknet import CSPLayer, Focus
from mmengine.utils.path import mkdir_or_exist
from rich import print

from mmyolo.models import RepVGGBlock
from mmyolo.models.layers import CSPLayerWithTwoConv

warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning)
warnings.filterwarnings(action="ignore", category=torch.jit.ScriptWarning)
warnings.filterwarnings(action="ignore", category=UserWarning)
warnings.filterwarnings(action="ignore", category=FutureWarning)
warnings.filterwarnings(action="ignore", category=ResourceWarning)


def parse_args():
    parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("config", type=Path, help="Config file")
    parser.add_argument("checkpoint", type=Path, help="Checkpoint file")
    parser.add_argument(
        "--work-dir",
        default=Path("./work_dir"),
        type=Path,
        help="Path to save export model",
    )
    parser.add_argument(
        "-imgsz",
        "--img-size",
        nargs="+",
        type=int,
        default=[640, 640],
        help="Image size of height and width",
    )
    parser.add_argument("-op", "--opset", type=int, default=12, help="ONNX opset version")
    args = parser.parse_args()
    args.img_size *= 2 if len(args.img_size) == 1 else 1
    args.work_dir = args.work_dir.resolve().absolute()
    print(args)
    return args


def build_model_from_cfg(config_path, checkpoint_path, device):
    model = init_detector(config_path, checkpoint_path, device=device)
    model.eval()
    return model


class DeployFocus(nn.Module):
    def __init__(self, orin_Focus: nn.Module):
        super().__init__()
        self.__dict__.update(orin_Focus.__dict__)

    def forward(self, x):
        batch_size, channel, height, width = x.shape
        x = x.reshape(batch_size, channel, -1, 2, width)
        x = x.reshape(batch_size, channel, x.shape[2], 2, -1, 2)
        half_h = x.shape[2]
        half_w = x.shape[4]
        x = x.permute(0, 5, 3, 1, 2, 4)
        x = x.reshape(batch_size, channel * 4, half_h, half_w)

        return self.conv(x)


class DeployC2f(nn.Module):
    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x):
        x_main = self.main_conv(x)
        x_main = [x_main, x_main[:, self.mid_channels :, ...]]
        x_main.extend(blocks(x_main[-1]) for blocks in self.blocks)
        x_main.pop(1)
        return self.final_conv(torch.cat(x_main, 1))


class HardSigmoid(nn.Module):
    """Hard Sigmoid Module"""

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HardSigmoid, self).__init__()
        assert divisor != 0, "divisor is not allowed to be equal to zero"
        self.bias = bias
        self.divisor = divisor
        self.min_value = min_value
        self.max_value = max_value

    def forward(self, x):
        """forward"""

        x = (x + self.bias) / self.divisor
        return x.clamp_(self.min_value, self.max_value)


def switch_deploy(baseModel):
    for layer in baseModel.modules():
        if isinstance(layer, RepVGGBlock):
            layer.switch_to_deploy()
        elif isinstance(layer, Focus):
            baseModel.backbone.stem = DeployFocus(layer)
        elif isinstance(layer, CSPLayerWithTwoConv):
            setattr(layer, "__class__", DeployC2f)
        elif isinstance(layer, CSPLayer):
            if hasattr(layer, "attention"):
                if isinstance(layer.attention.act, nn.Hardsigmoid):
                    layer.attention.act = HardSigmoid()


def main():
    args = parse_args()
    mkdir_or_exist(args.work_dir)
    device = "cpu"  # 'cuda:0'

    output_names = None

    baseModel = build_model_from_cfg(args.config.as_posix(), args.checkpoint.as_posix(), device)

    switch_deploy(baseModel)

    baseModel.eval()

    fake_input = torch.randn(1, 3, *args.img_size).to(device)
    # dry run
    baseModel(fake_input)

    save_onnx_path = args.work_dir.joinpath(args.config.with_suffix(".onnx").name)
    # export onnx
    with BytesIO() as f:
        torch.onnx.export(
            baseModel,
            fake_input,
            f,
            input_names=["images"],
            output_names=output_names,
            opset_version=args.opset,
        )
        f.seek(0)
        onnx_model = onnx.load(f)
        onnx.checker.check_model(onnx_model)

        try:
            import onnxsim

            onnx_model, check = onnxsim.simplify(onnx_model)
            assert check, "assert check failed"
        except Exception as e:
            print(f"Simplify failure: {e}")

    onnx.save(onnx_model, save_onnx_path)
    print(f"ONNX export success, save into {save_onnx_path}")

    num_classes = baseModel.bbox_head.num_classes
    strides = baseModel.bbox_head.featmap_strides

    labels = baseModel.dataset_meta["classes"]

    anchors = (
        torch.tensor(baseModel.cfg.anchors).flatten().tolist()
        if hasattr(baseModel.cfg, "anchors")
        else []
    )
    masks = (
        {
            f"side{int(args.img_size[0] // num)}": list(range(i * 3, i * 3 + 3))
            for i, num in enumerate(strides)
        }
        if anchors
        else {}
    )

    export_json = args.work_dir.joinpath("model.json")
    export_json.write_text(
        json.dumps(
            {
                "nn_config": {
                    "output_format": "detection",
                    "NN_family": "YOLO",
                    "input_size": f"{args.img_size[0]}x{args.img_size[1]}",
                    "NN_specific_metadata": {
                        "classes": num_classes,
                        "coordinates": 4,
                        "anchors": anchors,
                        "anchor_masks": masks,
                        "iou_threshold": 0.5,
                        "confidence_threshold": 0.5,
                    },
                },
                "mappings": {"labels": labels},
            },
            indent=4,
        )
    )


if __name__ == "__main__":
    main()

Then convert with the following script:

export_onnx.py:

usage: export_onnx.py [-h] -m INPUT_MODEL
                      [-v {yolox,yolov5,yolov6,yolov7,yolov8,ppyoloe}]
                      [-n NAME] [-o OUTPUT_DIR] [-b] [-s]
                      [-sh {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}]
                      [-t {docker,blobconverter,local}]

Tool for converting YOLO models to the blob format used by OAK

options:
  -h, --help            show this help message and exit
  -m INPUT_MODEL, -i INPUT_MODEL, -w INPUT_MODEL, --input_model INPUT_MODEL
                        Path to ONNX .onnx file (default: None)
  -v {yolox,yolov5,yolov6,yolov7,yolov8,ppyoloe}, --version {yolox,yolov5,yolov6,yolov7,yolov8,ppyoloe}
                        YOLO version (default: yolov5)
  -n NAME, --name NAME  The name of the model to be saved, none means using
                        the same name as the input model (default: None)
  -o OUTPUT_DIR, --output_dir OUTPUT_DIR
                        Directory for saving files, none means using the same
                        path as the input model (default: None)
  -b, --blob            turn on OAK Blob export (default: False)
  -s, --spatial_detection
                        Inference with depth information (default: False)
  -sh {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}, --shaves {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
                        Specifies number of SHAVE cores that converted model
                        will use (default: None)
  -t {docker,blobconverter,local}, --convert_tool {docker,blobconverter,local}
                        Which tool is used to convert, docker: should already
                        have docker (https://docs.docker.com/get-docker/) and
                        docker-py (pip install docker) installed;
                        blobconverter: uses an online server to convert the
                        model and should already have blobconverter (pip
                        install blobconverter); local: use openvino-dev (pip
                        install openvino-dev) and openvino 2022.1 ( https://do
                        cs.oakchina.cn/en/latest/pages/Advanced/Neural_network
                        s/local_convert_openvino.html#id2 ) to convert
                        (default: blobconverter)
# coding=utf-8
import argparse
import logging
import time
import warnings
from argparse import ArgumentDefaultsHelpFormatter
from pathlib import Path

import onnx

warnings.filterwarnings("ignore")

try:
    from rich import print
    from rich.logging import RichHandler

    logging.basicConfig(
        level="INFO",
        format="%(message)s",
        datefmt="[%X]",
        handlers=[
            RichHandler(
                rich_tracebacks=False,
                show_path=False,
            )
        ],
    )

except ImportError:
    logging.basicConfig(
        level="INFO",
        format="%(asctime)s\t%(levelname)s\t%(message)s",
        datefmt="[%X]",
    )


def parse_args():
    parser = argparse.ArgumentParser(
        description="Tool for converting YOLO models to the blob format used by OAK",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-m",
        "-i",
        "-w",
        "--input_model",
        type=Path,
        required=True,
        help="Path to ONNX .onnx file",
    )
    parser.add_argument(
        "-v",
        "--version",
        type=str,
        choices=["yolox", "yolov5", "yolov6", "yolov7", "yolov8", "ppyoloe"],
        default="yolov5",
        help="YOLO version",
    )
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        help="The name of the model to be saved, "
        + "none means using the same name as the input model",
    )
    parser.add_argument(
        "-o",
        "--output_dir",
        type=Path,
        help="Directory for saving files, "
        + "none means using the same path as the input model",
    )
    parser.add_argument(
        "-b",
        "--blob",
        action="store_true",
        help="turn on OAK Blob export",
    )
    parser.add_argument(
        "-s",
        "--spatial_detection",
        action="store_true",
        help="Inference with depth information",
    )
    parser.add_argument(
        "-sh",
        "--shaves",
        type=int,
        choices=range(1, 17),
        help="Specifies number of SHAVE cores that converted model will use",
    )
    parser.add_argument(
        "-t",
        "--convert_tool",
        type=str,
        help="Which tool is used to convert, "
        + "docker: should already have docker (https://docs.docker.com/get-docker/) "
        + "and docker-py (pip install docker) installed; "
        + "blobconverter: uses an online server to convert the model "
        + "and should already have blobconverter (pip install blobconverter); "
        + "local: use openvino-dev (pip install openvino-dev) "
        + "and openvino 2022.1 ( https://docs.oakchina.cn/en/latest/pages/"
        + "Advanced/Neural_networks/local_convert_openvino.html#id2 ) to convert",
        default="blobconverter",
        choices=["docker", "blobconverter", "local"],
    )

    args = parser.parse_args()
    args.input_model = args.input_model.resolve().absolute()
    if args.name is None:
        args.name = args.input_model.stem

    if args.output_dir is None:
        args.output_dir = args.input_model.parent

    if args.shaves is None:
        args.shaves = 5 if args.spatial_detection else 6

    return args


def modify_yolox(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolox with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    cls_preds = []
    reg_preds = []
    obj_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "multi_level_conv_cls" in n.name:
            cls_preds.append(i)
        elif "multi_level_conv_reg" in n.name:
            reg_preds.append(i)
        elif "multi_level_conv_obj" in n.name:
            obj_preds.append(i)

    logging.info(f"{cls_preds, reg_preds, obj_preds = }")

    num = len(cls_preds)

    for i, (cls, reg, obj) in enumerate(zip(cls_preds, reg_preds, obj_preds)):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid_cls = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"Sigmoid_{cls}"],
        )
        onnx_model.graph.node.append(sigmoid_cls)

        sigmoid_obj = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[obj].output[0]],
            outputs=[f"Sigmoid_{obj}"],
        )
        onnx_model.graph.node.append(sigmoid_obj)

        concat = onnx.helper.make_node(
            "Concat",
            inputs=[
                onnx_model.graph.node[reg].output[0],
                f"Sigmoid_{obj}",
                f"Sigmoid_{cls}",
            ],
            outputs=[f"output{i+1}_yolov6"],
            axis=1,
        )
        onnx_model.graph.node.append(concat)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov6",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def modify_yolov5(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolov5  with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    convs_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "convs_pred" in n.name:
            convs_preds.append(i)

    logging.info(f"{convs_preds = }")

    num = len(convs_preds)

    for i, cls in enumerate(convs_preds):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"output{i+1}_yolov5"],
        )
        onnx_model.graph.node.append(sigmoid)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov5",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def modify_yolov6(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolov6 with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    cls_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "cls_preds" in n.name:
            cls_preds.append(i)

    logging.info(f"{cls_preds = }")

    num = len(cls_preds)

    for i, cls in enumerate(cls_preds):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"Sigmoid_{cls}"],
        )
        onnx_model.graph.node.append(sigmoid)

        reduceMax = onnx.helper.make_node(
            "ReduceMax",
            inputs=[f"Sigmoid_{cls}"],
            outputs=[f"ReduceMax_{cls}"],
            keepdims=1,
            axes=[1],
        )
        onnx_model.graph.node.append(reduceMax)

        concat = onnx.helper.make_node(
            "Concat",
            inputs=[
                onnx_model.graph.node[cls + 1].output[0],
                f"ReduceMax_{cls}",
                f"Sigmoid_{cls}",
            ],
            outputs=[f"output{i+1}_yolov6r2"],
            axis=1,
        )
        onnx_model.graph.node.append(concat)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov6r2",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def modify_yolov7(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolov7  with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    convs_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "convs_pred" in n.name:
            convs_preds.append(i)

    logging.info(f"{convs_preds = }")

    num = len(convs_preds)

    for i, cls in enumerate(convs_preds):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"output{i+1}_yolov5"],
        )
        onnx_model.graph.node.append(sigmoid)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov7",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def modify_yolov8(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolov8 with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    cls_preds = []
    reg_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "cls_preds" in n.name:
            if "2/Conv" in n.name:
                cls_preds.append(i)
        elif "reg_preds" in n.name:
            if "2/Conv" in n.name:
                reg_preds.append(i + 6)

    logging.info(f"{cls_preds, reg_preds = }")

    num = len(cls_preds)

    for i, (cls, reg) in enumerate(zip(cls_preds, reg_preds)):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"Sigmoid_{cls}"],
        )
        onnx_model.graph.node.append(sigmoid)

        reduceMax = onnx.helper.make_node(
            "ReduceMax",
            inputs=[f"Sigmoid_{cls}"],
            outputs=[f"ReduceMax_{cls}"],
            keepdims=1,
            axes=[1],
        )
        onnx_model.graph.node.append(reduceMax)

        concat = onnx.helper.make_node(
            "Concat",
            inputs=[
                onnx_model.graph.node[reg].output[0],
                f"ReduceMax_{cls}",
                f"Sigmoid_{cls}",
            ],
            outputs=[f"output{i+1}_yolov6r2"],
            axis=1,
        )
        onnx_model.graph.node.append(concat)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov6r2",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def modify_ppyoloe(input_model, output_model):
    t = time.time()

    logging.info("Start to modify yolov8 with onnx %s..." % onnx.__version__)

    onnx_model = onnx.load(input_model)

    N, C, H, W = [
        dim.dim_value for dim in onnx_model.graph.input[0].type.tensor_type.shape.dim
    ]
    removed_outputs = [n for n in onnx_model.graph.output]
    xyhw_conf_classes = int(
        removed_outputs[0].type.tensor_type.shape.dim[1].dim_value + 5
    )
    logging.info("remove old outputs")
    for n in removed_outputs:
        onnx_model.graph.output.remove(n)

    logging.info("get the node to be modify:")

    cls_preds = []
    reg_preds = []
    for i, n in enumerate(onnx_model.graph.node):
        if "cls_preds" in n.name:
            cls_preds.append(i)
        elif "reg_preds" in n.name:
            reg_preds.append(i + 4)

    logging.info(f"{cls_preds, reg_preds = }")

    num = len(cls_preds)
    for i, (cls, reg) in enumerate(zip(cls_preds, reg_preds)):
        if num == 2:
            H_ = int(H / 2 ** (i + 4))
            W_ = int(W / 2 ** (i + 4))
        elif num == 3:
            H_ = int(H / 2 ** (i + 3))
            W_ = int(W / 2 ** (i + 3))

        sigmoid = onnx.helper.make_node(
            "Sigmoid",
            inputs=[onnx_model.graph.node[cls].output[0]],
            outputs=[f"Sigmoid_{cls}"],
        )
        onnx_model.graph.node.append(sigmoid)

        reduceMax = onnx.helper.make_node(
            "ReduceMax",
            inputs=[f"Sigmoid_{cls}"],
            outputs=[f"ReduceMax_{cls}"],
            keepdims=1,
            axes=[1],
        )
        onnx_model.graph.node.append(reduceMax)

        reshape_shape = onnx.helper.make_tensor(
            f"reshape_shape_{reg}", onnx.TensorProto.INT64, dims=[3], vals=[1, -1, 4]
        )
        onnx_model.graph.initializer.append(reshape_shape)

        reshape = onnx.helper.make_node(
            "Reshape",
            inputs=[onnx_model.graph.node[reg].output[0], f"reshape_shape_{reg}"],
            outputs=[f"Reshape_{reg}"],
            allowzero=0,
        )
        onnx_model.graph.node.append(reshape)

        transpose = onnx.helper.make_node(
            "Transpose",
            inputs=[f"Reshape_{reg}"],
            outputs=[f"Transpose_{reg}"],
            perm=[0, 2, 1],
        )
        onnx_model.graph.node.append(transpose)

        reshape_shape2 = onnx.helper.make_tensor(
            f"reshape_shape2_{reg}",
            onnx.TensorProto.INT64,
            dims=[4],
            vals=[1, 4, H_, W_],
        )
        onnx_model.graph.initializer.append(reshape_shape2)

        reshape2 = onnx.helper.make_node(
            "Reshape",
            inputs=[f"Transpose_{reg}", f"reshape_shape2_{reg}"],
            outputs=[f"Reshape_{reg}_2"],
            allowzero=0,
        )

        onnx_model.graph.node.append(reshape2)

        concat = onnx.helper.make_node(
            "Concat",
            inputs=[
                f"Reshape_{reg}_2",
                f"ReduceMax_{cls}",
                f"Sigmoid_{cls}",
            ],
            outputs=[f"output{i+1}_yolov6r2"],
            axis=1,
        )
        onnx_model.graph.node.append(concat)

        new_output = onnx.helper.make_tensor_value_info(
            f"output{i+1}_yolov6r2",
            onnx.TensorProto.FLOAT,
            [N, xyhw_conf_classes, H_, W_],
        )
        onnx_model.graph.output.extend([new_output])

    onnx.save(onnx_model, output_model)

    logging.info("Modify complete (%.2fs).\n" % (time.time() - t))


def convert(convert_tool, output_model, shaves, output_dir, name, **kwargs):
    t = time.time()

    export_dir: Path = output_dir.joinpath(name + "_openvino")
    export_dir.mkdir(parents=True, exist_ok=True)

    export_xml = export_dir.joinpath(name + ".xml")
    export_blob = export_dir.joinpath(name + ".blob")

    if convert_tool == "blobconverter":
        from zipfile import ZIP_LZMA, ZipFile

        import blobconverter

        blob_path = blobconverter.from_onnx(
            model=str(output_model),
            data_type="FP16",
            shaves=shaves,
            use_cache=False,
            version="2022.1",
            output_dir=export_dir,
            optimizer_params=[
                "--scale=255",
                "--reverse_input_channel",
                "--use_new_frontend",
            ],
            download_ir=True,
        )

        with ZipFile(blob_path, "r", ZIP_LZMA) as zip_obj:
            for name in zip_obj.namelist():
                zip_obj.extract(
                    name,
                    output_dir,
                )
        blob_path.unlink()
    elif convert_tool == "docker":
        import docker

        export_dir = Path("/io").joinpath(export_dir.name)
        export_xml = export_dir.joinpath(name + ".xml")
        export_blob = export_dir.joinpath(name + ".blob")

        client = docker.from_env()
        image = client.images.pull("openvino/ubuntu20_dev", tag="2022.1.0")
        docker_output = client.containers.run(
            image=image.tags[0],
            command='bash -c "mo -m '
            + f"{name}.onnx -n {name} -o {export_dir} "
            + "--static_shape --reverse_input_channels --scale=255 --use_new_frontend "
            + "&& echo 'MYRIAD_ENABLE_MX_BOOT NO' | tee /tmp/myriad.conf >> /dev/null "
            + "&& /opt/intel/openvino/tools/compile_tool/compile_tool -m "
            + f"{export_xml} -o {export_blob} -ip U8 "
            + f"-VPU_NUMBER_OF_SHAVES {shaves} -VPU_NUMBER_OF_CMX_SLICES {shaves} "
            + '-d MYRIAD -c /tmp/myriad.conf"',
            remove=True,
            volumes=[
                f"{output_dir}:/io",
            ],
            working_dir="/io",
        )
        logging.info(docker_output.decode("utf8"))
    else:
        import subprocess as sp

        # OpenVINO export
        logging.info("Starting to export OpenVINO...")
        OpenVINO_cmd = (
            f"mo --input_model {output_model} --output_dir {export_dir} "
            + "--data_type FP16 --scale 255 --reverse_input_channel"
        )
        try:
            sp.check_output(OpenVINO_cmd, shell=True)
            logging.info("OpenVINO export success, saved as %s" % export_dir)
        except sp.CalledProcessError:
            logging.exception("")
            logging.warning("OpenVINO export failure!")
            logging.warning(
                "By the way, you can try to export OpenVINO use:\n\t%s" % OpenVINO_cmd
            )

        # OAK Blob export
        logging.info("Then you can try to export blob use:")
        blob_cmd = (
            "echo 'MYRIAD_ENABLE_MX_BOOT ON' | tee /tmp/myriad.conf\n"
            + f"compile_tool -m {export_xml} -o {export_blob} "
            + "-ip U8 -d MYRIAD "
            + f"-VPU_NUMBER_OF_SHAVES {shaves} -VPU_NUMBER_OF_CMX_SLICES {shaves} "
            + "-c /tmp/myriad.conf"
        )
        logging.info("%s" % blob_cmd)

        logging.info(
            "compile_tool maybe in the path: "
            + "/opt/intel/openvino/tools/compile_tool/compile_tool, "
            + "if you install openvino 2022.1 with apt"
        )

    logging.info("Convert complete (%.2fs).\n" % (time.time() - t))


if __name__ == "__main__":
    modify_onnx = {
        "yolox": modify_yolox,
        "yolov5": modify_yolov5,
        "yolov6": modify_yolov6,
        "yolov7": modify_yolov7,
        "yolov8": modify_yolov8,
        "ppyoloe": modify_ppyoloe,
    }

    args = parse_args()
    logging.info(args)
    print()
    output_model = args.output_dir / (args.name + ".onnx")
    modify_onnx[args.version](input_model=args.input_model, output_model=output_model)
    if args.blob:
        convert(output_model=output_model, **vars(args))

You can use Netron to inspect the model structure.
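
For a quick scripted check, here is a minimal sketch that opens the exported model in Netron from Python (assumes the netron package is installed via pip install netron; the model path follows the earlier example and is an assumption):

# coding=utf-8
import netron

# Start a local Netron server and open the modified ONNX model in the browser.
netron.start("work_dir/ppyoloe_plus_s_fast_8xb8-80e_coco.onnx")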

▌Conversion

Local conversion with OpenVINO

onnx -> openvino

mo is a script that ships with openvino_dev 2022.1; install it with pip install openvino-dev:

mo --input_model ppyoloe_plus_s_fast_8xb8-80e_coco.onnx --scale 255 --reverse_input_channel

openvino -> blob

<path>/compile_tool -m ppyoloe_plus_s_fast_8xb8-80e_coco.xml \
-ip U8 -d MYRIAD \
-VPU_NUMBER_OF_SHAVES 6 \
-VPU_NUMBER_OF_CMX_SLICES 6

Online conversion

blobconverter web page

  • Open the page and follow the steps shown below:
    (Figure 1)
  • Modify the parameters and convert the model:
    (Figure 2)
    1. Select the onnx model
    2. Set optimizer_params to --data_type=FP16 --scale 255 --reverse_input_channel
    3. Set shaves to 6
    4. Convert
blobconverter Python code

import blobconverter

blobconverter.from_onnx(
    "ppyoloe_plus_s_fast_8xb8-80e_coco.onnx",
    optimizer_params=[
        "--scale=255",
        "--reverse_input_channel",
    ],
    shaves=6,
)
blobconverter CLI

blobconverter --onnx ppyoloe_plus_s_fast_8xb8-80e_coco.onnx -sh 6 -o . --optimizer-params "--scale=255 --reverse_input_channel"

▌DepthAI example

Correct decoding requires several configurable, network-specific parameters:

When a model is converted with export_yolo.py, these parameters are written to a json file; use the values in that file to fill in the settings below (see the sketch after this list).

  • setNumClasses – number of YOLO detection classes
  • setIouThreshold – IoU threshold
  • setConfidenceThreshold – confidence threshold; detections below it are filtered out
  • setAnchors – YOLO anchors
  • setAnchorMasks – anchor masks
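
For example, here is a minimal sketch that reads the model.json written by export_yolo.py and applies its metadata to a YoloDetectionNetwork node (the json layout is the one export_yolo.py above writes; the work_dir path is an assumption):

# coding=utf-8
import json
from pathlib import Path

import depthai as dai

pipeline = dai.Pipeline()
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)

# Read the metadata that export_yolo.py saved alongside the ONNX model
nn_config = json.loads(Path("work_dir/model.json").read_text())["nn_config"]
meta = nn_config["NN_specific_metadata"]

detectionNetwork.setNumClasses(meta["classes"])
detectionNetwork.setCoordinateSize(meta["coordinates"])
detectionNetwork.setAnchors(meta["anchors"])
detectionNetwork.setAnchorMasks(meta["anchor_masks"])
detectionNetwork.setIouThreshold(meta["iou_threshold"])
detectionNetwork.setConfidenceThreshold(meta["confidence_threshold"])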

For related examples, please see the corresponding posts for:

  • yoloX
  • yolov5
  • yolov6
  • yolov7
  • yolov8
  • yolov5lite

A complete demo script:
# coding=utf-8
import cv2
import depthai as dai
import numpy as np

numClasses = 80
model = dai.OpenVINO.Blob("ppyoloe_plus_s_fast_8xb8-80e_coco.blob")
dim = next(iter(model.networkInputs.values())).dims
W, H = dim[:2]

output_name, output_tensor = next(iter(model.networkOutputs.items()))
if "yolov6" in output_name:
    numClasses = output_tensor.dims[2] - 5
else:
    numClasses = output_tensor.dims[2] // 3 - 5

labelMap = [
    # "class_1","class_2","..."
    "class_%s" % i
    for i in range(numClasses)
]

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
xoutNN = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("image")
xoutNN.setStreamName("nn")

# Properties
camRgb.setPreviewSize(W, H)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

# Network specific settings
detectionNetwork.setBlob(model)
detectionNetwork.setConfidenceThreshold(0.5)

# Yolo specific parameters
detectionNetwork.setNumClasses(numClasses)
detectionNetwork.setCoordinateSize(4)
detectionNetwork.setAnchors([])
detectionNetwork.setAnchorMasks({})
detectionNetwork.setIouThreshold(0.5)

# Linking
camRgb.preview.link(detectionNetwork.input)
camRgb.preview.link(xoutRgb.input)
detectionNetwork.out.link(xoutNN.input)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    imageQueue = device.getOutputQueue(name="image", maxSize=4, blocking=False)
    detectQueue = device.getOutputQueue(name="nn", maxSize=4, blocking=False)

    frame = None
    detections = []

    # nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
    def frameNorm(frame, bbox):
        normVals = np.full(len(bbox), frame.shape[0])
        normVals[::2] = frame.shape[1]
        return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)

    def drawText(frame, text, org, color=(255, 255, 255), thickness=1):
        cv2.putText(
            frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), thickness + 3, cv2.LINE_AA
        )
        cv2.putText(
            frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness, cv2.LINE_AA
        )

    def drawRect(frame, topLeft, bottomRight, color=(255, 255, 255), thickness=1):
        cv2.rectangle(frame, topLeft, bottomRight, (0, 0, 0), thickness + 3)
        cv2.rectangle(frame, topLeft, bottomRight, color, thickness)

    def displayFrame(name, frame):
        color = (128, 128, 128)
        for detection in detections:
            bbox = frameNorm(
                frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax)
            )
            drawText(
                frame=frame,
                text=labelMap[detection.label],
                org=(bbox[0] + 10, bbox[1] + 20),
            )
            drawText(
                frame=frame,
                text=f"{detection.confidence:.2%}",
                org=(bbox[0] + 10, bbox[1] + 35),
            )
            drawRect(
                frame=frame,
                topLeft=(bbox[0], bbox[1]),
                bottomRight=(bbox[2], bbox[3]),
                color=color,
            )
        # Show the frame
        cv2.imshow(name, frame)

    while True:
        imageQueueData = imageQueue.tryGet()
        detectQueueData = detectQueue.tryGet()

        if imageQueueData is not None:
            frame = imageQueueData.getCvFrame()

        if detectQueueData is not None:
            detections = detectQueueData.detections

        if frame is not None:
            displayFrame("rgb", frame)

        if cv2.waitKey(1) == ord("q"):
            break

▌References

https://docs.oakchina.cn/en/latest/
https://www.oakchina.cn/selection-guide/


OAK China
| Official distributor and technical service provider of the OpenCV AI Kit in China
| Tracking new developments in AI technology and products

