keras、mxnet、pytorch、onnx 测试代码,转 onnx 代码,caffe 测试

1、keras读取一张图片测试,转换onnx

import numpy as np
from keras.preprocessing import image
import keras2onnx
import onnxruntime
import cv2
from PIL import Image
def keras_onnx_test(img_path='./data/vehicle_0002.jpg',
                    model_path='./model.onnx', size=224):
    """Run one image through an exported ONNX model and print the argmax index.

    Parameters
    ----------
    img_path : str
        Path of the test image (read with OpenCV, i.e. BGR channel order).
    model_path : str
        Path of the ONNX model converted from the Keras model.
    size : int
        Side length the image is resized to before inference.

    Raises
    ------
    FileNotFoundError
        If the image cannot be read.
    """
    img = cv2.imread(img_path)
    if img is None:
        # Raising is friendlier than the original exit(-1): it does not kill
        # the interpreter when this is called from a notebook or test.
        raise FileNotFoundError('cannot read image: %s' % img_path)

    # Preprocess: the original converted cv2 -> PIL -> numpy, which is an
    # identity round-trip; resize directly instead.
    img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC)
    # Add the batch axis, scale pixels to [0, 1], cast to the float32 input
    # dtype the ONNX model expects.
    data = np.expand_dims(img, axis=0).astype(np.float32) / 255.

    sess = onnxruntime.InferenceSession(model_path)
    input_name = sess.get_inputs()[0].name  # e.g. 'data'
    # res is the list of all model outputs for the single input blob `data`.
    res = sess.run(None, {input_name: data})
    ind = np.argmax(res)
    print(ind)
转换onnx
#coding:utf-8
# Convert a Keras .h5 model (YOLO here) to an ONNX file with keras2onnx.
import win_unicode_console           # third-party console fix for Unicode output on Windows
win_unicode_console.enable()          # enable the Unicode console hook
import os                            # imported before keras because keras needs the TensorFlow backend
import keras                         # Keras itself
import keras2onnx                    # the Keras -> ONNX conversion tool
import onnx                          # ONNX, used to save the converted model
from keras.models import load_model                       # loads the .h5 model to be converted
model = load_model('./yolo.h5')    # load the model to convert (yolo.h5) into `model`
onnx_model = keras2onnx.convert_keras(model, model.name)   # convert the Keras model graph to an ONNX model
temp_model_file = 'E:/jupyter_notebook/keras/yolo.onnx'    # output path for the ONNX model (yolo.onnx)
onnx.save_model(onnx_model, temp_model_file)               # write the ONNX model to disk

2、mxnet 测试:对比 mxnet 原模型、转换后的 onnx、转换后的 caffe 三者的输出

#!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
sys.path.insert(0, "/home/shiyy/nas/NVCaffe/python")
import caffe
import onnx
import numpy as np
import caffe2.python.onnx.backend as onnx_caffe2_backend

import mxnet.contrib.onnx as onnx_mxnet
import mxnet as mx
from mxnet import gluon,nd




def mxnet_get_model(ctx, image_size, model_str, layer):
  """Load an MXNet checkpoint and build a Module truncated at an internal layer.

  model_str is "<prefix>,<epoch>" (e.g. "./resnet18,0"); the returned Module
  outputs the internal symbol named `<layer>_output` and is bound for a
  single (1, 3, H, W) input called 'data'.
  """
  parts = model_str.split(',')
  assert len(parts) == 2
  prefix, epoch_text = parts
  epoch = int(epoch_text)
  print('loading', prefix, epoch)
  sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
  # Cut the symbol graph at the requested internal layer.
  sym = sym.get_internals()[layer + '_output']
  model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
  model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
  model.set_params(arg_params, aux_params)
  # Also expose the raw parameter dicts on the module for callers.
  model.aux_params = aux_params
  model.arg_params = arg_params
  return model

def mxnet_get_feature(img, mx_model, layer, imgsize):
  """Forward one batch through an MXNet checkpoint and return the features.

  img is assumed to already be an NCHW mx.nd.array batch; mx_model is the
  "<prefix>,<epoch>" checkpoint string accepted by mxnet_get_model.
  """
  # BGR->RGB and HWC->CHW preprocessing is the caller's responsibility:
  # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
  # img = np.transpose(img, (2, 0, 1))
  ctx = mx.cpu(0)  # switch to mx.gpu(0) for GPU inference
  side = int(imgsize)
  model = mxnet_get_model(ctx, (side, side), mx_model, layer)
  # input_blob = np.expand_dims(img, axis=0)
  # data = mx.nd.array(input_blob)
  batch = mx.io.DataBatch(data=(img,))
  model.forward(batch, is_train=False)
  return model.get_outputs()[0].asnumpy()

def test_mx_onnx():
    """Compare the outputs of the same network run three ways.

    1. The ONNX file imported back into MXNet (./11.onnx).
    2. The original MXNet checkpoint (./resnet18,0).
    3. The Caffe conversion (./mynet.prototxt / ./mynet.caffemodel).

    All three are fed the same random NCHW batch; the ONNX and Caffe
    outputs must agree to 5 decimal places.
    """
    onnx_path = "./11.onnx"
    sym, arg_params, aux_params = onnx_mxnet.import_model(onnx_path)
    model_metadata = onnx_mxnet.get_model_metadata(onnx_path)
    # Obtain the input names from the model metadata API, e.g.
    # {'input_tensor_data': [(u'data', (1L, 3L, 224L, 224L))]} -> input is 'data'
    print(model_metadata)
    data_names = [inputs[0] for inputs in model_metadata.get('input_tensor_data')]
    print("input variable:", data_names)
    ctx = mx.cpu()

    imgsize = 224
    # Graph inputs that are not parameters are the real data inputs.
    data_names = [graph_input for graph_input in sym.list_inputs()
                  if graph_input not in arg_params and graph_input not in aux_params]
    print(data_names)  # ['data']
    onnx_mod = mx.mod.Module(symbol=sym, data_names=['data'], context=ctx, label_names=None)
    batch = nd.array(nd.random.randn(1, 3, imgsize, imgsize), ctx=ctx).astype(np.float32)
    onnx_mod.bind(for_training=False, data_shapes=[(data_names[0], batch.shape)], label_shapes=None)
    onnx_mod.set_params(arg_params=arg_params, aux_params=aux_params, allow_missing=True, allow_extra=True)
    from collections import namedtuple
    Batch = namedtuple("Batch", ["data"])
    onnx_mod.forward(Batch([batch]))  # input layout is [b, c, h, w]
    results = []
    out = onnx_mod.get_outputs()
    results.extend([o for o in out[0].asnumpy()])

    ##### original MXNet checkpoint output
    mxnet_model_epoch = './resnet18,0'
    mxnet_feature = mxnet_get_feature(batch, mxnet_model_epoch, 'flatten0', imgsize)

    #### Caffe output
    caffe_model = caffe.Net("./mynet.prototxt", "./mynet.caffemodel", caffe.TEST)
    # reshape network inputs to match the batch
    blobs = {}
    blobs["data"] = batch.asnumpy()
    caffe_model.blobs["data"].reshape(*blobs["data"].shape)
    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    output_blobs = caffe_model.forward_all(**forward_kwargs)
    caffe_out = output_blobs["flatten0"].flatten()

    # print(results[0][0:10])
    # print("mx onnx model out length", len(results[0]))

    # Bug fix: the original used Python 2 `print x` statements below, which
    # are a SyntaxError under Python 3; all prints now use the function form.
    print(mxnet_feature[0][0:10])
    print("mxnet model out length", len(mxnet_feature[0]))
    print("\n ######################################## \n")

    print(caffe_out[0:10])
    print("to caffe model length", len(caffe_out))
    print("\n ######################################## \n")
    np.testing.assert_almost_equal(results[0], caffe_out, decimal=5)
    print("Exported model has been executed decimal=5 and the result looks good!")

test_mx_onnx()
单独测试由 mxnet 转换得到的 onnx 模型的接口
# Standalone test snippet for an ONNX model converted from MXNet.
import mxnet.contrib.onnx as onnx_mxnet
import mxnet as mx  # bug fix: the code below uses the `mx` alias, which the
                    # original bare `import mxnet` never defined (NameError)
# `model_path` (path of the ONNX model) and `data` (the input ndarray) must
# be defined by the surrounding script before this snippet runs.
sym, arg, aux = onnx_mxnet.import_model(model_path)  # path of the ONNX model
# Graph inputs that are not parameters are the real data inputs.
data_names = [graph_input for graph_input in sym.list_inputs()
                      if graph_input not in arg and graph_input not in aux]
print(data_names)  # the input names, e.g. ['data']
mod = mx.mod.Module(symbol=sym, data_names=data_names, context=mx.cpu(), label_names=None)
mod.bind(for_training=False, data_shapes=[(data_names[0], data.shape)], label_shapes=None)  # `data` is the model input
mod.set_params(arg_params=arg, aux_params=aux, allow_missing=True, allow_extra=True)
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.array(data)]))
output = mod.get_outputs()
mxnet转onnx
def mxnet_onnx():
    """Export an MXNet symbol/params checkpoint to ONNX, validate it, and
    patch BatchNormalization `spatial` attributes for onnxruntime.

    Note: the export step requires onnx==1.3.0.
    """
    ###########################################
    input_shape = (1, 3, 224, 224)
    sym = 'resnet18-symbol.json'
    params = 'resnet18-0000.params'
    #######################################
    onnx_file = "./mynet.onnx"  # save model name
    print("************************")
    # Call the export API; it returns the path of the converted ONNX model.
    # With verbose=True it prints node information (input 'data', output
    # node 'softmax').
    converted_model_path = onnx_mxnet.export_model(sym,
        params,
        [input_shape],
        np.float32,
        onnx_file,
        verbose=True
    )

    print("succeeded to onnx")  # must use onnx==1.3.0
    # Load the exported model and check that the converted protobuf is valid.
    model_proto = onnx.load_model(converted_model_path)
    # Bug fix: the original called `checker.check_graph`, but `checker` was
    # never imported; the checker lives in the onnx package.
    onnx.checker.check_graph(model_proto.graph)
    print("onnx check succeeded")

    # onnxruntime rejects spatial=0 on BatchNormalization nodes; forcing the
    # attribute to 1 does not affect the numerical output.
    model = onnx.load(r'mynet.onnx')
    for node in model.graph.node:
        if node.op_type == "BatchNormalization":
            for attr in node.attribute:
                if attr.name == "spatial":  # 0 to 1
                    attr.i = 1
    onnx.save(model, r'mynet.onnx')
    print("mxnet onnx modify spatial==0 to 1, resave ok")

3、pytorch onnx ,test

import onnx
import torch
import numpy as np
# NOTE(review): `input` shadows the builtin, and the function name `onnx`
# shadows the onnx module imported above — the function only works because
# it re-imports onnx locally. Consider renaming both.
input = torch.randn(32, 3, 300, 300, requires_grad=False, dtype=torch.float)
def onnx(input):
    # Run `input` through ./graph_sim.onnx via the caffe2 backend and return
    # the flattened output array.
    import onnx
    onnx_model = onnx.load("./graph_sim.onnx")
    # onnx_caffe2_backend (caffe2.python.onnx.backend) must be imported by
    # the surrounding script — it is not imported in this snippet.
    prepared_backend = onnx_caffe2_backend.prepare(onnx_model)
    # Feed the tensor under the graph's first input name.
    W = {onnx_model.graph.input[0].name: input.data.numpy()}
    # "576" is the name of the output tensor in this particular exported graph.
    c2_out = prepared_backend.run(W)["576"].flatten()
    return c2_out
def pytorch_out(input):
    """Run `input` (an NCHW float tensor) through pretrained VGG-16.

    Returns the raw network output tensor.
    """
    import torchvision
    model = torchvision.models.vgg16(pretrained=True)
    model.eval()  # must be called: freezes dropout / batch-norm for inference
    # Bug fix: the original called torch.no_grad() as a bare statement, which
    # creates and immediately discards the context manager without ever
    # disabling gradient tracking; it must wrap the forward pass.
    with torch.no_grad():
        output = model(input)
    # print(output[0].flatten()[70:80])
    return output
pytorch to onnx
def torch_onnx():
    """Export an EfficientNet-b0 model to ./mynet.onnx with named tensors."""
    dummy_input = torch.randn(1, 3, 224, 224, dtype=torch.float)
    from efficientnet_pytorch import EfficientNet
    model = EfficientNet.from_name('efficientnet-b0')
    #model = torchvision.models.resnet18(pretrained=True)
    # Dump the parameter names so the exported graph can be cross-checked.
    for key in model.state_dict():
        print (key)
    # Named inputs/outputs are needed downstream by TensorRT.
    input_names = ["data"] + ["mynet_%d" % i for i in range(32)]
    output_names = ["fc"]
    torch.onnx.export(model,
                      dummy_input,
                      "./mynet.onnx",
                      verbose=True,
                      input_names=input_names,
                      output_names=output_names)
    print ("onnx convert succed")
import torch
torch_model = torch.load("save.pt") # load the saved PyTorch model
batch_size = 1  # batch size
input_shape = (3,244,244)   # input shape; NOTE(review): 244 may be a typo for the usual 224 — confirm

x = torch.randn(batch_size,*input_shape)		# build a dummy input tensor for tracing
export_onnx_file = "test.onnx"					# target ONNX file name
torch.onnx.export(torch_model,
                    x,
                    export_onnx_file,
                    opset_version=10,  # pin the opset to avoid ONNX version incompatibilities (optional)
                    do_constant_folding=True,	# run constant-folding optimization
                    input_names=["input"],		# input tensor name
                    output_names=["output"],	# output tensor name
                    dynamic_axes={"input":{0:"batch_size"},		# make the batch dimension variable
                                    "output":{0:"batch_size"}})
# Note: dynamic_axes enables variable batch size; remove it for a fixed batch size.

4、caffe 图片测试

# Run one image through a Caffe model and read the "fc" output blob.
import sys
sys.path.insert(0, "/home/caffe/python")
import caffe  # bug fix: caffe was used below but never imported
import cv2    # bug fix: cv2 was used below but never imported
import numpy as np

# `model_file`, `pretrained_file` and `input_name` must be defined by the
# surrounding script before this snippet runs.
net = caffe.Net(model_file, pretrained_file, caffe.TEST)

img = cv2.imread("path")
img = cv2.resize(img, (640, 640))
mean = (104, 117, 123)
# Bug fix: cast to float32 before subtracting the BGR mean — the original
# in-place `img -= mean` on a uint8 array is an invalid same-kind cast.
img = img.astype(np.float32) - mean
# Bug fix: the original called img.transpose(2, 0, 1) without assigning the
# result, so the HWC layout was never actually changed.
img = img.transpose(2, 0, 1)  # HWC to CHW
new_shape = [1, img.shape[0], img.shape[1], img.shape[2]]  # CHW to NCHW
img = img.reshape(new_shape)
net.blobs[input_name].data[...] = img
net.forward()

out = net.blobs["fc"].data

你可能感兴趣的:(keras,mxnet ,pytorch,onnx测试代码 ,to onnx 代码, caffe 测试)