Calling TensorFlow and PyTorch Models from C++

This article shows how to use C++ to load models trained in Python with TensorFlow and PyTorch and run inference with them.

Machine configuration and development environment

  • Ubuntu 16.04
  • RTX 2080Ti
  • TensorFlow 1.14.0
  • Python 3.5
  • CUDA 10.0
  • cuDNN 7.4.2

Calling a TensorFlow-trained model from C++

  • Convert the model to a .pb file

    • During training the model can be saved directly in .pb format; a model saved in .h5 format has to be converted to .pb.
    • The code below converts .h5 to .pb. Be sure to record the names of the input and output tensors during the conversion, since they are needed for the later verification steps.
      from keras.models import load_model
      import tensorflow as tf
      from keras import backend as K
      from tensorflow.python.framework import graph_io
       
      def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
          from tensorflow.python.framework.graph_util import convert_variables_to_constants
          graph = session.graph
          with graph.as_default():
              freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
              output_names = output_names or []
              output_names += [v.op.name for v in tf.global_variables()]
              input_graph_def = graph.as_graph_def()
              if clear_devices:
                  for node in input_graph_def.node:
                      node.device = ""
              frozen_graph = convert_variables_to_constants(session, input_graph_def,
                                                           output_names,freeze_var_names)
              return frozen_graph
       
       
      """----------------------------------配置路径-----------------------------------"""
      epochs=20
      h5_model_path='./my_model_ep{}.h5'.format(epochs)
      output_path='.'
      pb_model_name='my_model_ep{}.pb'.format(epochs)
       
       
      """----------------------------------导入keras模型------------------------------"""
      K.set_learning_phase(0)
      net_model = load_model(h5_model_path)
       
      print('input is :', net_model.input.name)
      print ('output is:', net_model.output.name)
       
      """----------------------------------保存为.pb格式------------------------------"""
      sess = K.get_session()
      frozen_graph = freeze_session(sess, output_names=[net_model.output.op.name])
      graph_io.write_graph(frozen_graph, output_path, pb_model_name, as_text=False)
      
  • Model verification

    • Verify the model on the Python side, using the input and output names recorded in the previous step. The script below tests the trained .h5 model; a sketch that checks the exported .pb graph directly follows it.
      import os
      import cv2
      import numpy as np
      from keras.models import load_model
       
      os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
       
      """---------载入已经训练好的模型---------"""
      new_model = load_model('my_model_ep20.h5')
       
      """---------用opencv载入一张待测图片-----"""
      # 载入图片
      src = cv2.imread('Pictures/6.png')
      cv2.imshow("test picture", src)
       
      # Convert the image to a 28*28 grayscale image
      src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
      dst = cv2.resize(src, (28, 28))
      dst=dst.astype(np.float32)
       
      # Reshape the grayscale image into a 1x28x28x1 array that the network accepts
      picture=1-dst/255
      picture=np.reshape(picture,(1,28,28,1))
       
      # Run the prediction with the model
      y = new_model.predict(picture)
      print("softmax:")
      for i,prob in enumerate(y[0]):
          print("class{},Prob:{}".format(i,prob))
      result = np.argmax(y)
      print("你写的数字是:", result)
      print("对应的概率是:",np.max(y[0]))
      cv2.waitKey(20170731)
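
    • Optionally, the exported .pb graph can also be verified directly from Python before moving to C++. Below is a minimal sketch, assuming TensorFlow 1.x and the tensor names printed during the conversion ("conv2d_1_input" and "dense_2/Softmax", the same names used in the C++ code later); substitute whatever names your own conversion printed.
      import cv2
      import numpy as np
      import tensorflow as tf

      # Load the frozen graph from the .pb file
      with tf.gfile.GFile('my_model_ep20.pb', 'rb') as f:
          graph_def = tf.GraphDef()
          graph_def.ParseFromString(f.read())

      with tf.Graph().as_default() as graph:
          tf.import_graph_def(graph_def, name='')

      # Same preprocessing as in the .h5 verification script above
      src = cv2.imread('Pictures/6.png')
      src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
      dst = cv2.resize(src, (28, 28)).astype(np.float32)
      picture = np.reshape(1 - dst / 255, (1, 28, 28, 1))

      # Run the graph using the recorded input/output tensor names
      with tf.Session(graph=graph) as sess:
          x = graph.get_tensor_by_name('conv2d_1_input:0')
          y = graph.get_tensor_by_name('dense_2/Softmax:0')
          prob = sess.run(y, feed_dict={x: picture})

      print('Predicted digit:', np.argmax(prob), 'prob:', np.max(prob))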
    
  • Build the TensorFlow C++ library

    • A tutorial on building and installing the TensorFlow C++ and C libraries can be found in my blog post:

      https://blog.csdn.net/m0_37726343/article/details/85341971

    • GPU configuration on the TensorFlow C++ side is covered in my blog post:

      https://blog.csdn.net/m0_37726343/article/details/88246495

  • Call the .pb model file from C++

    • Source code
      #include <iostream>
      #include <string>
      #include <vector>
      #include "tensorflow/core/public/session.h"
      #include "tensorflow/core/platform/env.h"
      #include "opencv2/opencv.hpp"
      using namespace std;
      using namespace cv;
      using namespace tensorflow;
       
      // Helper that converts an OpenCV Mat into a Tensor. In Python, the matrix returned by cv2.imread only
      // needs an np.reshape to be fed into the network, since a NumPy array is accepted directly at the input.
      // In the C++ version the network input must also be a Tensor, so when the image is read with OpenCV we
      // get a Mat and have to convert that Mat into a Tensor ourselves.
      void CVMat_to_Tensor(Mat img,Tensor* output_tensor,int input_rows,int input_cols)
      {
          //imshow("input image",img);
          // Resize the image to the network input size
          resize(img,img,cv::Size(input_cols,input_rows));
          //imshow("resized image",img);
       
          // Normalize
          img.convertTo(img,CV_32FC1);
          img=1-img/255;
       
          // Get a pointer to the tensor's underlying data
          float *p = output_tensor->flat<float>().data();
       
          // Wrap the tensor's memory in a Mat; writing into this Mat writes directly into the tensor
          cv::Mat tempMat(input_rows, input_cols, CV_32FC1, p);
          img.convertTo(tempMat,CV_32FC1);
       
      //    waitKey(0);
       
      }
       
      int main(int argc, char** argv )
      {
          /*--------------------------------Key configuration-------------------------------*/
          string model_path="../my_model_ep20.pb";
          string image_path="../test_images/6.png";
          int input_height =28;
          int input_width=28;
          string input_tensor_name="conv2d_1_input";
          string output_tensor_name="dense_2/Softmax";
       
          /*--------------------------------Create the session------------------------------*/
          Session* session;
          Status status = NewSession(SessionOptions(), &session); // Create a new session
       
          /*--------------------------------Read the model from the .pb file----------------*/
          GraphDef graphdef; //Graph Definition for current model
       
          Status status_load = ReadBinaryProto(Env::Default(), model_path, &graphdef); // Read the graph definition from the .pb file
          if (!status_load.ok()) {
              cout << "ERROR: Loading model failed..." << model_path << std::endl;
              cout << status_load.ToString() << "\n";
              return -1;
          }
          Status status_create = session->Create(graphdef); // Import the graph into the session
          if (!status_create.ok()) {
              cout << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
              return -1;
          }
          cout << "<----Successfully created session and load graph.------->"<< endl;
       
          /*---------------------------------Load the test image------------------------------*/
          cout << endl << "<------------Loading test image-------------->" << endl;
          Mat img = imread(image_path, 0);   // read as a single-channel grayscale image
          if (img.empty()) {
              cout << "ERROR: Can't open the image: " << image_path << endl;
              return -1;
          }

          // Create a tensor that serves as the network input
          Tensor resized_tensor(DT_FLOAT, TensorShape({1, input_height, input_width, 1}));

          // Copy the OpenCV Mat into the tensor
          CVMat_to_Tensor(img, &resized_tensor, input_height, input_width);
          cout << resized_tensor.DebugString() << endl;

          /*---------------------------------Run the network----------------------------------*/
          cout << endl << "<-------------Running the model with the test image--------------->" << endl;
          // Forward pass; the outputs are returned as a vector of tensors
          vector<Tensor> outputs;
          string output_node = output_tensor_name;
          Status status_run = session->Run({{input_tensor_name, resized_tensor}}, {output_node}, {}, &outputs);
       
          if (!status_run.ok()) {
              cout << "ERROR: RUN failed..."  << std::endl;
              cout << status_run.ToString() << "\n";
              return -1;
          }
          // Extract the output values
          cout << "Output tensor size:" << outputs.size() << std::endl;
          for (std::size_t i = 0; i < outputs.size(); i++) {
              cout << outputs[i].DebugString() << endl;
          }

          // Extract the prediction from the first output tensor
          Tensor t = outputs[0];                   // Fetch the first tensor
          auto tmap = t.tensor<float, 2>();        // Tensor Shape: [batch_size, target_class_num]
          int output_dim = t.shape().dim_size(1);  // Get the target_class_num from 1st dimension
       
          // Argmax: Get Final Prediction Label and Probability
          int output_class_id = -1;
          double output_prob = 0.0;
          for (int j = 0; j < output_dim; j++)
          {
              cout << "Class " << j << " prob:" << tmap(0, j) << "," << std::endl;
              if (tmap(0, j) >= output_prob) {
                  output_class_id = j;
                  output_prob = tmap(0, j);
              }
          }
       
          // Print the final result
          cout << "Final class id: " << output_class_id << std::endl;
          cout << "Final class prob: " << output_prob << std::endl;
       
          return 0;
      }
      
    • CMake build
      cmake_minimum_required(VERSION 2.8)
      project(demo)
      set(CMAKE_CXX_STANDARD 11)
      set(TENSORFLOW_DIR /home/linan/tensorflow)
      find_package(OpenCV 3 REQUIRED)
      
      
      include_directories(${TENSORFLOW_DIR}
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/gen/proto
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/gen/protobuf-host/include
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/eigen
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/nsync/public
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/absl
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/gen/lib
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/gen/protobuf-host/lib
      ${TENSORFLOW_DIR}/tensorflow/contrib/makefile/downloads/nsync/builds/default.linux.c++11
      ${TENSORFLOW_DIR}/bazel-bin/tensorflow
      ${OpenCV_INCLUDE_DIRS}) 
      
      # Library search path for the project
      link_directories(${TENSORFLOW_DIR}/tensorflow/bazel-bin/tensorflow) 
      
      add_executable(demo test.cpp)

      # Link the executable against the required shared libraries:
      # libtensorflow_cc.so, libtensorflow_framework.so and the OpenCV libraries
      target_link_libraries(demo tensorflow_cc tensorflow_framework ${OpenCV_LIBS})
      

Calling a PyTorch-trained model from C++

  • Convert the model to Torch Script via tracing

    • To convert a PyTorch model to Torch Script via tracing, an instance of the model together with an example input must be passed to the torch.jit.trace function. This produces a torch.jit.ScriptModule object with a trace of the model evaluation embedded in the module's forward method:
      import torch
      import torchvision
      
      # An instance of your model.
      model = torchvision.models.resnet18()
      
      # An example input you would normally provide to your model's forward() method.
      example = torch.rand(1, 3, 224, 224)
      
      # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
      traced_script_module = torch.jit.trace(model, example)
      
  • Serialize the ScriptModule to a file

    • To perform this serialization, simply call save on the module and pass it a file name (a quick load-back check follows the snippet below):
      traced_script_module.save("model.pt")
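
    • A minimal sanity check, assuming the model, example and traced_script_module objects from the tracing step above: reload the saved file with torch.jit.load and compare its output with the eager model.
      import torch

      # Reload the serialized module from disk
      loaded = torch.jit.load("model.pt")

      # The traced and reloaded module should reproduce the eager model's output
      with torch.no_grad():
          eager_out = model(example)
          loaded_out = loaded(example)
      print(torch.allclose(eager_out, loaded_out, atol=1e-5))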
      
  • Call libtorch from C++ code

    • Download libtorch
      • First, download libtorch from the PyTorch website. Prebuilt libraries for Windows/Linux/Mac are provided, which saves you from building the library yourself: https://pytorch.org/
      • After downloading, simply extract it to a directory.
    • Write the C++ code
      • Have the model file converted in the previous step ready
      • Prepare a few test images

      Tips: during training, images were loaded with PIL.Image (3-channel RGB), resized to 224 x 224 and then converted with ToTensor. The C++ libtorch code therefore has to apply the same preprocessing to the images (a Python sketch of the training-side transform is given after the C++ source below).

      1. cv::imread() reads a 3-channel BGR image by default, so the B/R channels need to be swapped; this is done with cv::cvtColor().

      2. Scaling is done with cv::resize().

      3. An image matrix read by OpenCV is stored as H x W x C, while a PyTorch tensor is stored as N x C x H x W, so a transpose is required. This corresponds to np.transpose() and is implemented here with tensor.permute(), which has the same effect.

      4. Data normalization is done with tensor.div(255) (in the code below the equivalent scaling is applied via cv::Mat::convertTo).

        // One-stop header.
        #include <torch/script.h>
        
        // headers for opencv
        #include <opencv2/highgui/highgui.hpp>
        #include <opencv2/imgproc/imgproc.hpp>
        #include <opencv2/opencv.hpp>
        
        #include <fstream>
        #include <iostream>
        #include <memory>
        #include <string>
        #include <vector>
        
        #define kIMAGE_SIZE 224
        #define kCHANNELS 3
        #define kTOP_K 3
        
        bool LoadImage(std::string file_name, cv::Mat &image) {
          image = cv::imread(file_name);  // CV_8UC3
          if (image.empty() || !image.data) {
            return false;
          }
          cv::cvtColor(image, image, CV_BGR2RGB);
          std::cout << "== image size: " << image.size() << " ==" << std::endl;
        
          // scale image to fit
          cv::Size scale(kIMAGE_SIZE, kIMAGE_SIZE);
          cv::resize(image, image, scale);
          std::cout << "== simply resize: " << image.size() << " ==" << std::endl;
        
          // convert [unsigned int] to [float]
          image.convertTo(image, CV_32FC3, 1.0f / 255.0f);
        
          return true;
        }
        
        bool LoadImageNetLabel(std::string file_name,
                               std::vector<std::string> &labels) {
          std::ifstream ifs(file_name);
          if (!ifs) {
            return false;
          }
          std::string line;
          while (std::getline(ifs, line)) {
            labels.push_back(line);
          }
          return true;
        }
        
        int main(int argc, const char *argv[]) {
          if (argc != 3) {
            std::cerr << "Usage: classifier <path-to-exported-script-module> "
                         "<path-to-label-file>"
                      << std::endl;
            return -1;
          }
        
          std::shared_ptr<torch::jit::script::Module> module =
              torch::jit::load(argv[1]);
          std::cout << "== Switch to GPU mode" << std::endl;
          // to GPU
          module->to(at::kCUDA);
        
          assert(module != nullptr);
          std::cout << "== ResNet50 loaded!\n";
          std::vector<std::string> labels;
          if (LoadImageNetLabel(argv[2], labels)) {
            std::cout << "== Label loaded! Let's try it\n";
          } else {
            std::cerr << "Please check your label file path." << std::endl;
            return -1;
          }
        
          std::string file_name = "";
          cv::Mat image;
          while (true) {
            std::cout << "== Input image path: [enter Q to exit]" << std::endl;
            std::cin >> file_name;
            if (file_name == "Q") {
              break;
            }
            if (LoadImage(file_name, image)) {
              auto input_tensor = torch::from_blob(
                  image.data, {1, kIMAGE_SIZE, kIMAGE_SIZE, kCHANNELS});
              input_tensor = input_tensor.permute({0, 3, 1, 2});
              input_tensor[0][0] = input_tensor[0][0].sub_(0.485).div_(0.229);
              input_tensor[0][1] = input_tensor[0][1].sub_(0.456).div_(0.224);
              input_tensor[0][2] = input_tensor[0][2].sub_(0.406).div_(0.225);
        
              // to GPU
              input_tensor = input_tensor.to(at::kCUDA);
        
              torch::Tensor out_tensor = module->forward({input_tensor}).toTensor();
        
              auto results = out_tensor.sort(-1, true);
              auto softmaxs = std::get<0>(results)[0].softmax(0);
              auto indexs = std::get<1>(results)[0];
        
              for (int i = 0; i < kTOP_K; ++i) {
                auto idx = indexs[i].item<int>();
                std::cout << "    ============= Top-" << i + 1
                          << " =============" << std::endl;
                std::cout << "    Label:  " << labels[idx] << std::endl;
                std::cout << "    With Probability:  "
                          << softmaxs[i].item<float>() * 100.0f << "%" << std::endl;
              }
        
            } else {
              std::cout << "Can't load the image, please check your path." << std::endl;
            }
          }
        }
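
      As a reference for the preprocessing steps listed above, here is a minimal Python sketch of the training-side transform described in the Tips (PIL load, resize to 224 x 224, ToTensor), together with the per-channel ImageNet normalization that the C++ code applies; the file name is just a placeholder.
        from PIL import Image
        from torchvision import transforms

        # Training-side preprocessing: RGB load, resize to 224 x 224, ToTensor (scales to [0, 1]),
        # then the same per-channel normalization the C++ code applies
        preprocess = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

        img = Image.open("test.jpg").convert("RGB")   # placeholder file name
        input_tensor = preprocess(img).unsqueeze(0)   # shape: 1 x 3 x 224 x 224, matches the C++ input
        print(input_tensor.shape)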
        
    • CMake build

      # Minimum required CMake version
      cmake_minimum_required(VERSION 2.8)
      # Project name
      project(demo)
      # C++ standard and compiler settings
      set(CMAKE_CXX_STANDARD 11)
      SET(CMAKE_C_COMPILER "/usr/bin/gcc")
      SET(CMAKE_CXX_COMPILER "/usr/bin/g++")
      
      SET(CMAKE_PREFIX_PATH /home/linan/libtorch)
      
      # Find PyTorch (libtorch)
      find_package(Torch REQUIRED)
      message(STATUS "Pytorch status:")
      message(STATUS " libraries: ${TORCH_LIBRARIES}")
      
      # Find the OpenCV library
      
      find_package( OpenCV 3 REQUIRED )
      
      # Header include directories
      include_directories(
      ${Torch_INCLUDE_DIRS}
      ${OpenCV_INCLUDE_DIRS}
      ./include)  
      
      aux_source_directory(./src DIRSRCS)
      add_executable(demo ${DIRSRCS})
      target_link_libraries(demo ${OpenCV_LIBS} ${TORCH_LIBRARIES})
      

END
