TensorRT Classification Model Building and Inference

Example code for building and running a TensorRT classification model: classifier.cpp. The program builds a serialized engine from an ONNX model if one does not already exist, then runs image classification inference with it.


// TensorRT include
// headers used when building the engine
#include <NvInfer.h>

// header for the ONNX parser
#include <NvOnnxParser.h>

// header for the inference runtime
#include <NvInferRuntime.h>

// cuda include
#include <cuda_runtime.h>

// system include
#include <stdio.h>
#include <math.h>

#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <memory>
#include <functional>
#include <algorithm>
#include <unistd.h>

#include <opencv2/opencv.hpp>

using namespace std;

#define checkRuntime(op)  __check_cuda_runtime((op), #op, __FILE__, __LINE__)

bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line)
{
    if(code != cudaSuccess)
    {    
        const char* err_name = cudaGetErrorName(code);    
        const char* err_message = cudaGetErrorString(code);  
        printf("runtime error %s:%d  %s failed. \n  code = %s, message = %s\n", file, line, op, err_name, err_message);   
        return false;
    }
    return true;
}


class TRTLogger : public nvinfer1::ILogger
{
public:
    virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
    {
        if(severity <= Severity::kINFO)
        {
            // Print colored text; the format is:
            // printf("\033[47;33mtext to print\033[0m");
            // where \033[  is the start marker
            //       47     is the background color
            //       ;      is a separator
            //       33     is the text color
            //       m      ends the start marker
            //       \033[0m is the end marker
            // The background color or the text color may be omitted
            // Some color codes: https://blog.csdn.net/ericbar/article/details/79652086
            if(severity == Severity::kWARNING)
            {
                printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else if(severity <= Severity::kERROR)
            {
                printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else
            {
                printf("%s: %s\n", severity_string(severity), msg);
            }
        }
    }

    inline const char* severity_string(nvinfer1::ILogger::Severity t)
    {
        switch(t)
        {
            case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: return "internal_error";
            case nvinfer1::ILogger::Severity::kERROR:   return "error";
            case nvinfer1::ILogger::Severity::kWARNING: return "warning";
            case nvinfer1::ILogger::Severity::kINFO:    return "info";
            case nvinfer1::ILogger::Severity::kVERBOSE: return "verbose";
            default: return "unknown";
        }
    }
};

// Manage the pointers returned by the NVIDIA APIs with a shared_ptr
// so memory is released automatically and leaks are avoided
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr)
{
    return shared_ptr<_T>(ptr, [](_T* p){p->destroy();});
}
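// Note: since TensorRT 8.0 the destroy() methods are deprecated; these interface
// objects can also be released with a plain `delete`, so the deleter above could
// equally call `delete p`.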

bool exists(const string& path)
{
    return access(path.c_str(), R_OK) == 0;
}


bool build_model(std::string &onnx_model_file, std::string &engine_file, int max_batch_size=10)
{
    if(not exists(onnx_model_file))
    {
        printf("%s not has exists.\n", onnx_model_file.c_str());
        return false;
    }

    TRTLogger logger;

    // These are the basic components that are needed
    auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
    auto config = make_nvshared(builder->createBuilderConfig());
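    // createNetworkV2(1): the argument is a bit mask of NetworkDefinitionCreationFlag;
    // bit 0 (kEXPLICIT_BATCH) enables explicit-batch mode, which the ONNX parser requires.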
    auto network = make_nvshared(builder->createNetworkV2(1));

    // The ONNX parser fills the parsed result into the network, much like adding layers manually with addConv-style calls
    auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
    if(!parser->parseFromFile(onnx_model_file.c_str(), 1))
    {
        printf("Failed to parse %s\n", onnx_model_file.c_str());
        return false;
    }
    
    
    printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
    config->setMaxWorkspaceSize(1 << 28);
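    // Note: setMaxWorkspaceSize is deprecated in newer TensorRT releases; the equivalent call is
    // config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 1 << 28);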

    // If the model has multiple inputs, the profile must cover every input
    auto profile = builder->createOptimizationProfile();
    auto input_tensor = network->getInput(0);
    auto input_dims = input_tensor->getDimensions();
    
    // Configure the minimum, optimal, and maximum dimension ranges
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = max_batch_size;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);

    auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if(engine == nullptr)
    {
        printf("Build engine failed.\n");
        return false;
    }

    // Serialize the model and save it to a file
    auto model_data = make_nvshared(engine->serialize());
    FILE* f = fopen(engine_file.c_str(), "wb");
    fwrite(model_data->data(), 1, model_data->size(), f);
    fclose(f);

    // Objects are released in the reverse order of construction (handled by the shared_ptr deleters)
    printf("Done.\n");
    return true;
}

///

vector<unsigned char> load_file(const string& file)
{
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};

    in.seekg(0, ios::end);
    size_t length = in.tellg();

    std::vector<unsigned char> data;
    if (length > 0)
    {
        in.seekg(0, ios::beg);
        data.resize(length);

        in.read((char*)&data[0], length);
    }
    in.close();
    return data;
}

vector<string> load_labels(const char* file)
{
    vector<string> lines;

    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
    {
        printf("open %d failed.\n", file);
        return lines;
    }
    
    string line;
    while(getline(in, line))
    {
        lines.push_back(line);
    }
    in.close();
    return lines;
}

void inference(std::string &engine_file)
{

    TRTLogger logger;
    auto engine_data = load_file(engine_file);
    auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));
    auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    if(engine == nullptr)
    {
        printf("Deserialize cuda engine failed.\n");
        runtime->destroy();
        return;
    }

    cudaStream_t stream = nullptr;
    checkRuntime(cudaStreamCreate(&stream));
    auto execution_context = make_nvshared(engine->createExecutionContext());

    int input_batch = 1;
    int input_channel = 3;
    int input_height = 224;
    int input_width = 224;
    int input_numel = input_batch * input_channel * input_height * input_width;
    float* input_data_host = nullptr;
    float* input_data_device = nullptr;
    checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
    checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));

    ///
    // image to float
    auto image = cv::imread("./images/0.jpg");
    float mean[] = {0.406, 0.456, 0.485};
    float std[]  = {0.225, 0.224, 0.229};

    // Corresponds to the preprocessing done in the PyTorch code
    cv::resize(image, image, cv::Size(input_width, input_height));
    int image_area = image.cols * image.rows;
    unsigned char* pimage = image.data;
    float* phost_b = input_data_host + image_area * 0;
    float* phost_g = input_data_host + image_area * 1;
    float* phost_r = input_data_host + image_area * 2;
    for(int i = 0; i < image_area; ++i, pimage += 3){
        // Note: the BGR/RGB channel order is swapped here
        *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
        *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
        *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
    }
    ///
    checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));

    // Allocate the output buffer; num_classes must match the model's output dimension
    const int num_classes = 512;
    float output_data_host[num_classes];
    float* output_data_device = nullptr;
    checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

    // Determine the input size used for this inference
    auto input_dims = execution_context->getBindingDimensions(0);
    input_dims.d[0] = input_batch;

    // Set the input dimensions for this inference
    execution_context->setBindingDimensions(0, input_dims);
    float* bindings[] = {input_data_device, output_data_device};
    bool success      = execution_context->enqueueV2((void**)bindings, stream, nullptr);
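    // enqueueV2 returns false if enqueuing the inference fails; in production code the
    // value of `success` should be checked before copying results back.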
    checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
    checkRuntime(cudaStreamSynchronize(stream));

    float* prob = output_data_host;
    int predict_label = std::max_element(prob, prob + num_classes) - prob;  // index of the predicted class
    auto labels = load_labels("labels.imagenet.txt");
    auto predict_name = labels[predict_label];
    float confidence  = prob[predict_label];    // raw score of the predicted class
    printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);

    checkRuntime(cudaStreamDestroy(stream));
    checkRuntime(cudaFreeHost(input_data_host));
    checkRuntime(cudaFree(input_data_device));
    checkRuntime(cudaFree(output_data_device));
}

int main()
{
    std::string onnx_model_file = "./models/pplcnet.onnx";
    std::string engine_file = "./models/pplcnet_test.engine";

    if (not exists(engine_file))
    {
        if(!build_model(onnx_model_file, engine_file))
        {
            return -1;
        }
    }

    inference(engine_file);
    return 0;
}
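Note on the printed confidence: depending on whether the exported ONNX model already ends in a softmax layer, the engine output may be raw logits rather than probabilities. A minimal sketch of normalizing the scores on the host before reading the confidence; the softmax helper below is not part of the original code and assumes prob points at num_classes raw scores, as in the inference function above:

#include <algorithm>
#include <cmath>
#include <vector>

// Convert raw scores (logits) into probabilities with a numerically stable softmax.
std::vector<float> softmax(const float* logits, int n)
{
    std::vector<float> out(n);
    float max_logit = *std::max_element(logits, logits + n);
    float sum = 0.0f;
    for(int i = 0; i < n; ++i)
    {
        out[i] = std::exp(logits[i] - max_logit);   // subtract the max for numerical stability
        sum += out[i];
    }
    for(int i = 0; i < n; ++i)
        out[i] /= sum;
    return out;
}

// Usage inside inference(), after cudaStreamSynchronize:
//   auto probs = softmax(output_data_host, num_classes);
//   float confidence = probs[predict_label];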

CMakeLists.txt

cmake_minimum_required(VERSION 3.10)
project(pro VERSION 1.0.0 LANGUAGES C CXX CUDA)

option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/workspace/bin)

set(CUDA_GEN_CODE "-gencode=arch=compute_86,code=sm_86")
set(OpenCV_DIR    "/opt/opencv4.7.0/lib/cmake/opencv4/")
set(CUDA_DIR      "/usr/local/cuda-11.8/")
set(CUDNN_DIR     "/usr/local/cuda-11.8/")
set(TENSORRT_DIR "/opt/TensorRT-8.6.1.6")

find_package(CUDA REQUIRED)
find_package(OpenCV)

include_directories(
    ${CUDA_DIR}/include
    ${CUDNN_DIR}/include
    ${TENSORRT_DIR}/include
)

link_directories(
    ${CUDA_DIR}/lib64
    ${CUDNN_DIR}/lib64
    ${TENSORRT_DIR}/lib
)

set(CMAKE_CXX_FLAGS  "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -O0 -Wfatal-errors -pthread -w -g")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -O0 -Xcompiler -fPIC -g -w ${CUDA_GEN_CODE}")

set(CUDA_LIBS
    cuda 
    cublas 
    cudart 
    cudnn
)

set(TRT_LIBS 
    nvinfer 
    nvinfer_plugin 
    nvonnxparser
)

set(srcs
    ${PROJECT_SOURCE_DIR}/src/classifier.cpp
)

add_executable(pro ${srcs})

target_link_libraries(pro ${TRT_LIBS} ${CUDA_LIBS} pthread stdc++ dl)
target_link_libraries(pro ${OpenCV_LIBS})
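
With this CMakeLists.txt, a typical out-of-source build is `cmake -S . -B build` followed by `cmake --build build`; the resulting `pro` binary is placed under workspace/bin as set by EXECUTABLE_OUTPUT_PATH. The CUDA, cuDNN, TensorRT, and OpenCV paths above, as well as the compute_86 architecture flag, reflect one particular installation and will likely need to be adjusted to match yours.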
