TensorRT Deployment for Image Classification: Engine Generation and Deserialization Inference (Part 2)

1) Creating the logger class

#include <cstdio>
#include <NvInfer.h>

class TRTLogger : public nvinfer1::ILogger {
public:
	virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override {
		// Drop kVERBOSE messages; warnings print in yellow, errors in red (ANSI colors)
		if (severity <= Severity::kINFO) {
			if (severity == Severity::kWARNING) {
				printf("\033[33mWARNING: %s\033[0m\n", msg);
			}
			else if (severity <= Severity::kERROR) {
				printf("\033[31mERROR: %s\033[0m\n", msg);
			}
			else {
				printf("INFO: %s\n", msg);
			}
		}
	}
} logger;

The logger is mainly used to print TensorRT's warning and error messages, which makes debugging much easier.
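
To see the filtering in action, the logger can be invoked directly, the same way TensorRT calls it internally. This is a hypothetical check, not part of the original code:

// Hypothetical sanity check: calls the global logger the way TensorRT would.
void logger_demo() {
	using Severity = nvinfer1::ILogger::Severity;
	logger.log(Severity::kWARNING, "prints in yellow");
	logger.log(Severity::kERROR, "prints in red");
	logger.log(Severity::kVERBOSE, "filtered out, never printed");
}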

bool build_model() {
	// The basic components needed for building: builder, config, and network
	auto builder = nvinfer1::createInferBuilder(logger);
	auto config = builder->createBuilderConfig();
	// The flag 1 requests an explicit-batch network, which the ONNX parser requires
	auto network = builder->createNetworkV2(1);

	// The onnx parser fills its parsing results into the network, much like adding layers by hand with addConv-style calls
	auto parser = nvonnxparser::createParser(*network, logger);
	if (!parser->parseFromFile("classifier.onnx", 1)) {
		printf("Failed to parse classifier.onnx\n");

		// Note: the objects created above have not been released at this point, so this early return leaks memory; a more elegant fix is considered later (see the RAII sketch after this function)
		return false;
	}

	int maxBatchSize = 10;
	printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
	config->setMaxWorkspaceSize(1 << 28);

	// One profile must cover every dynamic input: if the model has several
	// inputs, set min/opt/max dimensions for each of them within the profile
	auto profile = builder->createOptimizationProfile();
	auto input_tensor = network->getInput(0);
	auto input_dims = input_tensor->getDimensions();

	// Configure the minimum, optimal, and maximum input shape ranges
	input_dims.d[0] = 1;
	profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
	profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
	input_dims.d[0] = maxBatchSize;
	profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
	config->addOptimizationProfile(profile);

	auto engine = builder->buildEngineWithConfig(*network, *config);
	if (engine == nullptr) {
		printf("Build engine failed.\n");
		return false;
	}

	// Serialize the model and store it to a file
	auto model_data = engine->serialize();
	FILE* f;
	fopen_s(&f, "engine.trtmodel", "wb");
	fwrite(model_data->data(), 1, model_data->size(), f);
	fclose(f);

	// Release everything in reverse order of construction
	model_data->destroy();
	parser->destroy();
	engine->destroy();
	network->destroy();
	config->destroy();
	builder->destroy();

	printf("Done.\n");
	return true;
}
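
As noted inside build_model(), the early-return path still leaks the builder, config, network, and parser. A common way to make cleanup automatic on every path is a std::unique_ptr with a deleter that calls destroy(); this is a sketch under the assumption that your TensorRT version releases objects via destroy(), as the code above does:

#include <memory>

// Sketch: RAII wrapper so every exit path releases TensorRT objects.
struct TRTDestroy {
	template <typename T>
	void operator()(T* obj) const { if (obj) obj->destroy(); }
};

template <typename T>
using trt_ptr = std::unique_ptr<T, TRTDestroy>;

// Usage inside build_model():
//   trt_ptr<nvinfer1::IBuilder> builder{ nvinfer1::createInferBuilder(logger) };
//   trt_ptr<nvinfer1::IBuilderConfig> config{ builder->createBuilderConfig() };
//   ...early returns then release everything, in the right order, automatically.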

2) Engine building boils down to configuring the builder, parsing the ONNX file, and serializing the resulting engine to disk so that next time it can simply be deserialized; parsing the ONNX model and building the engine from scratch is quite time-consuming.
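
The inference() function below relies on a load_file helper that this post does not show. A minimal sketch that reads the serialized engine into a byte buffer might look like this:

#include <fstream>
#include <string>
#include <vector>

// Sketch of the assumed load_file helper: reads a whole binary file into memory.
static std::vector<unsigned char> load_file(const std::string& path) {
	std::ifstream in(path, std::ios::binary | std::ios::ate);  // open at the end to learn the size
	if (!in) return {};
	size_t size = (size_t)in.tellg();
	std::vector<unsigned char> data(size);
	in.seekg(0);
	in.read((char*)data.data(), size);
	return data;
}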

void inference() {

	TRTLogger logger;
	auto engine_data = load_file("engine.trtmodel");
	auto runtime = nvinfer1::createInferRuntime(logger);
	auto engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
	if (engine == nullptr) {
		printf("Deserialize cuda engine failed.\n");
		runtime->destroy();
		return;
	}

	cudaStream_t stream = nullptr;
	cudaStreamCreate(&stream);
	auto execution_context = engine->createExecutionContext();

	int input_batch = 1;
	int input_channel = 3;
	int input_height = 224;
	int input_width = 224;
	int input_numel = input_batch * input_channel * input_height * input_width;
	float* input_data_host = nullptr;
	float* input_data_device = nullptr;
	cudaMallocHost(&input_data_host, input_numel * sizeof(float));
	cudaMalloc(&input_data_device, input_numel * sizeof(float));

	///////////////////////////////////////////////////
	// Preprocessing: convert the image to a normalized float tensor
	auto image = cv::imread("input_image.jpg");   // cv::imread loads the image in BGR order
	// ImageNet mean/std, listed in BGR order to match the cv::imread channel layout
	float mean[] = { 0.406, 0.456, 0.485 };
	float std[] = { 0.225, 0.224, 0.229 };

	// Mirrors the PyTorch preprocessing: resize, scale to [0,1], normalize, HWC -> CHW
	cv::resize(image, image, cv::Size(input_width, input_height));
	int image_area = image.cols * image.rows;
	unsigned char* pimage = image.data;
	float* phost_b = input_data_host + image_area * 0;
	float* phost_g = input_data_host + image_area * 1;
	float* phost_r = input_data_host + image_area * 2;
	for (int i = 0; i < image_area; ++i, pimage += 3) {
		// Note the channel swap: pimage is BGR, so pimage[2] (R) goes to plane 0
		// and pimage[0] (B) to plane 2, producing an RGB planar (CHW) tensor
		*phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
		*phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
		*phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
	}
	///////////////////////////////////////////////////
	// Copy the preprocessed tensor to the GPU asynchronously
	cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream);

	// The classifier produces 1000 ImageNet class scores
	const int num_classes = 1000;
	float output_data_host[num_classes];
	float* output_data_device = nullptr;
	cudaMalloc(&output_data_device, sizeof(output_data_host));

	// Query the input binding dimensions and set the batch size used for this inference
	auto input_dims = execution_context->getBindingDimensions(0);
	input_dims.d[0] = input_batch;

	// Tell the execution context the actual input shape before enqueueing
	execution_context->setBindingDimensions(0, input_dims);
	float* bindings[] = { input_data_device, output_data_device };
	bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
	cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream);
	cudaStreamSynchronize(stream);

	float* prob = output_data_host;
	int predict_label = std::max_element(prob, prob + num_classes) - prob;  // index of the highest-scoring class (needs <algorithm>)
	auto labels = load_labels("labels.txt");
	auto predict_name = labels[predict_label];
	float confidence = prob[predict_label];    // score of the predicted class
	printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);

	// Release resources in reverse order of creation
	cudaStreamDestroy(stream);
	cudaFreeHost(input_data_host);
	cudaFree(input_data_device);
	cudaFree(output_data_device);
	execution_context->destroy();
	engine->destroy();
	runtime->destroy();
}
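
load_labels is not shown in the post either; a plausible sketch, reusing the <fstream>/<string>/<vector> headers from the load_file sketch, reads one class name per line from labels.txt:

// Sketch of the assumed load_labels helper: one class name per line.
static std::vector<std::string> load_labels(const std::string& path) {
	std::vector<std::string> labels;
	std::ifstream in(path);
	std::string line;
	while (std::getline(in, line))
		labels.push_back(line);
	return labels;
}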

3) Inference consists of deserializing the engine file to rebuild the inference engine, preprocessing the image, copying the data to the GPU for the parallel computation, and copying the result back to the CPU to read off the prediction.
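
Putting the two stages together, a minimal driver might look like the following. The engine is built only once; later runs skip straight to deserialization (file names as used above):

#include <fstream>

// Hypothetical driver: build the engine once, then run inference on it.
int main() {
	// Skip the expensive ONNX parse + build if a serialized engine already exists
	if (!std::ifstream("engine.trtmodel").good()) {
		if (!build_model())
			return -1;
	}
	inference();
	return 0;
}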
