Part 1: Testing OpenVINO's own demos
cd /app/intel/openvino/deployment_tools/inference_engine/samples
sh build_samples.sh
This generates a number of sample binaries under /app/inference_engine_samples_build; here we test object_detection_sample_ssd.
cd /app/inference_engine_samples_build/intel64/Release
./object_detection_sample_ssd -i /app/openvino_test/dog.jpg -m /app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml -d CPU -l /app/inference_engine_samples_build/intel64/Release/lib/libcpu_extension.so
This runs the model on dog.jpg and prints the detection results.
While running it you may hit the following error:
./object_detection_sample_ssd: error while loading shared libraries: libformat_reader.so: cannot open shared object file: No such file or directory
To fix this, add the samples' lib directory to the library search path. Open ~/.bashrc (vi ~/.bashrc) and append at the end:
export LD_LIBRARY_PATH=/app/inference_engine_samples_build/intel64/Release/lib:$LD_LIBRARY_PATH
Then run source ~/.bashrc (or open a new shell).
Part 2: Testing a project you create yourself
As an example, we test the ssd_mobilenetV2 model.
1. Create ssd_mobilenetV2_openvino.cpp as follows:
// Headers: OpenCV, the Inference Engine API, the samples' CPU extensions (ext_list.hpp),
// plus the standard library pieces used below.
#include <opencv2/opencv.hpp>
#include <inference_engine.hpp>
#include <ext_list.hpp>
#include <string>
#include <chrono>
#include <sstream>
#include <iomanip>
using namespace cv;
using namespace InferenceEngine;
using namespace std;
// Copy a BGR uchar image into an NCHW input blob, resizing to the network input size.
template <typename T>
void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
    InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
    const size_t width = blobSize[3];
    const size_t height = blobSize[2];
    const size_t channels = blobSize[1];
    T* blob_data = blob->buffer().as<T*>();

    cv::Mat resized_image(orig_image);
    if (width != orig_image.size().width || height != orig_image.size().height) {
        cv::resize(orig_image, resized_image, cv::Size(width, height));
    }

    // Expensive operation: repack interleaved HWC pixels into planar CHW order.
    for (size_t h = 0; h < height; h++) {
        uchar* curr_row = resized_image.ptr(h);
        for (size_t w = 0; w < width; w++) {
            for (size_t c = 0; c < channels; c++) {
                blob_data[c * width * height + h * width + w] = *curr_row++;
            }
        }
    }
}
// Wrap the current frame into the request's input blob.
void frametoBlob(const Mat &frame, InferRequest::Ptr &inferRequest, const std::string &inputName) {
    Blob::Ptr frameBlob = inferRequest->GetBlob(inputName);
    matU8ToBlob<uchar>(frame, frameBlob);
}
int main(int argc, char** argv)
{
    string xml = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml";
    string bin = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.bin";
    string input_file = "/app/openvino_test/demo.mp4";

    //namedWindow("frame", WINDOW_NORMAL);
    namedWindow("pedestrian detection", WINDOW_NORMAL);
    typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;

    // --------------------------- 1. Load inference engine -------------------------------------
    Core ie;
    // Register the CPU extensions shipped with the samples (ext_list.hpp / libcpu_extension.so)
    IExtensionPtr cpuExtension = std::make_shared<Extensions::Cpu::CpuExtensions>();
    ie.AddExtension(cpuExtension, "CPU");
    // -----------------------------------------------------------------------------------------------------

    // Load the network
    CNNNetReader network_reader;
    network_reader.ReadNetwork(xml);
    network_reader.ReadWeights(bin);

    // Get the input/output information
    auto network = network_reader.getNetwork();
    InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
    InferenceEngine::OutputsDataMap output_info(network.getOutputsInfo());

    // Configure input/output precision and layout
    for (auto &item : input_info) {
        auto input_data = item.second;
        input_data->setPrecision(Precision::U8);
        input_data->setLayout(Layout::NCHW);
    }
    auto inputName = input_info.begin()->first;
    for (auto &item : output_info) {
        auto output_data = item.second;
        output_data->setPrecision(Precision::FP32);
    }

    // Create the executable network
    auto exec_network = ie.LoadNetwork(network, "CPU");

    // Create two inference requests (current / next frame) for async execution
    auto infer_request_curr = exec_network.CreateInferRequestPtr();
    auto infer_request_next = exec_network.CreateInferRequestPtr();

    VideoCapture capture(input_file);
    Mat curr_frame, next_frame;
    capture.read(curr_frame);
    //image.copyTo(curr_frame);
    int image_width = curr_frame.cols;
    int image_height = curr_frame.rows;
    bool isLastFrame = false;
    bool isFirstFrame = true;
    frametoBlob(curr_frame, infer_request_curr, inputName);

    while (true) {
        //if (!capture.read(next_frame)) {
        //image.copyTo(next_frame);
        if (!capture.read(next_frame)) {
            if (next_frame.empty()) {
                isLastFrame = true;
            }
        }
        auto t0 = std::chrono::high_resolution_clock::now();
        if (!isLastFrame) {
            frametoBlob(next_frame, infer_request_next, inputName);
        }

        // Kick off asynchronous inference
        if (isFirstFrame) {
            infer_request_curr->StartAsync();
            infer_request_next->StartAsync();
            isFirstFrame = false;
        }
        else {
            if (!isLastFrame) {
                infer_request_next->StartAsync();
            }
        }

        // Wait for the current request and parse its results
        if (OK == infer_request_curr->Wait(IInferRequest::WaitMode::RESULT_READY)) {
            auto output_name = output_info.begin()->first;
            auto output = infer_request_curr->GetBlob(output_name);
            // SSD output: [1, 1, N, 7] rows of [image_id, label, conf, xmin, ymin, xmax, ymax]
            const float* detection = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output->buffer());
            const SizeVector outputDims = output->getTensorDesc().getDims();
            const int rows = outputDims[2];
            const int object_size = outputDims[3];
            for (int row = 0; row < rows; row++) {
                float label = detection[row * object_size + 1];
                float confidence = detection[row * object_size + 2];
                float x_min = detection[row * object_size + 3] * image_width;
                float y_min = detection[row * object_size + 4] * image_height;
                float x_max = detection[row * object_size + 5] * image_width;
                float y_max = detection[row * object_size + 6] * image_height;
                if (confidence > 0.5) {
                    Rect object_box((int)x_min, (int)y_min, (int)(x_max - x_min), (int)(y_max - y_min));
                    rectangle(curr_frame, object_box, Scalar(0, 0, 255), 2, 8, 0);
                }
            }

            // Measure and display the per-frame time
            auto t1 = std::chrono::high_resolution_clock::now();
            ms dtime = std::chrono::duration_cast<ms>(t1 - t0);
            ostringstream ss;
            ss << "SSD MobilenetV2 detection fps: " << std::fixed << std::setprecision(2) << 1000 / dtime.count() << "fps";
            //ss << "detection time : " << std::fixed << std::setprecision(2) << dtime.count() << " ms";
            printf("SSD MobilenetV2 detection fps = %f \n", 1000 / dtime.count());
            putText(curr_frame, ss.str(), Point(50, 50), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 0, 255), 2, 8);
        }
        imshow("pedestrian detection", curr_frame);
        char c = waitKey(2);
        if (c == 27) {
            break;
        }
        if (isLastFrame) {
            break;
        }

        // Swap frames and requests for the next iteration
        next_frame.copyTo(curr_frame);
        infer_request_curr.swap(infer_request_next);
    }
    capture.release();
    destroyAllWindows();
    return 0;
}
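If you later swap in a different model, its input/output names and shapes may not match the assumptions above (a single U8 NCHW image input and a [1, 1, N, 7] SSD detection output). The small standalone sketch below, which is not part of the project file and simply reuses the same CNNNetReader API and model paths as above, prints what the IR actually declares:

#include <inference_engine.hpp>
#include <string>
#include <cstdio>

using namespace InferenceEngine;

int main() {
    std::string xml = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml";
    std::string bin = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.bin";

    CNNNetReader reader;
    reader.ReadNetwork(xml);
    reader.ReadWeights(bin);
    auto network = reader.getNetwork();

    // Inputs: for this model there should be a single image input
    for (auto &item : network.getInputsInfo()) {
        printf("input  : %s, dims =", item.first.c_str());
        for (auto d : item.second->getInputData()->getTensorDesc().getDims()) printf(" %zu", d);
        printf("\n");
    }
    // Outputs: SSD-style models report one [1, 1, N, 7] detection blob
    for (auto &item : network.getOutputsInfo()) {
        printf("output : %s, dims =", item.first.c_str());
        for (auto d : item.second->getTensorDesc().getDims()) printf(" %zu", d);
        printf("\n");
    }
    return 0;
}

This only needs libinference_engine.so, so it can be built with the same CMakeLists.txt described in the next step.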
2. Create CMakeLists.txt:
cmake_minimum_required(VERSION 3.12)
# Project name
project(ssd_mobilenetV2_demo)
# C++ standard
set(CMAKE_CXX_STANDARD 11)
# Include paths used by the project
include_directories(
    /app/intel/openvino/deployment_tools/inference_engine/external/tbb/include
    /app/intel/openvino/opencv/include
    /app/intel/openvino/deployment_tools/inference_engine/include
    /app/intel/openvino/deployment_tools/inference_engine/samples
    /app/intel/openvino/deployment_tools/inference_engine/src/extension
)
# Library search paths
link_directories(
    /app/intel/openvino/deployment_tools/inference_engine/lib/intel64
    /app/inference_engine_samples_build/intel64/Release/lib
    /usr/local/lib64
    /app/intel/openvino/deployment_tools/inference_engine/external/tbb/lib
)
# Shared libraries to link against
link_libraries(
    /app/intel/openvino/opencv/lib/libopencv_core.so
    /app/intel/openvino/opencv/lib/libopencv_highgui.so
    /app/intel/openvino/opencv/lib/libopencv_imgcodecs.so
    /app/intel/openvino/opencv/lib/libopencv_videoio.so
    /app/intel/openvino/opencv/lib/libopencv_imgproc.so
    /app/intel/openvino/opencv/lib/libopencv_videoio_ffmpeg.so
    /app/inference_engine_samples_build/intel64/Release/lib/libcpu_extension.so
    /app/intel/openvino/deployment_tools/inference_engine/lib/intel64/libinference_engine.so
)
add_executable(ssd_mobilenetV2_demo ssd_mobilenetV2_openvino.cpp)
3. Put CMakeLists.txt and ssd_mobilenetV2_openvino.cpp in the same directory, e.g. /app/openvino_test/ssd_demo/, then build:
cd /app/openvino_test/ssd_demo/
mkdir build
cd build
cmake ..
make
4. If the build succeeds, the ssd_mobilenetV2_demo executable appears under /app/openvino_test/ssd_demo/build.
Run it with ./ssd_mobilenetV2_demo
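As a quick sanity check without a video file, the same logic can be run on a single image such as the dog.jpg used in Part 1. The sketch below is a simplified synchronous variant of the demo above (one Infer() call instead of two async requests); the image and output paths are assumptions, while the model paths and SSD output parsing are the same as in the project code:

#include <opencv2/opencv.hpp>
#include <inference_engine.hpp>
#include <ext_list.hpp>
#include <string>
#include <cstdio>

using namespace cv;
using namespace InferenceEngine;
using namespace std;

int main() {
    string xml = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml";
    string bin = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.bin";
    string image_file = "/app/openvino_test/dog.jpg";          // assumed input image

    Mat image = imread(image_file);
    if (image.empty()) { printf("cannot read %s\n", image_file.c_str()); return 1; }

    Core ie;
    ie.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>(), "CPU");

    CNNNetReader reader;
    reader.ReadNetwork(xml);
    reader.ReadWeights(bin);
    auto network = reader.getNetwork();

    InputsDataMap input_info(network.getInputsInfo());
    OutputsDataMap output_info(network.getOutputsInfo());
    auto inputName = input_info.begin()->first;
    auto outputName = output_info.begin()->first;
    input_info.begin()->second->setPrecision(Precision::U8);
    input_info.begin()->second->setLayout(Layout::NCHW);
    output_info.begin()->second->setPrecision(Precision::FP32);

    auto exec_network = ie.LoadNetwork(network, "CPU");
    auto infer_request = exec_network.CreateInferRequest();    // synchronous request

    // Fill the input blob: resize to the network size, repack HWC BGR -> planar NCHW
    Blob::Ptr input = infer_request.GetBlob(inputName);
    SizeVector dims = input->getTensorDesc().getDims();        // [1, C, H, W]
    const size_t C = dims[1], H = dims[2], W = dims[3];
    Mat resized;
    resize(image, resized, Size((int)W, (int)H));
    uint8_t* blob_data = input->buffer().as<uint8_t*>();
    for (size_t h = 0; h < H; h++)
        for (size_t w = 0; w < W; w++)
            for (size_t c = 0; c < C; c++)
                blob_data[c * H * W + h * W + w] = resized.at<Vec3b>((int)h, (int)w)[c];

    infer_request.Infer();

    // Parse the SSD output: rows of [image_id, label, conf, xmin, ymin, xmax, ymax]
    auto output = infer_request.GetBlob(outputName);
    const float* detection = output->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
    SizeVector outDims = output->getTensorDesc().getDims();
    const int rows = outDims[2];
    const int object_size = outDims[3];
    for (int row = 0; row < rows; row++) {
        float confidence = detection[row * object_size + 2];
        if (confidence < 0.5f) continue;
        int x_min = (int)(detection[row * object_size + 3] * image.cols);
        int y_min = (int)(detection[row * object_size + 4] * image.rows);
        int x_max = (int)(detection[row * object_size + 5] * image.cols);
        int y_max = (int)(detection[row * object_size + 6] * image.rows);
        rectangle(image, Rect(x_min, y_min, x_max - x_min, y_max - y_min), Scalar(0, 0, 255), 2);
    }
    imwrite("/app/openvino_test/dog_result.jpg", image);        // assumed output path
    return 0;
}

It builds with the same CMakeLists.txt as the video demo; only the source file name in add_executable needs to change.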