Baidu has open-sourced FastDeploy, a very convenient deployment library.
It is very beginner-friendly: most PaddlePaddle models come with a corresponding deployment demo in FastDeploy, and so do a number of other popular models. This post walks through using the FastDeploy C++ SDK on Windows.
Simply pick and download the model you need from the FastDeploy site; here I downloaded PP-MattingV2.
After downloading and extracting it, the model folder contains four files: model.pdmodel, model.pdiparams, model.pdiparams.info, and deploy.yaml.
The environment used here:
CUDA Toolkit 11.1.1 (Windows 10 CUDA 11.1.1 download link; not needed without a GPU)
cuDNN 8.4.1 (cuDNN download link; not needed without a GPU)
Visual Studio 2019
Of the four files Visual Studio generates for the static-library project, we only need to modify framework.h and the project's .cpp source file (shown below as PPMattingv2_StaticLib.cpp); the other two, pch.h and pch.cpp, stay unchanged.
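For reference, the pch.h that the Visual Studio template generates normally just wraps framework.h, which is why including pch.h later is enough to pull in the function declarations. A sketch of its typical content (it may differ slightly between VS versions and is not modified here):
// pch.h: precompiled header generated by the project template (shown for reference only)
#ifndef PCH_H
#define PCH_H

// add headers that you want to pre-compile here
#include "framework.h"

#endif // PCH_H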
The relevant code is as follows.
framework.h:
#pragma once

#define WIN32_LEAN_AND_MEAN  // exclude rarely-used content from the Windows headers

#include "fastdeploy/vision.h"

cv::Mat GpuInfer(const std::string& model_dir, const cv::Mat& image,
                 const std::string& background_file, const bool save);
cv::Mat CpuInfer(const std::string& model_dir, const cv::Mat& image,
                 const std::string& background_file, const bool save);
int infer_by_camera(const std::string& device, const std::string& model_dir,
                    const std::string& window_name, const std::string& background_file);
PPMattingv2_StaticLib.cpp:
// PPMattingv2_StaticLib.cpp : defines the functions for the static library.
//
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "pch.h"
#include "framework.h"
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif
cv::Mat CpuInfer(const std::string& model_dir, const cv::Mat& image,
                 const std::string& background_file = "", const bool save = false) {
  // the exported model folder provides the network, the weights and the preprocessing config
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "deploy.yaml";
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
                                                      config_file, option);
  cv::Mat vis_im;
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return vis_im;
  }
  auto im = image;
  fastdeploy::vision::MattingResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return vis_im;
  }
  // with a background image, swap the background in; otherwise just visualize the matting result
  if (!background_file.empty()) {
    auto bg = cv::imread(background_file);
    vis_im = fastdeploy::vision::SwapBackground(im, bg, res);
  } else {
    vis_im = fastdeploy::vision::VisMatting(im, res);
  }
  if (save) {
    cv::imwrite("visualized_result_fg.jpg", vis_im);
    std::cout << "visualized_result_fg.jpg" << std::endl;
  }
  return vis_im;
}
cv::Mat GpuInfer(const std::string& model_dir, const cv::Mat& image,
                 const std::string& background_file = "", const bool save = false) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "deploy.yaml";
  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UsePaddleInferBackend();  // run on GPU through the Paddle Inference backend
  auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
                                                      config_file, option);
  cv::Mat vis_im;
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return vis_im;
  }
  auto im = image;
  fastdeploy::vision::MattingResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return vis_im;
  }
  if (!background_file.empty()) {
    auto bg = cv::imread(background_file);
    vis_im = fastdeploy::vision::SwapBackground(im, bg, res);
  } else {
    vis_im = fastdeploy::vision::VisMatting(im, res);
  }
  if (save) {
    cv::imwrite("visualized_result_fg.jpg", vis_im);
    std::cout << "visualized_result_fg.jpg" << std::endl;
  }
  return vis_im;
}
int infer_by_camera(const std::string& device, const std::string& model_dir,
                    const std::string& window_name = "video",
                    const std::string& background_file = "") {
  cv::VideoCapture cap;
  cap.open(0);  // open the default camera
  if (!cap.isOpened()) {
    std::cout << "open camera failed!" << std::endl;
    return 0;
  }
  cv::namedWindow(window_name, 1);
  while (1) {
    time_t t_now = time(0);  // time() has 1-second resolution, so this is only a rough per-frame timing
    cv::Mat frame;
    cap >> frame;
    if (frame.empty()) {
      return 0;
    }
    // note: CpuInfer/GpuInfer rebuild the model for every frame, so the timing includes model initialization
    if (device == "gpu" || device == "GPU")
      cv::imshow(window_name, GpuInfer(model_dir, frame, background_file));
    else
      cv::imshow(window_name, CpuInfer(model_dir, frame, background_file));
    std::cout << "Matting this frame took " << (time(0) - t_now) << " s" << std::endl;
    if (cv::waitKey(30) >= 0) break;  // press any key to exit
  }
  cap.release();
  return 1;
}
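Before setting up the camera-based test program, you can sanity-check the library on a single image. Below is a minimal sketch of such a test; all paths and the test image name are placeholders, and it needs the same include/library configuration as the console program shown further below:
#include "E:/VS_code/PPMatting_Staticlib_cpu/pch.h"  // same pch.h as in the console program below
#include <opencv2/opencv.hpp>

int main() {
  // placeholder paths: point these at your own model folder and test image
  cv::Mat img = cv::imread("E:/VS_code/PPMatting_cpu/x64/Release/test.jpg");
  if (img.empty()) return -1;
  // the declarations in framework.h carry no default arguments, so pass all four explicitly
  cv::Mat result = CpuInfer("E:/VS_code/PPMatting_cpu/x64/Release/ppmattingv2-stdc1-human_512",
                            img, "", true);  // save = true also writes visualized_result_fg.jpg
  if (!result.empty()) {
    cv::imshow("PPMattingv2", result);
    cv::waitKey(0);
  }
  return 0;
}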
When configuring the project, the include directories to add are the include path of the extracted FastDeploy SDK and the include path of the OpenCV bundled inside it.
Once they are added, the many errors about the FastDeploy headers not being found will disappear from the code.
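As an example, if the SDK were extracted to a hypothetical D:\fastdeploy-win-x64-gpu-1.0.4 directory, the two entries added under Project Properties -> C/C++ -> General -> Additional Include Directories would look roughly like this (the exact folder layout under third_libs depends on the SDK version, so check your extracted directory):
D:\fastdeploy-win-x64-gpu-1.0.4\include
D:\fastdeploy-win-x64-gpu-1.0.4\third_libs\install\opencv\build\include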
The test program that calls the static library then looks like this:
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>
#include "E:/VS_code/PPMatting_Staticlib_cpu/pch.h"
int main() {
  infer_by_camera("cpu", "E:/VS_code/PPMatting_cpu/x64/Release/ppmattingv2-stdc1-human_512",
                  "PPMattingv2", "E:/VS_code/PPMatting_cpu/x64/Release/test_bg.jpg");
  return 0;
}
First click Run to build the .exe; you will then see complaints about a whole series of missing .dll files.
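These are the runtime DLLs of FastDeploy and its bundled third-party libraries (Paddle Inference, OpenCV, and so on). A quick fix is to copy them next to the generated .exe. A rough sketch run from a cmd prompt, reusing the hypothetical SDK path from above (double the % signs if you put this in a .bat file); newer SDK releases also ship a fastdeploy_init.bat helper that automates this step:
:: copy FastDeploy's own DLL next to the executable (paths are examples)
copy /y D:\fastdeploy-win-x64-gpu-1.0.4\lib\*.dll E:\VS_code\PPMatting_cpu\x64\Release\
:: gather the DLLs of the bundled third-party libraries as well
for /r D:\fastdeploy-win-x64-gpu-1.0.4\third_libs\install %f in (*.dll) do copy /y "%f" E:\VS_code\PPMatting_cpu\x64\Release\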