sudo apt update
sudo apt install build-essential
tar -zxv -f cmake-3.23.0-rc3.tar.gz
./bootstrap
如果 ./bootstrap 过程中出现 OpenSSL 相关的错误,则需先安装 libssl-dev,安装完后再重新执行 ./bootstrap
sudo apt-get install libssl-dev
make
sudo make install
cmake --version
验证是否安装完成。
接下来安装 protobuf 的编译依赖:sudo apt-get install autoconf automake libtool curl make g++ unzip libffi-dev -y
tar -zxv -f protobuf-cpp-3.20.0-rc-1.tar.gz
cd protobuf-3.20.0-rc-1/
./autogen.sh
./configure
make
sudo make install
sudo ldconfig
protoc --version
查看是否安装成功(具体可参考 protobuf 官方文档)
unzip MNN-master.zip
cd MNN
./schema/generate.sh
mkdir build && cd build && cmake .. && make -j8
部署mnist 还需要opencv库,所以还需要安装一下opencv
可以根据这篇文章通过克隆OpenCV源代码进行安装
博客
根据博客安装完成后,编写一个简单的opencv程序验证是否可用
创建main.cpp文件,并使用gcc编译并运行
g++ main.cpp -o output `pkg-config --cflags --libs opencv4`
./output
#include <opencv2/opencv.hpp>
#include "opencv2/imgcodecs/legacy/constants_c.h"
#include "opencv2/imgproc/types_c.h"
#include <iostream>

// Smoke test for the OpenCV installation: load test.jpg, print its
// dimensions, then read one pixel value two equivalent ways (raw row
// pointer arithmetic vs cv::Mat::at) and print the cv::Mat::at result.
// Returns 0 on success, -1 if the image cannot be loaded.
int main( int argc, char** argv ) {
    cv::Mat image = cv::imread("test.jpg" , CV_LOAD_IMAGE_COLOR);
    if (image.empty()) {
        std::cout << "Could not open or find the image" << std::endl ;
        return -1;
    }
    std::cout << "image wide: "<< image.cols << ",image high: " << image.rows << ",image channels: "<< image.channels() << std::endl;
    /* display image
    cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE );
    cv::imshow( "Display window", image );
    cv::waitKey(0);
    */
    size_t y, x;   // y is row, x is col
    int c;         // c is channel (0 = B, 1 = G, 2 = R for imread color images)
    y = x = 250;
    c = 2;
    // Guard: reading row/col 250 of a smaller image would be out of bounds (UB).
    if (image.rows <= static_cast<int>(y) || image.cols <= static_cast<int>(x)) {
        std::cout << "Could not open or find the image" << std::endl ;
        return -1;
    }
    // row_ptr is the head pointer of row y.
    unsigned char *row_ptr = image.ptr<unsigned char>(y);
    // data_ptr points to the first channel of pixel (y, x).
    unsigned char *data_ptr = &row_ptr[x * image.channels()];
    // data holds the same value that cv::Mat::at reads below; kept to
    // demonstrate the raw-pointer access path.
    unsigned char data = data_ptr[c];
    (void)data;
    // unsigned char is not printable directly, so cast to unsigned before printing.
    std::cout << "pixel value at y, x ,c"<<static_cast<unsigned>(image.at<cv::Vec3b>(y,x)[c]) << std::endl;
    return 0;
}
sudo vi /etc/ld.so.conf.d/opencv.conf
即在 /etc/ld.so.conf.d/ 目录下新建并编辑该文件:sudo vim /etc/ld.so.conf.d/opencv.conf
写入两行:
/usr/local/lib
~/opencv_build/opencv/build/lib(这里指的是你安装的opencv路径下的lib)
保存退出,运行sudo ldconfig
,问题解决
再运行./output
能够输出正确信息说明opencv安装成功
创建c++文件,按照MNN官方文档推理过程编写程序,mnn模型下载地址
#include "Backend.hpp"
#include "Interpreter.hpp"
#include "MNNDefine.h"
#include "Tensor.hpp"
#include <opencv2/opencv.hpp>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <memory>
#include <string>
#include <vector>
using namespace MNN;
using namespace cv;

// Runs MNIST digit classification with MNN: loads test.jpg, resizes it to
// 28x28, feeds it through mnist.mnn on the CPU backend, applies softmax to
// the 10 output scores, and prints the predicted digit.
// Returns 0 on success, -1 if the image cannot be loaded.
int main(void)
{
    // Paths to the test image and the MNN model file.
    std::string image_name = "test.jpg";
    const char* model_name = "mnist.mnn";
    // Scheduling / backend configuration knobs.
    int forward = MNN_FORWARD_CPU;
    // int forward = MNN_FORWARD_OPENCL;
    int precision = 2;
    int power = 0;
    int memory = 0;
    int threads = 1;
    int INPUT_SIZE = 28;

    cv::Mat raw_image = cv::imread(image_name.c_str());
    //imshow("image", raw_image);
    // Fail early if the image could not be read (imread returns an empty Mat).
    if (raw_image.empty()) {
        printf("failed to load image %s\n", image_name.c_str());
        return -1;
    }
    int raw_image_height = raw_image.rows;
    int raw_image_width = raw_image.cols;
    cv::Mat image;
    cv::resize(raw_image, image, cv::Size(INPUT_SIZE, INPUT_SIZE));

    // 1. Create the Interpreter from the model file on disk:
    //    static Interpreter* createFromFile(const char* file);
    std::shared_ptr<Interpreter> net(Interpreter::createFromFile(model_name));
    MNN::ScheduleConfig config;
    // 2. Schedule config.
    //    numThread sets the desired concurrency (actual thread count and
    //    efficiency do not depend on it alone). `type` selects the primary
    //    backend (CPU by default); backupType is used for unsupported ops.
    config.numThread = threads;
    config.type = static_cast<MNNForwardType>(forward);
    MNN::BackendConfig backendConfig;
    // 3. Backend config: memory / power / precision preferences.
    backendConfig.precision = (MNN::BackendConfig::PrecisionMode)precision;
    backendConfig.power = (MNN::BackendConfig::PowerMode) power;
    backendConfig.memory = (MNN::BackendConfig::MemoryMode) memory;
    config.backendConfig = &backendConfig;

    // 4. Create the session; model buffer can be released once it exists.
    auto session = net->createSession(config);
    net->releaseModel();

    clock_t start = clock();
    // Preprocessing: convert to float32 and normalize to [0, 1].
    image.convertTo(image, CV_32FC3);
    image = image / 255.0f;

    // 5. Input data.
    // Wrap the NHWC image in a TENSORFLOW-layout tensor; MNN converts the
    // layout (NHWC -> NCHW) during copyFromHostTensor if needed.
    std::vector<int> dims{1, INPUT_SIZE, INPUT_SIZE, 3};
    // Tensor::create returns a raw owning pointer; hold it in a unique_ptr
    // so it is released on every exit path (the original leaked it).
    std::unique_ptr<MNN::Tensor> nhwc_Tensor(
        MNN::Tensor::create<float>(dims, NULL, MNN::Tensor::TENSORFLOW));
    auto nhwc_data = nhwc_Tensor->host<float>();
    auto nhwc_size = nhwc_Tensor->size();
    ::memcpy(nhwc_data, image.data, nhwc_size);

    // Fetch the (single) input tensor and copy our host data into it.
    // copyFromHostTensor handles any layout conversion and backend copy.
    auto inputTensor = net->getSessionInput(session, nullptr);
    inputTensor->copyFromHostTensor(nhwc_Tensor.get());

    // 6. Run the session.
    net->runSession(session);

    // 7. Fetch the output tensor by name and copy it back to the host.
    std::string output_tensor_name0 = "dense1_fwd";
    MNN::Tensor *tensor_scores = net->getSessionOutput(session, output_tensor_name0.c_str());
    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
    tensor_scores->copyToHostTensor(&tensor_scores_host);

    // Post-processing: softmax over the 10 logits, then argmax.
    auto scores_dataPtr = tensor_scores_host.host<float>();
    // Softmax denominator: sum of exp(logit). The original summed the raw
    // logits, which is not softmax and can flip the argmax when logits are
    // negative.
    float exp_sum = 0.0f;
    for (int i = 0; i < 10; ++i)
    {
        exp_sum += std::exp(scores_dataPtr[i]);
    }
    // Argmax of the softmax probabilities.
    int idx = 0;
    float max_prob = -10.0f;
    for (int i = 0; i < 10; ++i)
    {
        float prob = std::exp(scores_dataPtr[i]) / exp_sum;
        if (prob > max_prob)
        {
            max_prob = prob;
            idx = i;
        }
    }
    // Report inference + post-processing time (the original recorded `start`
    // but never used it).
    double elapsed_ms = 1000.0 * (clock() - start) / CLOCKS_PER_SEC;
    printf("inference time: %.3f ms\n", elapsed_ms);
    printf("the result is %d\n", idx);
    return 0;
}
编写CMakeLists.txt,注意要将里面MNN的地址换成自己编译的MNN地址
cmake_minimum_required(VERSION 3.10)
project(mnist)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(OpenCV REQUIRED)

# Path to your local MNN checkout -- adjust to your own build location.
set(MNN_DIR /home/chen/MNN)
include_directories(
    ${MNN_DIR}/include
    ${MNN_DIR}/include/MNN
    ${MNN_DIR}/tools
    ${MNN_DIR}/tools/cpp
    ${MNN_DIR}/source
    ${MNN_DIR}/source/backend
    ${MNN_DIR}/source/core
)
link_directories(${MNN_DIR}/build)

add_executable(mnist main.cpp)
# Link by library name, not a raw "-lMNN" flag: CMake resolves libMNN via the
# link_directories above, and a bare name is portable across generators.
target_link_libraries(mnist MNN ${OpenCV_LIBS})
编译
cmake .
make
编译完成后出现mnist可执行文件,输入./mnist
运行,可以看到已经可以成功预测出手写数字