The following introduction is taken from the official project page.
ncnn is a high-performance neural network forward-inference framework heavily optimized for mobile platforms. It was designed from the start with mobile deployment in mind: it has no third-party dependencies, it is cross-platform, and its mobile CPU speed is faster than all currently known open-source frameworks. With ncnn, developers can easily port deep learning algorithms to mobile phones for efficient execution and build AI apps, bringing AI to your fingertips. ncnn is already used in many Tencent applications such as QQ, Qzone, WeChat, and 天天P图 (Pitu).
Usage overview
git clone https://github.com/Tencent/ncnn
cd ncnn
mkdir build
cd build
cmake ..
make -j
make install
Here we test AlexNet for image classification. The model can be downloaded from: https://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel
Because this AlexNet model was trained with an old version of Caffe, its parameters are stored in the old format. To upgrade it to the current Caffe format, run:
~/caffe/build/tools/upgrade_net_proto_text deploy.prototxt new_deploy.prototxt
~/caffe/build/tools/upgrade_net_proto_binary bvlc_alexnet.caffemodel new_bvlc_alexnet.caffemodel
Then convert the upgraded Caffe model to the ncnn format (alexnet.param holds the network structure, alexnet.bin the weights):
./caffe2ncnn ../../examples/alexnet/new_deploy.prototxt ../../examples/alexnet/new_bvlc_alexnet.caffemodel alexnet.param alexnet.bin
The test code (classify.cpp) is as follows:
#include <stdio.h>
#include <cassert>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "net.h"
using namespace std;
// read class labels, one per line
void read_label(std::string label_path, vector<string> &labels)
{
    ifstream infile;
    infile.open(label_path.data());
    assert(infile.is_open());

    string line;
    while (getline(infile, line))
    {
        labels.push_back(line);
    }
    infile.close();
}
// the function keeps its name from the ncnn squeezenet example, but loads the converted AlexNet
static int detect_squeezenet(const cv::Mat& bgr, std::vector<float>& cls_scores)
{
    ncnn::Net squeezenet;
    squeezenet.load_param("alexnet.param");
    squeezenet.load_model("alexnet.bin");

    // resize to the 227x227 input expected by AlexNet
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, 227, 227);

    // subtract the per-channel mean, no scaling
    const float mean_vals[3] = {104.f, 117.f, 123.f};
    in.substract_mean_normalize(mean_vals, 0);

    ncnn::Extractor ex = squeezenet.create_extractor();
    ex.set_light_mode(true);

    ex.input("data", in);

    ncnn::Mat out;
    ex.extract("prob", out);

    // collect one score per output channel
    cls_scores.resize(out.c);
    for (int j = 0; j < out.c; j++)
    {
        const float* prob = out.channel(j);
        cls_scores[j] = prob[0];
    }

    return 0;
}
static int print_topk(const std::vector<float>& cls_scores, int topk, vector<string> labels)
{
    // partial sort topk with index
    int size = cls_scores.size();
    std::vector< std::pair<float, int> > vec;
    vec.resize(size);
    for (int i = 0; i < size; i++)
    {
        vec[i] = std::make_pair(cls_scores[i], i);
    }

    std::partial_sort(vec.begin(), vec.begin() + topk, vec.end(),
                      std::greater< std::pair<float, int> >());

    // print topk index, score and label
    for (int i = 0; i < topk; i++)
    {
        float score = vec[i].first;
        int index = vec[i].second;
        fprintf(stderr, "%d = %f\n", index, score);
        cout << labels[index] << endl;
    }

    return 0;
}
int main(int argc, char** argv)
{
    if (argc != 2)
    {
        fprintf(stderr, "Usage: %s [imagepath]\n", argv[0]);
        return -1;
    }

    const char* imagepath = argv[1];

    cv::Mat m = cv::imread(imagepath, 1); // 1 = load as BGR color image
    if (m.empty())
    {
        fprintf(stderr, "cv::imread %s failed\n", imagepath);
        return -1;
    }

    vector<string> labels;
    read_label("./label.txt", labels);

    std::vector<float> cls_scores;
    detect_squeezenet(m, cls_scores);

    print_topk(cls_scores, 3, labels);

    return 0;
}
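The label.txt read by read_label is assumed to contain one class name per line, in the same class-index order used by the Caffe AlexNet model (the standard ImageNet synset_words.txt ordering); the file is not part of ncnn, so you have to provide it yourself. A minimal sketch of its first few lines:
n01440764 tench
n01443537 goldfish
n01484850 great white shark
...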
To build the code above, we set up the project with CMake (CMakeLists.txt):
cmake_minimum_required(VERSION 3.5)
project(classify)
find_package(OpenCV REQUIRED core highgui imgproc)
#include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
# adjust these to wherever ncnn was installed (here: the build/install prefix produced by make install)
include_directories(/home/young/deeplearning/ncnn/build/install/include)
link_directories(/home/young/deeplearning/ncnn/build/install/lib)
find_package(OpenMP REQUIRED)
if(OPENMP_FOUND)
message("OPENMP FOUND")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
endif()
add_executable(classify classify.cpp)
target_link_libraries(classify ncnn ${OpenCV_LIBS})
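With classify.cpp and this CMakeLists.txt in the same directory (and the ncnn paths above adjusted to your own install location), the program can be built out-of-source in the usual way:
mkdir build
cd build
cmake ..
make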
After the build completes, run:
./classify ./test.jpg
The output looks like this (the first number is the ImageNet class index, the second is the score, followed by the corresponding line from label.txt):
260 = 0.354059
n02112137 dog
273 = 0.231026
n02115641 wolf
270 = 0.065178
n02114548 wolf
The next step is to study the ncnn source code and work on further optimization.