Raspberry Pi 4: NCNN-Based Fire Detection

In the previous two posts on fire detection, the final results were not very good. To improve them, I collected more fire data and switched the network from the earlier yolov3-tiny to mobilenetv2-yolov3. Running on the Raspberry Pi with the NCNN inference framework, the results are much better than before, as shown below:
[Figure 1 and Figure 2: detection results on the Raspberry Pi]
Here are the implementation steps.

1. Fire dataset links:

Link: https://pan.baidu.com/s/1VypCAODfMvEexU-kDooSgw
Extraction code: feo2
Link: https://pan.baidu.com/s/1e32KPWI71PeFadUPmeEvHQ
Extraction code: ihsj

2. Training under darknet. If you need the cfg and trained model files, contact me.
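For reference, the training step boils down to the standard darknet detector command; the fire.data file and the pre-trained backbone weights named below are hypothetical placeholders for your own setup, not files provided with this post:

./darknet detector train data/fire.data mobilenetV2-yolov3.cfg mobilenetv2_pretrained.weights

Here data/fire.data is the usual darknet data file pointing at the class count (1, fire), the train/valid image lists, the names file, and a backup directory for saved weights.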
3. Deploying NCNN on the Raspberry Pi. The official NCNN repository provides build instructions for the Raspberry Pi, and following them the library compiles without problems. First install the dependencies:

sudo apt-get install git cmake
sudo apt-get install -y gfortran
sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
sudo apt-get install --no-install-recommends libboost-all-dev
sudo apt-get install -y libgflags-dev libgoogle-glog-dev liblmdb-dev libatlas-base-dev

Then clone NCNN:

git clone https://github.com/Tencent/ncnn.git
cd ncnn

Edit the top-level CMakeLists.txt and make sure the examples, benchmark, and tools subdirectories are added:

add_subdirectory(examples)
add_subdirectory(benchmark)
add_subdirectory(tools)

Then build according to the official documentation. The pi3 toolchain file provided by NCNN also works directly on Raspbian on the Pi 4, and recent versions of NCNN enable OpenMP automatically:

cd <ncnn-root-dir>
mkdir -p build
cd build
cmake -DCMAKE_TOOLCHAIN_FILE=../toolchains/pi3.toolchain.cmake -DPI3=ON ..
make -j4
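Since the benchmark directory was enabled above, the build also produces benchncnn, which gives a quick sanity check of inference speed on the Pi before wiring up the camera (its arguments are loop count, thread count, and powersave mode; the values below are just examples):

cd <ncnn-root-dir>/benchmark
../build/benchmark/benchncnn 8 4 0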

4. Model conversion

cd <ncnn-root-dir>
cd build
cd tools/darknet
./darknet2ncnn mobilenetV2-yolov3.cfg mobilenetV2-yolov3.weights mobilenetV2-yolov3.param mobilenetV2-yolov3.bin 1
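Optionally, the converted model can be run through ncnnoptimize, which is built alongside the other tools; it fuses layers and can store the weights as fp16 to shrink the .bin. The 65536 flag for fp16 output is my understanding of recent NCNN versions, so treat it as an assumption and use 0 to keep fp32:

cd <ncnn-root-dir>/build/tools
./ncnnoptimize mobilenetV2-yolov3.param mobilenetV2-yolov3.bin mobilenetV2-yolov3-opt.param mobilenetV2-yolov3-opt.bin 65536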

5. Running

cd <ncnn-root-dir>
cd build
cd examples
./mobilenetV2-yolov3

6. Partial code

#include "net.h"
#include "platform.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <vector>
#include <sys/time.h>
#if NCNN_VULKAN
#include "gpu.h"
#endif // NCNN_VULKAN

#define MOBILENETV2_YOLOV3_TINY 1 // 0 or undef for the full MobileNetV2-YOLOv3

struct Object
{
    cv::Rect_<float> rect;
    int label;
    float prob;
};

double what_time_is_it_now()
{
    struct timeval time;
    if (gettimeofday(&time,NULL)){
        return 0;
    }
    return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
ncnn::Net MobileNetV2_yolov3; // global network, loaded once in main()
static int detect_MobileNetV2_yolov3(const cv::Mat& bgr, std::vector<Object>& objects)
{
    double time;
#if NCNN_VULKAN
    MobileNetV2_yolov3.opt.use_vulkan_compute = true; // note: only takes effect if set before load_param()
#endif // NCNN_VULKAN
    const int target_size = 320;
    time = what_time_is_it_now();
    int img_w = bgr.cols;
    int img_h = bgr.rows;
    // convert OpenCV BGR to RGB and resize to the network input size
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, bgr.cols, bgr.rows, target_size, target_size);
    const float mean_vals[3] = {0, 0, 0};
    const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
    in.substract_mean_normalize(mean_vals, norm_vals);
    ncnn::Extractor ex = MobileNetV2_yolov3.create_extractor();
    ex.set_num_threads(4);
    ex.input("data", in);
    ncnn::Mat out;
    ex.extract("output", out);
    printf("Predicted in %f seconds.11\n", what_time_is_it_now()-time);
    printf("%d %d %d\n", out.w, out.h, out.c);
    objects.clear();
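    // each output row: [class_id, confidence, xmin, ymin, xmax, ymax], coordinates normalized to [0,1] as assumed by the scaling below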
    for (int i = 0; i < out.h; i++)
    {
        const float* values = out.row(i);
        Object object;
        object.label = values[0];
        object.prob = values[1];
        object.rect.x = values[2] * img_w;
        object.rect.y = values[3] * img_h;
        object.rect.width = values[4] * img_w - object.rect.x;
        object.rect.height = values[5] * img_h - object.rect.y;
        objects.push_back(object);
    }
    return 0;
}

void draw_objects(cv::Mat& image, const std::vector<Object>& objects)
{
    static const char* class_names[] = {"background", "fire"};

    //cv::Mat image = bgr.clone();

    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));
        char text[256];
        sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                      cv::Scalar(255, 255, 255), -1);
        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }
}
int main(int argc, char** argv)
{
    MobileNetV2_yolov3.load_param("MobileNetV2-YOLOv3-Lite.param");
    MobileNetV2_yolov3.load_model("MobileNetV2-YOLOv3-Lite.bin");
    cv::VideoCapture cap(0);
    if(!cap.isOpened()){
        printf("capture err");
        return -1;
    }
    cv::Mat cv_img;
    std::vector<Object> objects;
    while(true){
        if(!cap.read(cv_img)){
            printf("cv_img err");
            return -1;
        }
        detect_MobileNetV2_yolov3(cv_img, objects);
        draw_objects(cv_img, objects);
        cv::imshow("video", cv_img);
        cv::waitKey(1);
    }
    cap.release();
    return 0;
}

7. Result analysis. The detection quality is now fairly satisfactory, but each frame takes about 0.3 s to process, so it is not yet real time. The next goal is real-time detection, and to try other inference frameworks such as MNN and TNN.
[Figure 3: detection result]
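As a first step toward real time, here is a minimal tuning sketch, assuming a recent ncnn release; the ncnn::Option fields below exist in current versions, but the actual gain on the Pi 4 still has to be measured:

ncnn::Net net;
// options only take effect if set before load_param()/load_model()
net.opt.num_threads = 4;          // use all four Cortex-A72 cores
net.opt.lightmode = true;         // free intermediate blobs early to reduce memory pressure
net.opt.use_fp16_packed = true;   // fp16 packing/storage cuts memory traffic; ncnn falls back if the CPU lacks support
net.opt.use_fp16_storage = true;
net.opt.use_fp16_arithmetic = true;
net.load_param("MobileNetV2-YOLOv3-Lite.param");
net.load_model("MobileNetV2-YOLOv3-Lite.bin");
// shrinking target_size from 320 to e.g. 256 in detect_MobileNetV2_yolov3() also trades some accuracy for speed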
