These are a beginner's notes on deploying to the Raspberry Pi; I stumbled into quite a few pitfalls along the way.
git clone https://github.com/ppogg/YOLOv5-Lite.git
This assumes you already know how to set up the environment on Windows 10. Since I already had torch 1.7 and torchvision 0.8 with CUDA 11.0 (plus the matching cuDNN and GPU driver), I trained in that environment and used the dependencies from requirements.txt for the .onnx export (no need to reconfigure CUDA or cuDNN).
pip install -r requirements.txt
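Before training, it is worth confirming that the interpreter actually sees these versions; a quick check using the standard torch API (nothing here is specific to YOLOv5-Lite):

import torch
import torchvision

# Expecting torch 1.7 / torchvision 0.8 built against CUDA 11.0
print(torch.__version__, torchvision.__version__)
print(torch.version.cuda, torch.cuda.is_available())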
This part documents the preparation work: converting the XML annotations to txt files and getting YOLOv5-Lite-s ready to train on your own data.
Modify the class names and the class count to match your own training categories.
Put the downloaded v5lite-s.pt file into a newly created weights/ folder.
Run train.py; the trained weights are saved under the runs/ folder.
Run autoanchor.py and save the anchors it generates into v5lite-s.yaml;
add them manually to the corresponding model config, in my case C:\Users\jxbj2\Desktop\yolov5lite\YOLOv5-Lite-master\models\v5lite-s.yaml.
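For reference, the part of v5lite-s.yaml that has to change is the class count and, if you adopt the autoanchor output, the anchors block. A minimal sketch of that section (the anchor values shown are just the stock defaults; substitute the numbers autoanchor.py prints for your data):

nc: 7  # number of classes -- set to your own class count

anchors:
  - [10,13, 16,30, 33,23]       # P3/8  -- replace with your autoanchor values
  - [30,61, 62,45, 59,119]      # P4/16
  - [116,90, 156,198, 373,326]  # P5/32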
The folders are laid out as follows: images go in images/, the corresponding XML annotation files go in indata/, and the generated txt files are written to labels/.
The conversion script is below. If you also need test and validation splits, adjust it (and the corresponding YOLOv5-Lite-s configuration) yourself and substitute your own class list; a small train/val split sketch follows the script.
import glob
import os
import xml.etree.ElementTree as ET

# class list -- replace with your own categories
classes = ["desk", "projector", "cup", "laptop", "trash", "box", "mecanum"]

def convert(size, box):
    # convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO (x, y, w, h)
    dw = 1.0 / size[0]
    dh = 1.0 / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    return (x * dw, y * dh, w * dw, h * dh)

def convert_annotation(image_name):
    in_file = open('./indata/' + image_name[:-3] + 'xml')        # path of the xml annotation
    out_file = open('./labels/' + image_name[:-3] + 'txt', 'w')  # path of the converted txt file
    root = ET.fromstring(in_file.read())
    in_file.close()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        cls = obj.find('name').text
        if cls not in classes:
            print(cls)
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    out_file.close()

if __name__ == '__main__':
    # every image has a matching xml file; point the glob at your images
    # for image_path in glob.glob("./images/train/*.jpg"):
    for image_path in glob.glob("./images/*.jpg"):
        image_name = os.path.basename(image_path)
        convert_annotation(image_name)
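If you want to split the data into train/val before training, here is a minimal sketch under the folder layout above (the 20% ratio and the images/train, images/val, labels/train, labels/val target folders are my own choice, not something the repo mandates):

import glob
import os
import random
import shutil

random.seed(0)
images = sorted(glob.glob("./images/*.jpg"))
random.shuffle(images)
n_val = int(len(images) * 0.2)  # hold out 20% for validation

for i, img in enumerate(images):
    split = "val" if i < n_val else "train"
    name = os.path.basename(img)
    os.makedirs("./images/" + split, exist_ok=True)
    os.makedirs("./labels/" + split, exist_ok=True)
    shutil.move(img, "./images/" + split + "/" + name)
    label = "./labels/" + name[:-3] + "txt"
    if os.path.exists(label):
        shutil.move(label, "./labels/" + split + "/" + name[:-3] + "txt")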
I did this export step on Windows; I also tried exporting directly on the Raspberry Pi, but it kept failing with export errors.
python export.py --weights weights/last.pt
This generates the corresponding last.onnx file under the weights/ folder.
Use onnx-simplifier to simplify the exported ONNX model: put last.onnx in the YOLOv5-Lite-master folder and run the following command in a terminal:
python -m onnxsim last.onnx lastsim.onnx
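Before copying the file over, it is worth sanity-checking the simplified model and listing its outputs; the definitive blob names still come from lastsim.param later on, but this is a quick way to confirm the export is intact. A sketch using the standard onnx / onnxruntime APIs:

import onnx
import onnxruntime as ort

model = onnx.load("lastsim.onnx")
onnx.checker.check_model(model)  # raises if the graph is malformed

sess = ort.InferenceSession("lastsim.onnx", providers=["CPUExecutionProvider"])
for out in sess.get_outputs():
    print(out.name, out.shape)  # output names and shapes of the simplified model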
Copy the simplified lastsim.onnx onto a USB drive and transfer it to the Raspberry Pi.
sudo apt-get install git cmake
sudo apt-get install -y gfortran
sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
sudo apt-get install --no-install-recommends libboost-all-dev
sudo apt-get install -y libgflags-dev libgoogle-glog-dev liblmdb-dev libatlas-base-dev
git clone https://gitee.com/Tencent/ncnn.git
cd ncnn
mkdir build
cd build
cmake ..
make -j4
make install
Adjust the paths to your own setup (basic command-line skills are assumed, haha).
cd ncnn/build
./tools/onnx/onnx2ncnn lastsim.onnx lastsim.param lastsim.bin
./tools/ncnnoptimize lastsim.param lastsim.bin last.param last.bin 65536
The trailing 65536 flag asks ncnnoptimize to store the weights as fp16. The full detection code is shown below.
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "layer.h"
#include "net.h"
#if defined(USE_NCNN_SIMPLEOCV)
#include "simpleocv.h"
#else
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <vector>
// 0 : FP16
// 1 : INT8
#define USE_INT8 0
// 0 : Image
// 1 : Camera
#define USE_CAMERA 1
struct Object
{
cv::Rect_<float> rect;
int label;
float prob;
};
static inline float intersection_area(const Object& a, const Object& b)
{
cv::Rect_<float> inter = a.rect & b.rect;
return inter.area();
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
int i = left;
int j = right;
float p = faceobjects[(left + right) / 2].prob;
while (i <= j)
{
while (faceobjects[i].prob > p)
i++;
while (faceobjects[j].prob < p)
j--;
if (i <= j)
{
// swap
std::swap(faceobjects[i], faceobjects[j]);
i++;
j--;
}
}
#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(faceobjects, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(faceobjects, i, right);
}
}
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects)
{
if (faceobjects.empty())
return;
qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
}
static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
{
picked.clear();
const int n = faceobjects.size();
std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
areas[i] = faceobjects[i].rect.area();
}
for (int i = 0; i < n; i++)
{
const Object& a = faceobjects[i];
int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const Object& b = faceobjects[picked[j]];
// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}
if (keep)
picked.push_back(i);
}
}
static inline float sigmoid(float x)
{
return static_cast<float>(1.f / (1.f + exp(-x)));
}
// unsigmoid
static inline float unsigmoid(float y) {
return static_cast<float>(-1.0 * (log((1.0 / y) - 1.0)));
}
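// generate_proposals decodes one detection head: for every anchor and grid cell
// it applies the YOLOv5 sigmoid / grid-offset / anchor-scaling formulas and keeps
// boxes whose objectness * class score exceeds prob_threshold.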
static void generate_proposals(const ncnn::Mat &anchors, int stride, const ncnn::Mat &in_pad,
const ncnn::Mat &feat_blob, float prob_threshold,
std::vector <Object> &objects) {
const int num_grid = feat_blob.h;
float unsig_pro = 0;
if (prob_threshold > 0.6)
unsig_pro = unsigmoid(prob_threshold);
int num_grid_x;
int num_grid_y;
if (in_pad.w > in_pad.h) {
num_grid_x = in_pad.w / stride;
num_grid_y = num_grid / num_grid_x;
} else {
num_grid_y = in_pad.h / stride;
num_grid_x = num_grid / num_grid_y;
}
const int num_class = feat_blob.w - 5;
const int num_anchors = anchors.w / 2;
for (int q = 0; q < num_anchors; q++) {
const float anchor_w = anchors[q * 2];
const float anchor_h = anchors[q * 2 + 1];
const ncnn::Mat feat = feat_blob.channel(q);
for (int i = 0; i < num_grid_y; i++) {
for (int j = 0; j < num_grid_x; j++) {
const float *featptr = feat.row(i * num_grid_x + j);
// find class index with max class score
int class_index = 0;
float class_score = -FLT_MAX;
float box_score = featptr[4];
if (prob_threshold > 0.6) {
// while prob_threshold > 0.6, unsigmoid better than sigmoid
if (box_score > unsig_pro) {
for (int k = 0; k < num_class; k++) {
float score = featptr[5 + k];
if (score > class_score) {
class_index = k;
class_score = score;
}
}
float confidence = sigmoid(box_score) * sigmoid(class_score);
if (confidence >= prob_threshold) {
float dx = sigmoid(featptr[0]);
float dy = sigmoid(featptr[1]);
float dw = sigmoid(featptr[2]);
float dh = sigmoid(featptr[3]);
float pb_cx = (dx * 2.f - 0.5f + j) * stride;
float pb_cy = (dy * 2.f - 0.5f + i) * stride;
float pb_w = pow(dw * 2.f, 2) * anchor_w;
float pb_h = pow(dh * 2.f, 2) * anchor_h;
float x0 = pb_cx - pb_w * 0.5f;
float y0 = pb_cy - pb_h * 0.5f;
float x1 = pb_cx + pb_w * 0.5f;
float y1 = pb_cy + pb_h * 0.5f;
Object obj;
obj.rect.x = x0;
obj.rect.y = y0;
obj.rect.width = x1 - x0;
obj.rect.height = y1 - y0;
obj.label = class_index;
obj.prob = confidence;
objects.push_back(obj);
}
} else {
for (int k = 0; k < num_class; k++) {
float score = featptr[5 + k];
if (score > class_score) {
class_index = k;
class_score = score;
}
}
float confidence = sigmoid(box_score) * sigmoid(class_score);
if (confidence >= prob_threshold) {
float dx = sigmoid(featptr[0]);
float dy = sigmoid(featptr[1]);
float dw = sigmoid(featptr[2]);
float dh = sigmoid(featptr[3]);
float pb_cx = (dx * 2.f - 0.5f + j) * stride;
float pb_cy = (dy * 2.f - 0.5f + i) * stride;
float pb_w = pow(dw * 2.f, 2) * anchor_w;
float pb_h = pow(dh * 2.f, 2) * anchor_h;
float x0 = pb_cx - pb_w * 0.5f;
float y0 = pb_cy - pb_h * 0.5f;
float x1 = pb_cx + pb_w * 0.5f;
float y1 = pb_cy + pb_h * 0.5f;
Object obj;
obj.rect.x = x0;
obj.rect.y = y0;
obj.rect.width = x1 - x0;
obj.rect.height = y1 - y0;
obj.label = class_index;
obj.prob = confidence;
objects.push_back(obj);
}
}
}
}
}
}
}
static int detect_yolov5(const cv::Mat& bgr, std::vector<Object>& objects)
{
ncnn::Net yolov5;
#if USE_INT8
yolov5.opt.use_int8_inference=true;
#else
yolov5.opt.use_vulkan_compute = true;
yolov5.opt.use_bf16_storage = true;
#endif
// original pretrained model from https://github.com/ultralytics/yolov5
// the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
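// NOTE: replace the paths below with your own converted model files,
// e.g. lastsim.param / lastsim.bin (or the optimized last.param / last.bin)
// produced by onnx2ncnn and ncnnoptimize above.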
#if USE_INT8
yolov5.load_param("/home/corvin/Mask/weights/e.param");
yolov5.load_model("/home/corvin/Mask/weights/e.bin");
#else
yolov5.load_param("/home/corvin/Mask/weights/eopt.param");
yolov5.load_model("/home/corvin/Mask/weights/eopt.bin");
#endif
const int target_size = 320;
const float prob_threshold = 0.60f;
const float nms_threshold = 0.60f;
int img_w = bgr.cols;
int img_h = bgr.rows;
// letterbox pad to multiple of 32
int w = img_w;
int h = img_h;
float scale = 1.f;
if (w > h)
{
scale = (float)target_size / w;
w = target_size;
h = h * scale;
}
else
{
scale = (float)target_size / h;
h = target_size;
w = w * scale;
}
ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, img_w, img_h, w, h);
// pad to target_size rectangle
// yolov5/utils/datasets.py letterbox
int wpad = (w + 31) / 32 * 32 - w;
int hpad = (h + 31) / 32 * 32 - h;
ncnn::Mat in_pad;
ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);
const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in_pad.substract_mean_normalize(0, norm_vals);
ncnn::Extractor ex = yolov5.create_extractor();
ex.input("images", in_pad);
std::vector<Object> proposals;
// stride 8
{
ncnn::Mat out;
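// NOTE: "451" here (and "479" / "507" below) must be replaced with the output
// blob names found in your own lastsim.param, e.g. onnx::Sigmoid_647 (see the
// notes after the code).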
ex.extract("451", out);
ncnn::Mat anchors(6);
anchors[0] = 10.f;
anchors[1] = 13.f;
anchors[2] = 16.f;
anchors[3] = 30.f;
anchors[4] = 33.f;
anchors[5] = 23.f;
std::vector<Object> objects8;
generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);
proposals.insert(proposals.end(), objects8.begin(), objects8.end());
}
// stride 16
{
ncnn::Mat out;
ex.extract("479", out);
ncnn::Mat anchors(6);
anchors[0] = 30.f;
anchors[1] = 61.f;
anchors[2] = 62.f;
anchors[3] = 45.f;
anchors[4] = 59.f;
anchors[5] = 119.f;
std::vector<Object> objects16;
generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);
proposals.insert(proposals.end(), objects16.begin(), objects16.end());
}
// stride 32
{
ncnn::Mat out;
ex.extract("507", out);
ncnn::Mat anchors(6);
anchors[0] = 116.f;
anchors[1] = 90.f;
anchors[2] = 156.f;
anchors[3] = 198.f;
anchors[4] = 373.f;
anchors[5] = 326.f;
std::vector<Object> objects32;
generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);
proposals.insert(proposals.end(), objects32.begin(), objects32.end());
}
// sort all proposals by score from highest to lowest
qsort_descent_inplace(proposals);
// apply nms with nms_threshold
std::vector<int> picked;
nms_sorted_bboxes(proposals, picked, nms_threshold);
int count = picked.size();
objects.resize(count);
for (int i = 0; i < count; i++)
{
objects[i] = proposals[picked[i]];
// adjust offset to original unpadded
float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;
// clip
x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);
objects[i].rect.x = x0;
objects[i].rect.y = y0;
objects[i].rect.width = x1 - x0;
objects[i].rect.height = y1 - y0;
}
return 0;
}
static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects)
{
static const char* class_names[] = {
"face","face_mask"
};
cv::Mat image = bgr.clone();
for (size_t i = 0; i < objects.size(); i++)
{
const Object& obj = objects[i];
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(image, obj.rect, cv::Scalar(0, 255, 0));
char text[256];
sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > image.cols)
x = image.cols - label_size.width;
cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(image, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
#if USE_CAMERA
imshow("camera", image);
cv::waitKey(1);
#else
cv::imwrite("result.jpg", image);
#endif
}
#if USE_CAMERA
int main(int argc, char** argv)
{
cv::VideoCapture capture;
capture.open(0); // change this index to select which camera to open
cv::Mat frame;
while (true)
{
capture >> frame;
cv::Mat m = frame;
std::vector<Object> objects;
detect_yolov5(frame, objects);
draw_objects(m, objects);
if (cv::waitKey(30) >= 0)
break;
}
}
#else
int main(int argc, char** argv)
{
if (argc != 2)
{
fprintf(stderr, "Usage: %s [imagepath]\n", argv[0]);
return -1;
}
const char* imagepath = argv[1];
struct timespec begin, end;
double time; // elapsed time in ms
clock_gettime(CLOCK_MONOTONIC, &begin);
cv::Mat m = cv::imread(imagepath, 1);
if (m.empty())
{
fprintf(stderr, "cv::imread %s failed\n", imagepath);
return -1;
}
std::vector<Object> objects;
detect_yolov5(m, objects);
clock_gettime(CLOCK_MONOTONIC, &end);
time = (end.tv_sec - begin.tv_sec) * 1000.0 + (end.tv_nsec - begin.tv_nsec) / 1000000.0;
printf(">> Time : %lf ms\n", time);
draw_objects(m, objects);
return 0;
}
#endif
Change class_names in draw_objects to the categories you want to detect, e.g. "desk", "bicycle", "cup", "laptop", "trash", "box", "mecanum"; these must match the classes your model was trained with, which correspond to C:\Users\jxbj2\Desktop\yolov5lite\YOLOv5-Lite-master\models\v5lite-s.yaml.
Put the anchor values generated for your own model (e.g. 15,28, 19,35, 23,46) into the anchor code above; the first row of six values replaces the defaults (10, 13, 16, 30, 33, 23).
Make the same change in the other two corresponding places (the stride-16 and stride-32 anchors).
In the generated param file, change every Reshape layer's 0=x parameter to 0=-1.
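If editing the param file by hand gets tedious, the same change can be scripted. A minimal sketch that simply forces the first 0=... argument of every Reshape line in lastsim.param to -1, which is exactly what the manual edit above does:

import re

with open("lastsim.param") as f:
    lines = f.readlines()

with open("lastsim.param", "w") as f:
    for line in lines:
        fields = line.split()
        if fields and fields[0] == "Reshape":
            # force the first Reshape parameter (0=...) to -1
            line = re.sub(r"(?<=\s)0=\S+", "0=-1", line, count=1)
        f.write(line)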
Open lastsim.param, find the three output blob names (of the form onnx::Sigmoid_647), and put them into the corresponding ex.extract calls in the code.
Update the lastsim.param and lastsim.bin paths in yolov5.cpp, and put the two files in your test folder (use that path in the code).
Add the new example to ncnn/examples/CMakeLists.txt, registering it the same way as the existing examples (a sketch follows).
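In recent ncnn versions the examples are registered through a small macro, so one extra line patterned on the existing entries is enough; if your copy instead lists explicit add_executable()/target_link_libraries() calls, mirror one of those. The name yolov5_lite below assumes you saved the code above as examples/yolov5_lite.cpp, matching the executable name used later.

# in ncnn/examples/CMakeLists.txt, next to the existing examples
ncnn_add_example(yolov5_lite)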
Then run:
cd ncnn/build
cmake ..
make
to complete the build.
Open your test folder and copy the compiled executable into it. (Whether it reads from the camera or from an image is selected with the USE_CAMERA macro in the source; if you use an image, remember to put it in the test folder as well.)
cd pi/ceshi
./yolov5_lite
To be honest, on my Raspberry Pi 4B the YOLOv5-Lite-s results don't feel great; next I plan to try INT8 quantization.