On July 18, 2020, the OpenCV website released the latest version, OpenCV 4.4.0, and what excited me most is that it adds support for YOLOv4. I had been struggling with the YOLO series for a while: YOLOv4 performs well and is quite accurate, but OpenCV did not support it at the time, so when I built the UI in Qt I could not load the YOLOv4 weights and run detection, and in the end I had to fall back to YOLOv3.
Using pytorch-yolov4 also gives good results, but the trained weights come out as .pth files, which would force me to build the UI with PyQt. Considering that Python runs far slower than C++, and that this project eventually has to run detection in the field, I gave that idea up.
At the time I felt that training your own model with pytorch-yolov4 was only good for playing around on your own machine; it could not be turned into a deployable project.
But now OpenCV 4.4 supports YOLOv4. It's here... (a month or so late)
1. OpenCV 4.4 installer and the YOLOv4 configuration files:
链接:https://pan.baidu.com/s/1Xg-Er1mJNCvSQyMt3B2HBw
Extraction code: p2ye
2. For configuring OpenCV in Visual Studio, see the following post:
https://blog.csdn.net/shuiyixin/article/details/105998661
PS: My machine runs VS2019, but I still pointed the Path system environment variable at the vc15 folder.
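Once the include directory, library directory and linker input are set up as described in that post, a quick way to verify the configuration is to build and run a tiny program that only prints the OpenCV version (this is just a sanity-check sketch of mine, separate from the detection program below):
#include <iostream>
#include <opencv2/opencv.hpp>

int main()
{
    // Prints "4.4.0" if the project is configured against the OpenCV 4.4 package
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    return 0;
}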
Source program:
#pragma once
#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
using namespace std;
using namespace cv;
using namespace cv::dnn;
int main()
{
    //--------------------------------------- Load class names ---------------------------------------
    ifstream classNamesFile("./model/coco.names");
    vector<string> classNamesVec;
    if (classNamesFile.is_open())
    {
        string className = "";
        while (std::getline(classNamesFile, className))
            classNamesVec.push_back(className);
    }
    for (int i = 0; i < classNamesVec.size(); i++) {
        cout << i + 1 << "\t" << classNamesVec[i].c_str() << endl;
    }
    //--------------------------------------- Model setup ---------------------------------------
    String cfg = "./model/yolov4.cfg";
    String weight = "./model/yolov4.weights";
    // Read the network
    dnn::Net net = readNetFromDarknet(cfg, weight);
    // Preprocess the input image and feed it to the network
    // (Darknet models are trained on RGB images, so passing swapRB=true to blobFromImage is also common)
    Mat frame = imread("./image/timg.jpg");
    imshow("src", frame);
    Mat inputBlob = blobFromImage(frame, 1.0 / 255, Size(608, 608), Scalar());
    net.setInput(inputBlob);
    // Get the names of the unconnected output layers
    std::vector<String> outNames = net.getUnconnectedOutLayersNames();
    for (int i = 0; i < outNames.size(); i++) {
        cout << "output layer name : " << outNames[i].c_str() << endl;
    }
    std::vector<Mat> outs;
    net.forward(outs, outNames);
    //--------------------------------------- Object detection ---------------------------------------
    // Variables used below
    float* data;
    Mat scores;
    vector<Rect> boxes;
    vector<int> classIds;
    vector<float> confidences;
    int centerX, centerY, width, height, left, top;
    float confidenceThreshold = 0.2; // confidence threshold
    double confidence;
    Point classIdPoint;
    // Find all detections and their locations
    // Each output row is [center_x, center_y, width, height, objectness, class scores...], normalized to [0, 1]
    for (size_t i = 0; i < outs.size(); ++i) {
        data = (float*)outs[i].data;
        for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols) {
            scores = outs[i].row(j).colRange(5, outs[i].cols);
            minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
            if (confidence > confidenceThreshold) {
                centerX = (int)(data[0] * frame.cols);
                centerY = (int)(data[1] * frame.rows);
                width = (int)(data[2] * frame.cols);
                height = (int)(data[3] * frame.rows);
                left = centerX - width / 2;
                top = centerY - height / 2;
                classIds.push_back(classIdPoint.x);
                confidences.push_back((float)confidence);
                boxes.push_back(Rect(left, top, width, height));
            }
        }
    }
    // Non-maximum suppression: score threshold 0.3, NMS threshold 0.2
    vector<int> indices;
    NMSBoxes(boxes, confidences, 0.3, 0.2, indices);
    //--------------------------------------- Display the results ---------------------------------------
    Scalar rectColor, textColor; // colors of the box and the label text
    Rect box, textBox;
    int idx; // index of a detection kept after NMS
    String className;
    Size labelSize;
    for (size_t i = 0; i < indices.size(); ++i) {
        idx = indices[i];
        className = classNamesVec[classIds[idx]];
        labelSize = getTextSize(className, FONT_HERSHEY_SIMPLEX, 0.5, 1, 0);
        box = boxes[idx];
        textBox = Rect(Point(box.x - 1, box.y),
            Point(box.x + labelSize.width, box.y - labelSize.height));
        rectColor = Scalar(idx * 11 % 256, idx * 22 % 256, idx * 33 % 256);
        textColor = Scalar(255 - idx * 11 % 256, 255 - idx * 22 % 256, 255 - idx * 33 % 256);
        rectangle(frame, box, rectColor, 2, 8, 0);
        rectangle(frame, textBox, rectColor, -1, 8, 0);
        putText(frame, className.c_str(), Point(box.x, box.y - 2), FONT_HERSHEY_SIMPLEX, 0.5, textColor, 1, 8);
    }
    imshow("dst", frame);
    waitKey(0);
    return 0;
}
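By default the DNN module runs this network on the CPU. If your OpenCV build was compiled with CUDA support (the prebuilt Windows package linked above was not, so treat this purely as an optional sketch), you can request the GPU backend right after readNetFromDarknet:
// Optional: prefer the CUDA backend/target (available since OpenCV 4.2).
// If the build has no CUDA support, OpenCV prints a warning and falls back to the CPU path.
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);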
1. First train your own weight file with Darknet-YOLOv4. For the detailed steps, see my post:
YOLOv4 (Darknet) training process on Windows 10
https://blog.csdn.net/qq_45445740/article/details/108253155
2. After training, your weight files are generated under darknet-master\build\darknet\x64\backup; here I use the yolo-obj_4000.weights file.
You also need the yolo-obj.cfg file from the darknet-master\build\darknet\x64 folder.
3. Change the few paths in the program above to the configuration and weight files for the objects you want to detect, as in the sketch below.
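For example, assuming the class list for your own dataset is saved as obj.names next to the cfg and weights (the file names below are placeholders; use whatever your training actually produced), only these lines need to change. If your cfg was trained with a different input resolution, also adjust Size(608, 608) in blobFromImage to match the width/height in the cfg.
// Point the program at your own class list, network config and trained weights
ifstream classNamesFile("./model/obj.names");      // your classes, one name per line
String cfg = "./model/yolo-obj.cfg";               // the cfg used for training
String weight = "./model/yolo-obj_4000.weights";   // the weights from the backup folder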