Train a YOLOv3 model to recognize digit strings such as license plates, seven-segment (digital tube) displays, and scale readings. The approach is end to end: the whole number string is recognized quickly, without any character segmentation step.
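The core idea is to treat each digit 0-9 as its own detection class: YOLOv3 finds every digit box in the image, and the number string is recovered by sorting the detections from left to right on their x coordinate and concatenating the class labels. Below is a minimal, self-contained sketch of just that post-processing step (separate from the full listing that follows, and assuming the class ids 0-9 map directly to the digit characters):
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

//hypothetical helper type used only in this sketch: one detected digit box
struct Detection { int classId; int x; };

//sort the detections left to right and concatenate their digit characters
std::string boxesToString(std::vector<Detection> dets)
{
    std::sort(dets.begin(), dets.end(),
              [](const Detection& a, const Detection& b) { return a.x < b.x; });
    std::string s;
    for (const Detection& d : dets) s += char('0' + d.classId);
    return s;
}

int main()
{
    //detections arrive in arbitrary order from the detector
    std::vector<Detection> dets = { {5, 60}, {0, 10}, {2, 35} };
    std::cout << boxesToString(dets) << std::endl; //prints "025"
    return 0;
}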
The full program below calls the model from C++ and outputs the recognized numeric value directly:
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <ctime>
#include <cstdlib>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
using namespace std;
using namespace cv;
using namespace dnn;
vector<string> classes;
vector<int> indices;
int indices_length;
//int *segnum1 = new int[indices_length]; //dynamically allocate an array as long as the number of detected characters, used to hold the recognized digits
//char *segnum2 = new char[indices_length]; //char array used to convert segnum1 from int to char so a decimal point character can be inserted later
int segnum1[16]; //recognized digits, stored left to right
char segnum2[16]; //the same digits as characters, sized with room for a decimal point and the terminating '\0' that atof() needs
//sort the recognized digits by their position in the image
struct node
{
int value;
int index;
};
bool cmp(struct node a, struct node b)
{
if (a.value < b.value)
{
return true;
}
return false;
}
//fill idx with the indices that would sort v in ascending order
template <typename T>
void sort_indexes(vector<size_t> &idx, const vector<T> &v)
{
node* a = new node[v.size()];
for (size_t i = 0; i < v.size(); i++)
{
a[i].value = (int)v[i];
a[i].index = (int)i;
}
std::sort(a, a + v.size(), cmp);
for (size_t i = 0; i < v.size(); i++)
{
idx.push_back(a[i].index);
}
delete[] a;
}
//Original source: https://blog.csdn.net/u013925378/article/details/83865707
vector<String> getOutputsNames(Net&net)
{
static vector<String> names;
if (names.empty())
{
//Get the indices of the output layers, i.e. the layers with unconnected outputs
vector<int> outLayers = net.getUnconnectedOutLayers();
//get the names of all the layers in the network
vector<String> layersNames = net.getLayerNames();
// Get the names of the output layers in names
names.resize(outLayers.size());
for (size_t i = 0; i < outLayers.size(); ++i)
names[i] = layersNames[outLayers[i] - 1];
}
return names;
}
void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat& frame)
{
//Draw a rectangle displaying the bounding box
rectangle(frame, Point(left, top), Point(right, bottom), Scalar(0, 0, 255), 3); //bounding box colour and line thickness
//Get the label for the class name and its confidence
string label = format("%.3f", conf); //keep three decimal places of the confidence
if (!classes.empty())
{
CV_Assert(classId < (int)classes.size());
label = classes[classId] + ":" + label;
}
//Display the label at the top of the bounding box
int baseLine;
Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 1, 3, &baseLine); //font scale and thickness must match the putText call below
top = max(top, labelSize.height);
rectangle(frame, Point(left, top - round(0.5*labelSize.height)), Point(left + round(0.5*labelSize.width), top + baseLine), Scalar(255, 255, 255), FILLED);
putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 1, Scalar(255, 0, 0), 3); //1 is the font scale, 3 is the stroke thickness
}
void postprocess(Mat& frame, const vector<Mat>& outs, float confThreshold, float nmsThreshold)
{
vector<int> classIds;
vector<float> confidences;
vector<Rect> boxes;
for (size_t i = 0; i < outs.size(); ++i)
{
// Scan through all the bounding boxes output from the network and keep only the
// ones with high confidence scores. Assign the box's class label as the class
// with the highest score for the box.
float* data = (float*)outs[i].data;
for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
{
Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
Point classIdPoint;
double confidence;
// Get the value and location of the maximum score
minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
if (confidence > confThreshold)
{
int centerX = (int)(data[0] * frame.cols);
int centerY = (int)(data[1] * frame.rows);
int width = (int)(data[2] * frame.cols);
int height = (int)(data[3] * frame.rows);
int left = centerX - width / 2;
int top = centerY - height / 2;
classIds.push_back(classIdPoint.x);
confidences.push_back((float)confidence);
boxes.push_back(Rect(left, top, width, height));
}
}
}
// Perform non maximum suppression to eliminate redundant overlapping boxes with
// lower confidences
NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);
vector<int> NumClassid;
vector<int> Num_X;
indices_length = std::min((int)indices.size(), 14); //clamp so the fixed-size digit buffers are never overrun
for (size_t i = 0; i < indices.size(); ++i)
{
int idx = indices[i];
Rect box = boxes[idx];
drawPred(classIds[idx], confidences[idx], box.x, box.y,
box.x + box.width, box.y + box.height, frame);
Num_X.push_back(box.x);
NumClassid.push_back(classIds[idx]);
}
vector<size_t> idx;
sort_indexes(idx, Num_X);
for (int i = 0; i < indices_length; i++)
{
//cout << NumClassid[idx[i]] << endl;
segnum1[i] = NumClassid[idx[i]]; //store the recognized digits in left-to-right order
}
}
Mat numReco(string imageFile)
{
string names_file = "D:\\PointerImg\\darknet-master-num\\data\\voc.names";
String model_def = "D:\\PointerImg\\darknet-master-num\\cfg\\yolov3-voc.cfg";
String weights = "D:\\PointerImg\\darknet-master-num\\backup\\yolov3-voc_last.weights";
int in_w, in_h;
double thresh = 0.7;
double nms_thresh = 0.25;
in_w = in_h = 320;
//read names (only the first time, since numReco is called once per image)
if (classes.empty())
{
ifstream ifs(names_file.c_str());
string line;
while (getline(ifs, line)) classes.push_back(line);
}
//init model
Net net = readNetFromDarknet(model_def, weights);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);
//read image and forward
//VideoCapture capture(2); //alternatively, capture frames from a camera instead of reading an image file
Mat blob, frame;
//capture >> frame;
frame = imread(imageFile);
if (!frame.data)
{
cout << "输入图片不存在!" << endl;
system("pause");
exit(0);
}
blobFromImage(frame, blob, 1 / 255.0, Size(in_w, in_h), Scalar(), true, false);
vector<Mat> mat_blob;
imagesFromBlob(blob, mat_blob);
//Sets the input to the network
net.setInput(blob);
// Runs the forward pass to get output of the output layers
vector<Mat> outs;
net.forward(outs, getOutputsNames(net));
postprocess(frame, outs, thresh, nms_thresh);
vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
string label = format("Inference time for a frame : %.2f ms", t);
putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
imshow("num location", frame);
//imwrite("../../data/predicts/meterLocal.jpg", frame);
return frame;
}
double ScalesNumReco()
{
//convert each recognized digit from int to its ASCII character
for (int i = 0; i < indices_length; i++)
{
segnum2[i] = char(segnum1[i] + 48);
}
char b = char(46); //ASCII 46 is the decimal point '.'
if (segnum2[0] == '0' && indices_length > 1) //if the first recognized character is '0' and more than one digit was found, the reading has the form 0.xxx
{
for (int i = indices_length - 1; i != 0; i--) //shift from the back forwards so no character is overwritten
{
segnum2[i + 1] = segnum2[i];
}
segnum2[1] = b;
}
double number = atof(segnum2); //convert the character array to a double (the buffer is zeroed before each image, so it is always null-terminated)
//meterNumReco.push_back(number);
//delete segnum1, segnum2; //free the dynamic arrays
//indices.clear();
return number;
}
int main()
{
clock_t startTime, endTime;
startTime = clock();
//String imageFile1 = "E:/picture/meter/ScaleNumReco/scale_num/*.jpg";
String imageFile1 = "E:/picture/meter/ScaleNumReco/scale_num/022150.jpg";
vector<cv::String> image_files;
glob(imageFile1, image_files);
if (image_files.size() == 0) {
std::cout << "No image files[jpg]" << std::endl;
return 0;
}
string str;
ofstream out("E:/picture/meter/ScaleNumReco/out.txt");
for (unsigned int frame = 0; frame < image_files.size(); ++frame) //image_files.size() is the total number of images matched by the glob
{
stringstream ss1, ss2;
ss1 << frame;
ss1 >> str;
cout << image_files[frame] << endl;
Mat prediction = numReco(image_files[frame]);
double result = ScalesNumReco();
cout << "刻度数字为:" << result << endl;
imwrite("E:/picture/meter/ScaleNumReco/scale_num_pre/" + str + ".jpg", prediction);
out << image_files[frame] << ": " << result << endl;
//clear the digit buffers, resetting every element to 0 before the next image
for (int i = 0; i < 16; i++)
{
segnum1[i] = 0;
segnum2[i] = 0;
}
}
out.close();
endTime = clock(); //stop timing
cout << "The total run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
system("pause");
return 0;
}