This section mainly covers automatically cutting out training samples through grayscale conversion, adaptive binarization, finding contours in the region of interest, filtering the contours, and segmenting the ROI from each remaining contour.
As shown in the figure, this approach is widely used in license plate segmentation. The overall idea can be seen in my program below; the details (thresholds, size limits, and so on) can be adapted to the size of the object being detected and to the background.
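Before the full program, here is a minimal single-image sketch of the same pipeline (grayscale, equalization, adaptive binarization, contour finding, size/aspect filtering, ROI cropping), written against the OpenCV 2.x C++ API. The file names are placeholders, and the size filter here is a simplified version applied to the upright bounding box rather than the minimum-area rotated rect used in the full program below:

#include <opencv2/opencv.hpp>
using namespace cv;

// Minimal sketch of the sample-cutting pipeline for one image
// (parameters roughly follow the full program below).
int main()
{
    Mat src = imread("frame.bmp");              // placeholder input image
    if (src.empty()) return -1;

    Mat gray, bin;
    cvtColor(src, gray, CV_BGR2GRAY);           // 1. grayscale
    medianBlur(gray, gray, 3);
    equalizeHist(gray, gray);
    adaptiveThreshold(gray, bin, 255,           // 2. adaptive binarization
                      CV_ADAPTIVE_THRESH_MEAN_C,
                      CV_THRESH_BINARY_INV, 19, 9);
    medianBlur(bin, bin, 5);

    std::vector<std::vector<Point> > contours;  // 3. find external contours
    findContours(bin, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    for (size_t i = 0; i < contours.size(); i++) {
        Rect box = boundingRect(Mat(contours[i]));
        // 4. filter candidates by area and aspect ratio (simplified limits)
        float r = (float)box.width / (float)box.height;
        if (box.area() < 200 || box.area() > 1250 || r < 0.2f || r > 0.8f)
            continue;
        // 5. cut the ROI out of the original image and save it as a sample
        Mat sample;
        resize(src(box), sample, Size(22, 35));
        imwrite(format("sample_%d.bmp", (int)i), sample);
    }
    return 0;
}

The full program below does the same thing frame by frame on a set of videos, with the filtering done in verifySizes() on the minimum-area rotated rect of each contour.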
#include <cv.h>
#include <highgui.h>
#include <cvaux.h>
#include <stdio.h>
#include <iostream>

using namespace std;
using namespace cv;

#define showSteps false
#define saveModel false
// number of videos in the folder
#define NumVideo  16

// strip the directory and the extension from a path
string getFilename(string s) {
    char sep = '/';
    char sepExt = '.';
#ifdef _WIN32
    sep = '\\';
#endif
    size_t i = s.rfind(sep, s.length());
    if (i != string::npos) {
        string fn = s.substr(i + 1, s.length() - i);
        size_t j = fn.rfind(sepExt, fn.length());
        if (j != string::npos) {
            return fn.substr(0, j);
        } else {
            return fn;
        }
    } else {
        return "";
    }
}

// integral-image (local mean) adaptive threshold, defined at the end of the file
void adaptiveThreshold(const Mat& input, Mat& bin, int width, int height);

// filter candidate regions by area, aspect ratio and absolute size
bool verifySizes(RotatedRect mr) {
    float error  = 0.6f;
    // Spain car plate size: 20*40, aspect 360
    float aspect = 0.5f;
    // Set a min and max area. All other patches are discarded.
    int min = 20 * aspect * 20;              // minimum area 200
    int max = 50 * aspect * 50;              // maximum area 1250
    // Get only patches that match the aspect ratio.
    float rmin = aspect - aspect * error;    // 0.5 - 0.5*0.6 = 0.2
    float rmax = aspect + aspect * error;    // 0.8
    int   area = mr.size.height * mr.size.width;
    float r    = (float)mr.size.width / (float)mr.size.height;
    /* if (r < 1) r = (float)mr.size.height / (float)mr.size.width; */

    if ((area < min || area > max) || (r < rmin || r > rmax) ||
        (mr.size.height >= 47 || mr.size.height <= 26) ||
        (mr.size.width  <= 9  || mr.size.width  >= 31)) {
        // alternative limits used earlier: height in (24,45), width in (6,27)
        return false;
    } else {
        return true;
    }
}

int main(int argc, char** argv)
{
    // initialize a video capture
    char* video_name = (char*)malloc(500 * sizeof(char));
    char* image_name = (char*)malloc(100 * sizeof(char));
    CvCapture* capture = NULL;

    for (int jj = 1; jj <= NumVideo; jj++)
    {
        printf("\nVideo #%d\n", jj);
        sprintf_s(video_name, 500, "E:\\video\\%d.avi", jj);   // video file to read
        // sprintf_s(video_name, 500, "I:\\ship\\%d.avi", jj);
        capture = cvCaptureFromAVI(video_name);
        if (capture == NULL) {
            printf("Failed to open the video file!\n");
            continue;
        }

        int frameH    = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
        int frameW    = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
        int fps       = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
        int numFrames = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
        printf("\tvideo height : %d\n\tvideo width : %d\n\tfps : %d\n\tframe numbers : %d\n",
               frameH, frameW, fps, numFrames);

        Mat input_image = cvQueryFrame(capture);   // grab the first frame
        namedWindow("Source Image", 1);

        int imageNum = 0;
        while (1)
        {
            imageNum++;
            // optionally wait for the space key before cutting samples from a frame:
            // int c = cvWaitKey(0); if ((char)c != (char)32) continue;

            IplImage* frame = cvQueryFrame(capture);   // grab one frame
            if (!frame) {                              // invalid frame / end of video
                cout << "Could not read a frame from the video" << endl;
                break;
            }
            input_image = frame;
            cout << "\nworking with frame: " << imageNum << "\n";

            Mat img = input_image.clone();         // keep an untouched copy for cropping

            // 1. grayscale + median filter
            Mat img_gray;
            cvtColor(input_image, img_gray, CV_BGR2GRAY);
            medianBlur(img_gray, img_gray, 3);
            if (showSteps) {
                namedWindow("img_gray", 1);
                imshow("img_gray", img_gray);
            }

            // 2. histogram equalization
            Mat img_equal = img_gray.clone();
            equalizeHist(img_equal, img_equal);
            if (showSteps) {
                namedWindow("img_equal", 1);
                imshow("img_equal", img_equal);
            }

            // 3. adaptive binarization + median filter
            Mat img_grayTemp;
            img_equal.copyTo(img_grayTemp);
            Mat img_binary;
            img_equal.copyTo(img_binary);
            int blockSize  = 19;
            int constValue = 9;
            // threshold(img_grayTemp, img_binary, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV);
            adaptiveThreshold(img_grayTemp, img_binary, 255, CV_ADAPTIVE_THRESH_MEAN_C,
                              CV_THRESH_BINARY_INV, blockSize, constValue);
            medianBlur(img_binary, img_binary, 5);
            if (showSteps) {
                namedWindow("img_binary", 1);
                imshow("img_binary", img_binary);
            }

            // 4. find contours of possible targets
            vector< vector<Point> > contours;
            findContours(img_binary, contours,
                         CV_RETR_EXTERNAL,        // retrieve only the external contours
                         CV_CHAIN_APPROX_NONE);   // all pixels of each contour
            if (showSteps)
                cout << "the number of contours is: " << contours.size() << "\n";

            // 5. remove patches whose aspect ratio or area is outside the limits
            vector< vector<Point> >::iterator itc = contours.begin();
            vector<RotatedRect> rects;
            while (itc != contours.end()) {
                RotatedRect mr = minAreaRect(Mat(*itc));   // minimum-area rotated rect of the contour
                if (!verifySizes(mr)) {
                    itc = contours.erase(itc);
                } else {
                    ++itc;
                    rects.push_back(mr);
                }
            }
            if (showSteps)
                cout << "number of targets: " << contours.size() << "\n";

            // 6. get upright bounding rects, crop the ROIs and save them as samples
            vector<Rect> boundRect(contours.size());
            for (int i = 0; i < (int)contours.size(); i++) {
                boundRect[i] = boundingRect(Mat(contours[i]));   // corner coordinates of the region
                int br_y = boundRect[i].br().y;
                if (br_y >= 199) br_y = 198;                     // clamp the bottom edge (hard-coded for this frame height)
                rectangle(input_image,
                          Point(boundRect[i].tl().x, boundRect[i].tl().y),
                          Point(boundRect[i].br().x, br_y),
                          cv::Scalar(0, 0, 255), 2, 8, 0);
                cout << boundRect[i].tl() << "," << boundRect[i].br()
                     << "[" << boundRect[i].br().x - boundRect[i].tl().x
                     << "," << boundRect[i].br().y - boundRect[i].tl().y << "]" << endl;

                // cut the region of interest out of the untouched copy
                Rect box;
                box.x      = boundRect[i].tl().x;
                box.y      = boundRect[i].tl().y;
                box.width  = boundRect[i].br().x - boundRect[i].tl().x;
                box.height = br_y - boundRect[i].tl().y;
                Mat imageROI(img, box);

                // Version 1 (alternative): resize the ROI to 10*40, adaptive-threshold and
                // median-filter it, then save the binary patch to ..\tempBinary\ as a template.

                // Version 2: resize the ROI to 22*35 and save the grayscale patch as a sample
                Mat imageROIResize;
                resize(imageROI, imageROIResize, cvSize(22, 35));
                // equalizeHist(imageROIResize, imageROIResize);
                sprintf(image_name, "%s%d%s%d%s%d%s", "..\\tempGray\\", jj, "_", imageNum, "_", i, ".bmp");
                imwrite(image_name, imageROIResize);
            }

            if (showSteps) {
                imshow("Source Image", input_image);
            }

            if (imageNum == numFrames - 2)
                break;
        }
        cvWaitKey(0);
        cvReleaseCapture(&capture);
    }

    free(video_name);
    free(image_name);
    return 0;
}

// Integral-image adaptive threshold: each pixel is compared against the mean of
// its SxS neighbourhood and set to 0 when it is more than T percent darker.
void adaptiveThreshold(const Mat& input, Mat& bin, int width, int height)
{
    int S  = width >> 3;   // window size: one eighth of the image width
    int T  = 15;           // threshold percentage
    int s2 = S / 2;
    input.copyTo(bin);

    // create the integral image (column-wise running sums)
    unsigned long* integralImg = (unsigned long*)malloc(width * height * sizeof(unsigned long));
    for (int i = 0; i < width; i++) {
        unsigned long sum = 0;                 // reset this column sum
        for (int j = 0; j < height; j++) {
            int index = j * width + i;
            sum += input.ptr<uchar>(j)[i];
            if (i == 0)
                integralImg[index] = sum;
            else
                integralImg[index] = integralImg[index - 1] + sum;
        }
    }

    // perform the thresholding
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            // set the SxS region and clamp it to the image border
            int x1 = i - s2, x2 = i + s2;
            int y1 = j - s2, y2 = j + s2;
            if (x1 < 0) x1 = 0;
            if (x2 >= width)  x2 = width - 1;
            if (y1 < 0) y1 = 0;
            if (y2 >= height) y2 = height - 1;
            long count = (x2 - x1) * (y2 - y1);

            // I(x,y) = s(x2,y2) - s(x1,y2) - s(x2,y1) + s(x1,y1)
            long sum = (long)(integralImg[y2 * width + x2] - integralImg[y1 * width + x2]
                            - integralImg[y2 * width + x1] + integralImg[y1 * width + x1]);

            if ((long)(input.ptr<uchar>(j)[i] * count) < (long)(sum * (100 - T) / 100))
                bin.ptr<uchar>(j)[i] = 0;
            else
                bin.ptr<uchar>(j)[i] = 255;
        }
    }
    free(integralImg);
}
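The hand-written adaptiveThreshold at the end is an integral-image local-mean threshold: a pixel is set to 0 when pixel*count < sum*(100-T)/100, i.e. when it is more than T = 15 percent darker than the mean of its SxS neighbourhood, where S is one eighth of the image width. The current main() uses the built-in cv::adaptiveThreshold instead, but the helper can be swapped in. A small usage sketch, assuming the includes and the helper defined above and a placeholder file name:

// Hypothetical usage of the hand-written integral-image threshold above.
Mat gray = imread("frame.bmp", CV_LOAD_IMAGE_GRAYSCALE);   // placeholder input
Mat bin;
adaptiveThreshold(gray, bin, gray.cols, gray.rows);         // custom 4-argument overload
// A roughly comparable result with the built-in call (mean over an odd-sized block,
// additive constant instead of a percentage):
Mat bin2;
adaptiveThreshold(gray, bin2, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,
                  (gray.cols / 8) | 1, 0);

The percentage form scales with the local brightness, while the built-in call subtracts a fixed constant from the local mean, so the two only match approximately.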
The JDI studio mainly works on video and image processing, security surveillance, and machine vision projects. All of its members graduated from top universities in China. The studio currently has five members: two have focused on security surveillance for eight years, and three have focused on image processing and application development for two years.
The studio's core strengths are security surveillance system development, real-time high-frame-rate video capture, application software development, and image/video processing and algorithm development. From PC-based development to embedded development, from Android development to application development on Linux, from software development to hardware control systems, and from front-end camera capture and processing to back-end recognition, the studio can deliver quickly and with good quality. The specific cooperation model is described in the attached development contract; all JDI development work is carried out strictly according to the contract, with two to six months of follow-up software maintenance and upgrades.
We take on machine vision inspection, image processing, OCR, license plate recognition, face recognition, and other kinds of project development. QQ: 896922782
http://blog.csdn.net/zhubenfulovepoem/article/details/12343219