Original video
Main idea:
Take the difference between the grayscale image of the current frame (currentGrayFrame) and the grayscale image of the previous frame (previousGrayFrame), and compare the two frames pixel by pixel.
Notation:
f_k(x, y): grayscale value of the current frame at (x, y)
f_{k-1}(x, y): grayscale value of the previous frame at (x, y)
D_k(x, y) = |f_k(x, y) - f_{k-1}(x, y)|: absolute difference between the current frame and the previous frame at (x, y)
Decision rule:
D_k(x, y) > Thresh: the difference exceeds the threshold Thresh, so the pixel is a foreground point
D_k(x, y) <= Thresh: the difference is at or below the threshold Thresh, so the pixel is a background point
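As a minimal sketch of this rule (the function name frameDifferenceMask is hypothetical; it assumes prev and curr are already single-channel 8-bit grayscale Mats of the same size and thresh is the threshold T), OpenCV's absdiff and threshold implement it directly:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Sketch only: `prev` and `curr` are CV_8UC1 grayscale frames of equal size.
cv::Mat frameDifferenceMask(const cv::Mat& prev, const cv::Mat& curr, int thresh)
{
    cv::Mat diff, mask;
    cv::absdiff(curr, prev, diff);                              // D_k(x, y) = |f_k - f_{k-1}|
    cv::threshold(diff, mask, thresh, 255, cv::THRESH_BINARY);  // > Thresh -> 255 (foreground), else 0 (background)
    return mask;
}

The full listing below computes the same quantity with subtract into a signed CV_16S image, then abs and convertTo; for two 8-bit inputs the result is identical, absdiff just does it in one step.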
Advantages:
Runs in real time: the per-pixel subtraction is very cheap.
Disadvantages:
1. If the moving object is large and uniformly colored, the detected foreground has holes inside the object.
2. The method is not suitable when the illumination changes strongly.
Code:
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;

const unsigned char FOREGROUND = 255;
int thresh = 30;

int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        cout << "Usage: " << argv[0] << " <video file>" << endl;
        return -1;
    }
    VideoCapture video(argv[1]);
    // Make sure the video can be opened
    if (!video.isOpened())
        return -1;
    // Current frame (BGR)
    Mat currentBGRFrame;
    // Grayscale images of the previous and current frames
    Mat previousGrayFrame;
    Mat currentGrayFrame;
    // Frame difference (signed, CV_16SC1)
    Mat frameDifference;
    // Absolute value of the frame difference
    Mat absFrameDifference;
    // Foreground mask
    Mat segmentation;
    // Window for the original video
    namedWindow("video", 1);
    // Window for the foreground
    namedWindow("segmentation", 1);
    createTrackbar("Threshold:", "segmentation", &thresh, FOREGROUND, NULL);
    // Frame counter
    int numberFrame = 0;
    // Structuring element for the morphological operations
    Mat morphologyKernel = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
    for (;;)
    {
        // Read the current frame
        video >> currentBGRFrame;
        // Stop when no more frames are available
        if (!currentBGRFrame.data)
            break;
        numberFrame++;
        if (numberFrame == 1)
        {
            // Color space conversion
            cvtColor(currentBGRFrame, currentGrayFrame, COLOR_BGR2GRAY);
            // Save the grayscale image of the current frame
            previousGrayFrame = currentGrayFrame.clone();
            imshow("video", currentBGRFrame);
            continue;
        }
        else
        {
            // Color space conversion
            cvtColor(currentBGRFrame, currentGrayFrame, COLOR_BGR2GRAY);
            // src1 - src2
            subtract(currentGrayFrame, previousGrayFrame, frameDifference, Mat(), CV_16SC1);
            // Take the absolute value
            absFrameDifference = abs(frameDifference);
            // Convert back to 8-bit depth
            absFrameDifference.convertTo(absFrameDifference, CV_8UC1, 1, 0);
            // Thresholding
            threshold(absFrameDifference, segmentation, double(thresh), double(FOREGROUND), THRESH_BINARY);
            // Median filtering
            medianBlur(segmentation, segmentation, 3);
            // Morphological processing (opening/closing)
            //morphologyEx(segmentation, segmentation, MORPH_OPEN, morphologyKernel, Point(-1, -1), 1, BORDER_REPLICATE);
            morphologyEx(segmentation, segmentation, MORPH_CLOSE, morphologyKernel, Point(-1, -1), 2, BORDER_REPLICATE);
            // Show the binary foreground mask
            imshow("segmentation", segmentation);
            // Find contours
            vector< vector<Point> > contours;
            vector<Vec4i> hierarchy;
            findContours(segmentation, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); // CV_RETR_TREE
            vector< vector<Point> > contours_poly(contours.size());
            // Draw the moving objects: a first, rough filtering of the motion regions
            for (size_t index = 0; index < contours.size(); index++)
            {
                approxPolyDP(Mat(contours[index]), contours_poly[index], 3, true);
                Rect rect = boundingRect(Mat(contours_poly[index]));
                rectangle(currentBGRFrame, rect, Scalar(0, 255, 255), 2);
            }
            // Show the original video with the bounding boxes
            imshow("video", currentBGRFrame);
            // Save the grayscale image of the current frame for the next iteration
            previousGrayFrame = currentGrayFrame.clone();
        }
        if (waitKey(33) == 'q')
            break;
    }
    return 0;
}
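To try the listing (assuming an OpenCV 2.x installation that pkg-config can find, and a hypothetical file name frame_diff.cpp), something like g++ frame_diff.cpp -o frame_diff $(pkg-config --cflags --libs opencv) should build it. Run it with a video file as the only argument, e.g. ./frame_diff input.avi, adjust the threshold with the trackbar in the "segmentation" window, and press 'q' to quit.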
As the results show, when the moving object's color is nearly uniform, the detected foreground tends to have holes inside the object.
-- The next post will introduce an improvement on the two-frame difference method: the three-frame difference method.