Recently, while struggling with a tracking problem in a project, I spent some idle time reading the relevant source code of OpenCV 2.4.5 and happened to notice that the contrib folder implements several object tracking algorithms, including featuretracker, hybridtracker, and detection_based_tracker. Since this code lives in the contrib folder, it cannot be called directly (unless you rebuild OpenCV yourself). I extracted the source code, but it still has bugs; fellow readers are welcome to download it, hunt for the remaining bugs together, and exchange ideas.
First, here is the relevant code from the main function:
#include <iostream>
#include <opencv2/opencv.hpp>
#include "FeatureTraker.hpp"

using namespace cv;
using namespace std;

Rect box(0, 0, 0, 0);
bool drawing_box = false;
bool gotBB = false;
string video = "E:\\Video\\test1.avi";

// Bounding-box mouse callback: drag with the left button to select the target
void mouseHandler(int event, int x, int y, int flags, void *param)
{
    switch (event)
    {
    case CV_EVENT_MOUSEMOVE:
        if (drawing_box)
        {
            box.width  = x - box.x;
            box.height = y - box.y;
        }
        break;
    case CV_EVENT_LBUTTONDOWN:
        drawing_box = true;
        box = Rect(x, y, 0, 0);
        break;
    case CV_EVENT_LBUTTONUP:
        drawing_box = false;
        if (box.width < 0)
        {
            box.x += box.width;
            box.width *= -1;
        }
        if (box.height < 0)
        {
            box.y += box.height;
            box.height *= -1;
        }
        gotBB = true;
        break;
    }
}

void drawBox(Mat& image, CvRect box, Scalar color, int thick)
{
    rectangle(image, cvPoint(box.x, box.y),
              cvPoint(box.x + box.width, box.y + box.height), color, thick);
}

int main(int argc, char* argv[])
{
    VideoCapture capture;
    Mat frame;

    CvFeatureTrackerParams params;
    params.feature_type = 0;   // 0 = SIFT, 1 = SURF, 2 = optical flow
    params.window_size = 0;
    FeatureTraker tracker(params);

    capture.open(video);
    if (!capture.isOpened())
    {
        cout << "video file open failed..." << endl;
        return 1;
    }
    capture >> frame;
    if (frame.empty())
    {
        cout << "get frame error..." << endl;
        return 1;
    }
    resize(frame, frame, Size(352, 288));

    // Register the mouse callback to draw the bounding box
    namedWindow("fetureTracker", CV_WINDOW_AUTOSIZE);
    setMouseCallback("fetureTracker", mouseHandler, NULL);

GETBOUNDINGBOX:
    while (!gotBB)
    {
        // Draw on a copy so the frame passed to the tracker stays clean
        Mat temp = frame.clone();
        drawBox(temp, box, Scalar(0, 255, 0), 2);
        imshow("fetureTracker", temp);
        if (waitKey(33) == 'q')
            return 0;
    }
    if (box.area() < 160)
    {
        cout << "Bounding box too small, try again." << endl;
        gotBB = false;
        goto GETBOUNDINGBOX;
    }
    // Remove the callback once the box has been selected
    setMouseCallback("fetureTracker", NULL, NULL);
    printf("Initial Bounding Box = x:%d y:%d w:%d h:%d\n",
           box.x, box.y, box.width, box.height);

    tracker.newTrackingWindow(frame, box);
    Point2f pt = tracker.getTrackingCenter();
    circle(frame, pt, 5, Scalar(0, 0, 255), 2);

    for (;;)
    {
        capture >> frame;
        if (frame.empty())
            break;                       // end of video
        resize(frame, frame, Size(352, 288));
        tracker.updateTrackingWindow(frame);
        pt = tracker.getTrackingCenter();
        circle(frame, pt, 5, Scalar(0, 0, 255), 2);
        // Draw the updated tracking window, not the initial selection
        drawBox(frame, tracker.getTrackingWindow(), Scalar(0, 255, 0), 2);
        imshow("fetureTracker", frame);
        if (waitKey(33) == 'q')
            break;
    }
    return 0;
}
Next, a brief list of the issues in OpenCV's FeatureTracker class:
1) SIFT and SURF are classes in the nonfree module, so initModule_nonfree() must be called before any function from that module is used (see the sketch after this list).
2) In the FeatureTraker constructor that takes a CvFeatureTrackerParams argument, the switch statement is missing break statements, so selecting SIFT falls through into the SURF branch and then into the default error branch.
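To illustrate point 1, here is a minimal standalone sketch (my own example, assuming an OpenCV 2.4.x build that includes the nonfree module): Algorithm::create<Feature2D>("Feature2D.SIFT") only returns a non-empty pointer after initModule_nonfree() has registered the nonfree algorithms with the Algorithm factory.

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>

int main()
{
    // Without this call, Algorithm::create<Feature2D>("Feature2D.SIFT")
    // returns an empty pointer because the nonfree algorithms (SIFT/SURF)
    // were never registered with the Algorithm factory.
    cv::initModule_nonfree();

    cv::Ptr<cv::Feature2D> sift =
        cv::Algorithm::create<cv::Feature2D>("Feature2D.SIFT");
    std::cout << (sift.empty() ? "SIFT not registered" : "SIFT ready") << std::endl;
    return 0;
}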
The modified header file FeatureTraker.hpp is as follows:
#pragma once
#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;

// Feature tracking parameters
struct CV_EXPORTS CvFeatureTrackerParams
{
    enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 };
    CvFeatureTrackerParams(int featureType = 0, int windowSize = 0)
    {
        feature_type = featureType;
        window_size = windowSize;
    }
    int feature_type;  // Feature type to use
    int window_size;   // Window size in pixels around which to search for new window
};

class FeatureTraker
{
private:
    Ptr<Feature2D> dd;               // SIFT/SURF detector-descriptor
    Ptr<DescriptorMatcher> matcher;  // brute-force matcher for descriptors
    vector<DMatch> matches;
    Mat prev_image;
    Mat prev_image_bw;
    Rect prev_trackwindow;
    Point2d prev_center;
    int ittr;
    vector<Point2f> features[2];     // previous/current optical-flow points

public:
    Mat disp_matches;
    CvFeatureTrackerParams params;

    FeatureTraker();
    explicit FeatureTraker(CvFeatureTrackerParams params);
    ~FeatureTraker();

    void newTrackingWindow(Mat image, Rect selection);
    Rect updateTrackingWindow(Mat image);
    Rect updateTrackingWindowWithSIFT(Mat image);
    Rect updateTrackingWindowWithFlow(Mat image);
    void setTrackingWindow(Rect _window);
    Rect getTrackingWindow();
    Point2f getTrackingCenter();
};
The modified source file FeatureTraker.cpp is as follows:
#include "FeatureTraker.hpp"
#include <opencv2/nonfree/nonfree.hpp>

FeatureTraker::FeatureTraker(CvFeatureTrackerParams _params) :
    params(_params)
{
    // Fix 1: SIFT/SURF live in the nonfree module and must be registered first
    initModule_nonfree();

    switch (params.feature_type)
    {
    case CvFeatureTrackerParams::SIFT:
        dd = Algorithm::create<Feature2D>("Feature2D.SIFT");
        if (dd.empty())
            CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SIFT support");
        dd->set("nOctaveLayers", 5);
        dd->set("contrastThreshold", 0.04);
        dd->set("edgeThreshold", 10.7);
        break;   // Fix 2: break was missing in the original contrib code
    case CvFeatureTrackerParams::SURF:
        dd = Algorithm::create<Feature2D>("Feature2D.SURF");
        if (dd.empty())
            CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SURF support");
        dd->set("hessianThreshold", 400);
        dd->set("nOctaves", 3);
        dd->set("nOctaveLayers", 4);
        break;
    case CvFeatureTrackerParams::OPTICAL_FLOW:
        break;   // no detector needed; case added so this mode is constructible
    default:
        CV_Error(CV_StsBadArg, "Unknown feature type");
        break;
    }
    matcher = new BFMatcher(NORM_L2);
}

FeatureTraker::FeatureTraker()
{
}

FeatureTraker::~FeatureTraker()
{
}

void FeatureTraker::newTrackingWindow(Mat image, Rect selection)
{
    image.copyTo(prev_image);
    cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);
    prev_trackwindow = selection;
    prev_center.x = selection.x;
    prev_center.y = selection.y;
    ittr = 0;
}

Rect FeatureTraker::updateTrackingWindow(Mat image)
{
    if (params.feature_type == CvFeatureTrackerParams::OPTICAL_FLOW)
        return updateTrackingWindowWithFlow(image);
    else
        return updateTrackingWindowWithSIFT(image);
}

Rect FeatureTraker::updateTrackingWindowWithSIFT(Mat image)
{
    ittr++;
    vector<KeyPoint> prev_keypoints, curr_keypoints;
    vector<Point2f> prev_keys, curr_keys;
    Mat prev_desc, curr_desc;
    Rect window = prev_trackwindow;
    Mat mask = Mat::zeros(image.size(), CV_8UC1);

    // Detect features of the previous frame inside the old window
    rectangle(mask, Point(window.x, window.y),
              Point(window.x + window.width, window.y + window.height),
              Scalar(255), CV_FILLED);
    dd->operator()(prev_image, mask, prev_keypoints, prev_desc);

    // Enlarge the search window, then detect features in the current frame
    window.x -= params.window_size;
    window.y -= params.window_size;
    window.width += params.window_size;
    window.height += params.window_size;
    rectangle(mask, Point(window.x, window.y),
              Point(window.x + window.width, window.y + window.height),
              Scalar(255), CV_FILLED);
    dd->operator()(image, mask, curr_keypoints, curr_desc);

    if (prev_keypoints.size() > 4 && curr_keypoints.size() > 4)
    {
        matcher->match(prev_desc, curr_desc, matches);
        for (int i = 0; i < (int)matches.size(); i++)
        {
            prev_keys.push_back(prev_keypoints[matches[i].queryIdx].pt);
            curr_keys.push_back(curr_keypoints[matches[i].trainIdx].pt);
        }
        // Shift the window by the translation part of the estimated homography
        Mat T = findHomography(prev_keys, curr_keys, CV_LMEDS);
        prev_trackwindow.x += cvRound(T.at<double>(0, 2));
        prev_trackwindow.y += cvRound(T.at<double>(1, 2));
    }
    prev_center.x = prev_trackwindow.x;
    prev_center.y = prev_trackwindow.y;
    prev_image = image;
    return prev_trackwindow;
}

Rect FeatureTraker::updateTrackingWindowWithFlow(Mat image)
{
    ittr++;
    Size subPixWinSize(10, 10), winSize(31, 31);
    Mat image_bw;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    vector<uchar> status;
    vector<float> err;

    cvtColor(image, image_bw, CV_BGR2GRAY);
    cvtColor(prev_image, prev_image_bw, CV_BGR2GRAY);

    if (ittr == 1)
    {
        // First update: seed good features to track inside the selection
        Mat mask = Mat::zeros(image.size(), CV_8UC1);
        rectangle(mask, Point(prev_trackwindow.x, prev_trackwindow.y),
                  Point(prev_trackwindow.x + prev_trackwindow.width,
                        prev_trackwindow.y + prev_trackwindow.height),
                  Scalar(255), CV_FILLED);
        goodFeaturesToTrack(image_bw, features[1], 500, 0.01, 20, mask, 3, 0, 0.04);
        cornerSubPix(image_bw, features[1], subPixWinSize, Size(-1, -1), termcrit);
    }
    else
    {
        calcOpticalFlowPyrLK(prev_image_bw, image_bw, features[0], features[1],
                             status, err, winSize, 3, termcrit);

        // Move the window by the mean displacement of successfully tracked points
        Point2f feature0_center(0, 0);
        Point2f feature1_center(0, 0);
        int goodtracks = 0;
        for (int i = 0; i < (int)features[1].size(); i++)
        {
            if (status[i] == 1)
            {
                feature0_center.x += features[0][i].x;
                feature0_center.y += features[0][i].y;
                feature1_center.x += features[1][i].x;
                feature1_center.y += features[1][i].y;
                goodtracks++;
            }
        }
        if (goodtracks > 0)   // guard against division by zero when all tracks fail
        {
            feature0_center.x /= goodtracks;
            feature0_center.y /= goodtracks;
            feature1_center.x /= goodtracks;
            feature1_center.y /= goodtracks;
            prev_center.x += (feature1_center.x - feature0_center.x);
            prev_center.y += (feature1_center.y - feature0_center.y);
            prev_trackwindow.x = (int)prev_center.x;
            prev_trackwindow.y = (int)prev_center.y;
        }
    }
    swap(features[0], features[1]);
    image.copyTo(prev_image);
    return prev_trackwindow;
}

void FeatureTraker::setTrackingWindow(Rect _window)
{
    prev_trackwindow = _window;
}

Rect FeatureTraker::getTrackingWindow()
{
    return prev_trackwindow;
}

Point2f FeatureTraker::getTrackingCenter()
{
    Point2f center(0, 0);
    center.x = (float)(prev_center.x + prev_trackwindow.width / 2.0);
    center.y = (float)(prev_center.y + prev_trackwindow.height / 2.0);
    return center;
}
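For completeness, a small usage sketch for the optical-flow mode. Note that this relies on the extra OPTICAL_FLOW case I added to the constructor's switch above (my addition, not part of the original contrib code), and that firstFrame, nextFrame, and box are placeholders for the frames and selection obtained as in main():

// Hypothetical usage; firstFrame, nextFrame and box come from the caller
CvFeatureTrackerParams flowParams(CvFeatureTrackerParams::OPTICAL_FLOW, 10);
FeatureTraker flowTracker(flowParams);
flowTracker.newTrackingWindow(firstFrame, box);
Rect win = flowTracker.updateTrackingWindow(nextFrame);  // dispatches to updateTrackingWindowWithFlow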
Source code download: http://download.csdn.net/detail/kezunhai/5660195