目标追踪:金字塔光流法

金字塔光流法总结

1、金字塔光流法介绍
参考文献:
计算机视觉–光流法(optical flow)简介
1.1基本假设条件
(1)亮度恒定不变
(2)时间连续或运动是“小运动”
(3)空间一致性

2、单像素光流追踪

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
// Global tracking state shared by main(), on_Mouse() and drawTrackLines().
vector<Point>points;      // accumulated per-frame positions, used to draw the trajectory
Point point_one;          // the single pixel selected for tracking
vector<Point2f> corners2; // tracked point location in the current frame
vector<Point2f> corners1; // tracked point location in the previous frame
Mat frame, gray, deal_frame, frame_one; // current BGR frame, its gray version, previous gray frame (deal_frame appears unused)
// Mouse callback for the "video_input" window.
// NOTE(review): the click coordinates x/y are ignored — the seed point is
// hard-coded to (1704, 252), which matches one of the four corner seeds used
// in the four-point version below; confirm this is intentional.
// param must point at the Mat to annotate.
void on_Mouse(int event, int x, int y, int flags, void* param)
{
	Mat& image = *(Mat*)param;
	switch (event)
	{
	case EVENT_LBUTTONDOWN:
	{
		// Fixed seed point, marked with a small filled red dot.
		point_one.x = 1704;
		point_one.y = 252;
		circle(image, point_one, 1, Scalar(0, 0, 255), -1, 8);
	}
	break;
	}
}
void drawTrackLines();
// Single-pixel tracking demo: confirm a seed point on the first frame,
// refine it to sub-pixel accuracy, then follow it through the video with
// pyramidal Lucas-Kanade optical flow, drawing the trajectory and writing
// the annotated video to "sub_onepointtracking.avi".
int main()
{
	VideoCapture capture;
	capture.open("41_output.mp4");
	if (!capture.isOpened())
	{
		cout << "could not load video data" << endl;
		return -1;
	}
	// Basic stream properties.
	int frames = capture.get(CAP_PROP_FRAME_COUNT);
	double fps = capture.get(CAP_PROP_FPS);
	Size size = Size(capture.get(CAP_PROP_FRAME_WIDTH), capture.get(CAP_PROP_FRAME_HEIGHT));
	cout << frames << endl;
	cout << fps << endl;
	cout << size << endl;
	VideoWriter writer;
	writer.open("sub_onepointtracking.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, size);
	// BUGFIX: fail fast when the encoder cannot be opened instead of
	// silently dropping every frame.
	if (!writer.isOpened())
	{
		cout << "could not open video writer" << endl;
		return -1;
	}
	namedWindow("video_input", WINDOW_NORMAL);
	int n = 0;
	while (capture.read(frame))
	{
		n++;
		if (n == 1)
		{
			// First frame: show the seed point until the user presses ESC,
			// then refine it to sub-pixel accuracy with cornerSubPix.
			Mat frame1;
			frame.copyTo(frame1);
			cvtColor(frame, frame_one, COLOR_BGR2GRAY);
			setMouseCallback("video_input", on_Mouse, &frame1);
			while (true)
			{
				Mat tempImage;
				frame.copyTo(tempImage);
				circle(tempImage, point_one, 10, Scalar(0, 0, 255), 1, 8);
				imshow("video_input", tempImage);
				if (waitKey(10) == 27) break;
			}
			TermCriteria criteria0 = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 100, 0.005);
			corners1.push_back(point_one);
			cornerSubPix(frame_one, corners1, Size(5, 5), Size(-1, -1), criteria0);
		}
		else
		{
			cvtColor(frame, gray, COLOR_BGR2GRAY);
			TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 100, 0.005);
			vector<uchar> features_found;
			calcOpticalFlowPyrLK(frame_one, gray, corners1, corners2, features_found, noArray(), Size(21, 21), 5, criteria);
			cout << corners1[0] << endl;
			// BUGFIX: only extend the trajectory when the tracker reports
			// success; otherwise corners2[0] is meaningless — keep the last
			// known position instead.
			if (!features_found.empty() && features_found[0])
			{
				points.push_back(corners1[0]);
				points.push_back(corners2[0]);
			}
			else
			{
				corners2 = corners1;
			}
			// Draw the current point and the full trajectory so far.
			drawTrackLines();
			for (size_t j = 1; j < points.size(); j++)
			{
				line(frame, points[j - 1], points[j], Scalar(0, 0, 255), 1, 4);
			}
			imshow("video_output", frame);
			// BUGFIX: without a waitKey the HighGUI windows never refresh
			// inside this loop; ESC also allows an early exit now.
			if (waitKey(1) == 27) break;
			writer << frame;
			if (n == 208)
			{
				imwrite("sub_lastiamge4.jpg", frame);
			}
			// The current result becomes the previous-frame input.
			swap(corners2, corners1);
			gray.copyTo(frame_one);
		}
	}
	return 0;
}
void drawTrackLines() {
	for (size_t t = 0; t < corners2.size(); t++) {

		circle(frame, corners2[t], 10, Scalar(0, 0, 255), 1, 8, 0);
	}
}

3、四点光流法追踪
main.cpp

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include"onePointTracking.h"
using namespace std;
using namespace cv;

// Global state: one trajectory (per-frame tracked positions) for each of the
// four plane corners, in the order they are tracked in main().
vector<vector<Point2f>> pointss;// four lists of corner positions, one per tracked corner

int main()
{
	
	for (int i = 0; i < 4; i++)
	{
		if (i == 0)
		{
			vector<Point2f> onePoints;
			OnePointTracking onePoint(1447, 223);
			onePoints = onePoint.pointTrack();
			pointss.push_back(onePoints);
		}
		if (i == 1)
		{
			vector<Point2f> twoPoints;
			OnePointTracking twoPoint(1703, 199);
			twoPoints = twoPoint.pointTrack();
			pointss.push_back(twoPoints);
		}
		if (i == 2)
		{
			vector<Point2f> threePoints;
			OnePointTracking threePoint(1704, 252);
			threePoints = threePoint.pointTrack();
			pointss.push_back(threePoints);
		}
		if (i == 3)
		{
			vector<Point2f> fourPoints;
			OnePointTracking fourPoint(1447, 273);
			fourPoints = fourPoint.pointTrack();
			pointss.push_back(fourPoints);
		}
	}
	VideoCapture capture1;
	capture1.open("41_output.mp4");
	//查看视频信息
	int frames = capture1.get(CAP_PROP_FRAME_COUNT);//视频帧数
	int fps = capture1.get(CAP_PROP_FPS);//帧率每秒多少帧
	Size size = Size(capture1.get(CAP_PROP_FRAME_WIDTH), capture1.get(CAP_PROP_FRAME_HEIGHT));//每帧宽高
	//创建视频保存窗口
	VideoWriter writer;
	writer.open("platTracking.avi", VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, size);
	//创建显示窗口
	namedWindow("video_input", WINDOW_AUTOSIZE);
	namedWindow("video_output", WINDOW_AUTOSIZE);
	Mat frame_img;
	int n = 0;
	while (capture1.read(frame_img))
	{
		imshow("video_input", frame_img);
		vector<Point> singlePoints;
		singlePoints.push_back(pointss[0][n]);
		singlePoints.push_back(pointss[1][n]);
		singlePoints.push_back(pointss[2][n]);
		singlePoints.push_back(pointss[3][n]);
		fillConvexPoly(frame_img, singlePoints, Scalar(0, 0, 255), LINE_AA, 0);
		imshow("video_output", frame_img);
		writer << frame_img;
		n++;
	}
	return 0;
}

onepointtracking.h

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
// Tracks one pixel through the whole video with pyramidal LK optical flow.
// NOTE(review): this header has no include guard / #pragma once — add one if
// it is ever included from more than one translation unit.
class OnePointTracking
{
public:
	// x/y: pixel coordinates (in the first video frame) of the point to track.
	OnePointTracking(int x, int y);
	// Runs the tracker over "41_output.mp4" and returns one position per frame.
	std::vector<cv::Point2f> pointTrack();

private:
	cv::Point point_one; // seed position of the tracked point
};


onepointtracking.cpp

#include"onePointTracking.h"
using namespace std;
using namespace cv;

// Remember the seed coordinates that pointTrack() will follow.
OnePointTracking::OnePointTracking(int x, int y)
	: point_one(x, y)
{
}
// Track this object's seed point through every frame of "41_output.mp4" with
// pyramidal Lucas-Kanade optical flow.
// Returns one Point2f per frame: the sub-pixel-refined seed for frame 1, then
// the tracked location for each later frame. Returns an empty vector when the
// video cannot be opened.
vector<Point2f> OnePointTracking::pointTrack()
{
	vector<Point2f> points;
	vector<Point2f> corners2;
	vector<Point2f> corners1;
	Mat frame, gray, frame_one;
	// Open the video.
	VideoCapture capture;
	capture.open("41_output.mp4");
	// BUGFIX: the original never checked whether the video opened.
	if (!capture.isOpened())
	{
		cout << "could not load video data" << endl;
		return points;
	}
	// Basic stream info.
	int frames = capture.get(CAP_PROP_FRAME_COUNT);// frame count
	double fps = capture.get(CAP_PROP_FPS);// frames per second (was int, which truncated)
	Size size = Size(capture.get(CAP_PROP_FRAME_WIDTH), capture.get(CAP_PROP_FRAME_HEIGHT));// frame size
	cout << frames << "\t" << fps << "\t" << size << "\t" << endl;
	int n = 0;
	while (capture.read(frame))
	{
		n++;
		if (n == 1)
		{
			// First frame: refine the seed point to sub-pixel accuracy.
			cvtColor(frame, frame_one, COLOR_BGR2GRAY);
			TermCriteria criteria0 = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 100, 0.005);
			corners1.push_back(point_one);
			cornerSubPix(frame_one, corners1, Size(5, 5), Size(-1, -1), criteria0);
			points.push_back(corners1[0]);
		}
		else
		{
			cvtColor(frame, gray, COLOR_BGR2GRAY);
			TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 100, 0.005);
			vector<uchar> features_found;
			calcOpticalFlowPyrLK(frame_one, gray, corners1, corners2, features_found, noArray(), Size(21, 21), 5, criteria);
			cout << corners1[0] << endl;
			// BUGFIX: when the point is lost, keep its last known position
			// instead of storing an undefined result.
			if (features_found.empty() || !features_found[0])
				corners2 = corners1;
			points.push_back(corners2[0]);
			// The current result becomes the previous-frame input.
			swap(corners2, corners1);
			gray.copyTo(frame_one);
		}
	}
	cout << points.size() << endl;
	return points;
}

OpenCV之视频分析与对象跟踪(四) 光流的对象跟踪 稀疏光流&稠密光流
4、全局追踪

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Global tracking state shared by main() and the helpers below.
Mat frame, gray;            // current BGR frame and its gray version
Mat prev_frame, prev_gray;  // previous frame (BGR and gray)

vector<Point2f> features; // Shi-Tomasi corner detection output

vector<Point2f> iniPoints; // starting position of each surviving track
vector<Point2f> fpts[2]; // feature positions: [0] previous frame, [1] current frame

vector<uchar> status; // per-feature flag: tracking succeeded
vector<float> errors; // per-feature tracking error

void drawFeature(Mat& inFrame);
void detectFeatures(Mat& inFrame, Mat& ingray);
void klTrackFeature();
void drawTrackLines();

// Whole-frame sparse tracking: detect Shi-Tomasi corners whenever too few
// tracks survive, then follow them frame-to-frame with pyramidal LK.
int main(int argc, char** argv)
{
	VideoCapture capture;
	capture.open("41_output.mp4");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return -1;
	}

	namedWindow("camera input", 0);
	while (capture.read(frame))
	{
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		const bool needDetection = fpts[0].size() < 40;
		if (needDetection) {
			// Too few live tracks: top them up with freshly detected corners.
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		} else {
			printf("没有检测,持续追踪...\n");
		}

		// First frame: flow runs from the frame to itself.
		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();

		// The current frame becomes the previous one.
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char key = waitKey(1);
		if (key == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

// Detect up to maxCorners Shi-Tomasi corners in `ingray` and store them in
// the global `features` vector. `inFrame` is unused (kept for signature
// compatibility with callers).
void detectFeatures(Mat& inFrame, Mat& ingray) {
	// FIX: goodFeaturesToTrack takes int for the corner count and block
	// size; declaring them double forced silent narrowing conversions.
	const int maxCorners = 5000;
	const double qualitylevel = 0.01; // minimal accepted corner quality, relative to the best corner
	const double minDistance = 10;    // minimum pixel distance between returned corners
	const int blockSize = 3;
	const double k = 0.04;            // Harris parameter (unused: useHarrisDetector=false)
	goodFeaturesToTrack(ingray, features, maxCorners, qualitylevel, minDistance, Mat(), blockSize, false, k);
	cout << "detect features : " << features.size() << endl;
}


void klTrackFeature() {
	// KLT
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);
	int k = 0;

	// 特征点过滤
	for (int i = 0; i < fpts[1].size(); i++) {
		
		/*
		1.calcOpticalFlowPyrLK函数作用是对输入的特征点fpts[0],根据下一帧的图像对这些特征点判定是不是光流,
		检测结束后,status的每个下标会保存答案,再进行判断即可。
		2.initPoints集合用于存放初始化特征数据,每次的calcOpticalFlowPyrLK后都会重新更新一次,用status判断判断有没有新的特征是可以追踪的,或者用status判断哪些旧的特征可以不要了,
		3.fpts[1]集合用于存放当前帧的数据,
		*/

		if (status[i]) 
		{
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];

			//1.将用KLT算法找到的特征点集fpts[1]进行筛选,将没用的点去除,
			//没用的点包括距离太小没有变化和status的状态
			//2.将有用的点放进fpts[1]中
		}
	}
	// 保存特征点并绘制跟踪轨迹
	iniPoints.resize(k);	//裁剪不要的特征,更新集合大小
	fpts[1].resize(k);

	drawTrackLines();

	std::swap(fpts[1], fpts[0]);//更新帧的特征点
}

void drawTrackLines() {
	for (size_t t = 0; t < fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 1, Scalar(0, 0, 255), 1, 8, 0);
	}
}

5、局部区域光流追踪

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Global tracking state shared by main() and the helpers below.
Mat frame, gray;            // current BGR frame and its gray version
Mat prev_frame, prev_gray;  // previous frame (BGR and gray)
Mat mask;                   // ROI mask: corners are detected only where mask != 0

vector<Point2f> features; // Shi-Tomasi corner detection output

vector<Point2f> iniPoints; // starting position of each surviving track
vector<Point2f> fpts[2]; // feature positions: [0] previous frame, [1] current frame

vector<uchar> status; // per-feature flag: tracking succeeded
vector<float> errors; // per-feature tracking error


void drawFeature(Mat& inFrame);
void detectFeatures(Mat& inFrame, Mat& ingray);
void klTrackFeature();
void drawTrackLines();

int main(int argc, char** argv)
{
	VideoCapture capture;
	capture.open("41_output.mp4");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return -1;
	}

	namedWindow("camera input", 0);
	while (capture.read(frame))
	{
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		 
		if (fpts[0].size() < 10)
		{

			Rect Roirect = selectROI(frame, false, false);
			cout << Roirect.x << Roirect.y << endl;
			Mat Roiimg = gray(Roirect);
			mask= Mat::zeros(gray.size(), gray.type());
			Roiimg.copyTo(mask(Roirect));
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}
		else {
			printf("没有检测,持续追踪...\n");
		}

		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();

		// 更新前一帧数据
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char c = waitKey(1);
		if (c == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

// Detect up to maxCorners Shi-Tomasi corners in `ingray`, restricted to the
// non-zero area of the global `mask`, and store them in the global `features`
// vector. `inFrame` is unused (kept for signature compatibility).
void detectFeatures(Mat& inFrame, Mat& ingray) {
	// FIX: goodFeaturesToTrack takes int for the corner count and block
	// size; declaring them double forced silent narrowing conversions.
	const int maxCorners = 5000;
	const double qualitylevel = 0.01; // minimal accepted corner quality, relative to the best corner
	const double minDistance = 10;    // minimum pixel distance between returned corners
	const int blockSize = 3;
	const double k = 0.04;            // Harris parameter (unused: useHarrisDetector=false)
	goodFeaturesToTrack(ingray, features, maxCorners, qualitylevel, minDistance, mask, blockSize, false, k);
	cout << "detect features : " << features.size() << endl;
}


void klTrackFeature() {
	// KLT
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);
	int k = 0;

	// 特征点过滤
	for (int i = 0; i < fpts[1].size(); i++) {

		/*
		1.calcOpticalFlowPyrLK函数作用是对输入的特征点fpts[0],根据下一帧的图像对这些特征点判定是不是光流,
		检测结束后,status的每个下标会保存答案,再进行判断即可。
		2.initPoints集合用于存放初始化特征数据,每次的calcOpticalFlowPyrLK后都会重新更新一次,用status判断判断有没有新的特征是可以追踪的,或者用status判断哪些旧的特征可以不要了,
		3.fpts[1]集合用于存放当前帧的数据,
		*/

		if (status[i])
		{
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];

			//1.将用KLT算法找到的特征点集fpts[1]进行筛选,将没用的点去除,
			//没用的点包括距离太小没有变化和status的状态
			//2.将有用的点放进fpts[1]中
		}
	}
	// 保存特征点并绘制跟踪轨迹
	iniPoints.resize(k);	//裁剪不要的特征,更新集合大小
	fpts[1].resize(k);

	drawTrackLines();

	std::swap(fpts[1], fpts[0]);//更新帧的特征点
}

void drawTrackLines() {
	for (size_t t = 0; t < fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 1, Scalar(0, 0, 255), 1, 8, 0);
	}
}

任意形状

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Global tracking state shared by main(), on_Mouse() and the helpers below.
Mat frame, gray;            // current BGR frame and its gray version
Mat prev_frame, prev_gray;  // previous frame (BGR and gray)
Mat mask;                   // polygon mask: corners are detected only where mask != 0
vector<Point> point4;       // polygon vertices clicked by the user
Point point_one;            // most recently clicked vertex
vector<Point2f> features; // Shi-Tomasi corner detection output

vector<Point2f> iniPoints; // starting position of each surviving track
vector<Point2f> fpts[2]; // feature positions: [0] previous frame, [1] current frame

vector<uchar> status; // per-feature flag: tracking succeeded
vector<float> errors; // per-feature tracking error
void on_Mouse(int event, int x, int y, int flags, void* param)
{
	Mat& image = *(Mat*)param;
	switch (event)
	{
	case EVENT_LBUTTONDOWN:
	{
		point_one.x = x;
		point_one.y = y;
		point4.push_back(point_one);
		circle(image, point_one, 3, Scalar(0, 0, 255), 1, 8);
	}
	break;
	}
}

void detectFeatures(Mat& inFrame, Mat& ingray);
void klTrackFeature();
void drawTrackLines();

// Arbitrary-quadrilateral sparse tracking: the user clicks the polygon
// vertices (ESC finishes the selection), a filled-polygon mask limits corner
// detection to that region, and the detected corners are tracked with
// pyramidal LK.
int main(int argc, char** argv)
{
	VideoCapture capture;
	capture.open("41_output.mp4");
	if (!capture.isOpened()) {
		printf("could not load video file...\n");
		return -1;
	}

	namedWindow("camera input", 0);
	while (capture.read(frame))
	{
		cvtColor(frame, gray, COLOR_BGR2GRAY);

		if (fpts[0].size() < 10)
		{
			// (A rectangular ROI via selectROI is the simpler alternative —
			// see the "local region" variant of this program.)
			// 1. Collect the polygon vertices with mouse clicks.
			// BUGFIX: clear stale vertices from any previous selection,
			// otherwise fillConvexPoly receives an ever-growing and
			// possibly non-convex vertex list on re-detection.
			point4.clear();
			Mat frame1;
			frame.copyTo(frame1);
			setMouseCallback("camera input", on_Mouse, &frame1);
			while (true)
			{
				Mat tempImage;
				frame.copyTo(tempImage);
				circle(tempImage, point_one, 3, Scalar(0, 0, 255), 1, 8);
				imshow("camera input", tempImage);
				if (waitKey(10) == 27) break;
			}
			// 2. Build the detection mask from the clicked polygon.
			mask = Mat::zeros(gray.size(), gray.type());
			// BUGFIX: fillConvexPoly needs at least 3 vertices; skip the
			// fill (leaving an empty mask) when too few were clicked.
			if (point4.size() >= 3)
				fillConvexPoly(mask, point4, Scalar(255), 8);
			detectFeatures(frame, gray);
			fpts[0].insert(fpts[0].end(), features.begin(), features.end());
			iniPoints.insert(iniPoints.end(), features.begin(), features.end());
		}
		else {
			printf("没有检测,持续追踪...\n");
		}

		// First frame: flow runs from the frame to itself.
		if (prev_gray.empty()) {
			gray.copyTo(prev_gray);
		}

		klTrackFeature();

		// The current frame becomes the previous one.
		gray.copyTo(prev_gray);
		frame.copyTo(prev_frame);
		imshow("camera input", frame);

		char c = waitKey(1);
		if (c == 27) {
			break;
		}
	}

	waitKey(0);
	return 0;
}

// Detect Shi-Tomasi corners inside the global `mask`, refine them to
// sub-pixel accuracy, and store them in the global `features` vector.
// `inFrame` is unused (kept for signature compatibility).
void detectFeatures(Mat& inFrame, Mat& ingray) {
	// FIX: goodFeaturesToTrack takes int for the corner count and block
	// size; declaring them double forced silent narrowing conversions.
	const int maxCorners = 5000;
	const double qualitylevel = 0.01; // minimal accepted corner quality, relative to the best corner
	const double minDistance = 10;    // minimum pixel distance between returned corners
	const int blockSize = 3;
	const double k = 0.04;            // Harris parameter (unused: useHarrisDetector=false)
	TermCriteria criteria0 = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 100, 0.005);
	goodFeaturesToTrack(ingray, features, maxCorners, qualitylevel, minDistance, mask, blockSize, false, k);
	// BUGFIX: cornerSubPix asserts on an empty point set; skip refinement
	// when nothing was detected.
	if (!features.empty())
		cornerSubPix(ingray, features, Size(5, 5), Size(-1, -1), criteria0);
	cout << "detect features : " << features.size() << endl;
}


void klTrackFeature() {
	// KLT
	calcOpticalFlowPyrLK(prev_gray, gray, fpts[0], fpts[1], status, errors);
	int k = 0;

	// 特征点过滤
	for (int i = 0; i < fpts[1].size(); i++) {

		/*
		1.calcOpticalFlowPyrLK函数作用是对输入的特征点fpts[0],根据下一帧的图像对这些特征点判定是不是光流,
		检测结束后,status的每个下标会保存答案,再进行判断即可。
		2.initPoints集合用于存放初始化特征数据,每次的calcOpticalFlowPyrLK后都会重新更新一次,用status判断判断有没有新的特征是可以追踪的,或者用status判断哪些旧的特征可以不要了,
		3.fpts[1]集合用于存放当前帧的数据,
		*/

		if (status[i])
		{
			iniPoints[k] = iniPoints[i];
			fpts[1][k++] = fpts[1][i];

			//1.将用KLT算法找到的特征点集fpts[1]进行筛选,将没用的点去除,
			//没用的点包括距离太小没有变化和status的状态
			//2.将有用的点放进fpts[1]中
		}
	}
	// 保存特征点并绘制跟踪轨迹
	iniPoints.resize(k);	//裁剪不要的特征,更新集合大小
	fpts[1].resize(k);

	drawTrackLines();

	std::swap(fpts[1], fpts[0]);//更新帧的特征点
}

void drawTrackLines() {
	for (size_t t = 0; t < fpts[1].size(); t++) {
		line(frame, iniPoints[t], fpts[1][t], Scalar(0, 255, 0), 1, 8, 0);
		circle(frame, fpts[1][t], 1, Scalar(0, 0, 255), 1, 8, 0);
	}
}

你可能感兴趣的:(目标追踪)