OpenCV Learning, Part 2: Infrared Video Tracking with CamShift

1. RGB-to-Gray Conversion

CamShift usually matches against a color histogram, whereas infrared video is grayscale. The usual formula for converting a color RGB image to grayscale is:

Gray = R*0.299 + G*0.587 + B*0.114
The following part is excerpted from this blog post: http://www.cnblogs.com/NIOS/archive/2009/08/25/1553653.html

  In practice, however, we want to avoid slow floating-point arithmetic, so an integer algorithm is needed.
  Noting that the coefficients have three decimal digits of precision, we can scale them by 1000 to turn this into integer arithmetic:
Gray = (R*299 + G*587 + B*114 + 500) / 1000
      Since that algorithm requires 32-bit arithmetic, another variant of the formula is also popular:
Gray = (R*30 + G*59 + B*11 + 50) / 100
     Although the first formula calls for 32-bit integer arithmetic, the integer multiply/divide instructions of the x86 architecture allow it to be computed with 16-bit multiplies and divides. And since 32-bit machines have long been the norm (AMD64 is already out), the first formula is the recommended one.
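As a quick C++ illustration (my own sketch, not part of the quoted blog), the two integer variants can be written as:

#include <cstdint>

// Coefficients scaled by 1000; the intermediate sum (at most 255500) needs 32-bit arithmetic.
inline uint8_t gray_x1000(uint8_t r, uint8_t g, uint8_t b)
{
	return static_cast<uint8_t>((r * 299 + g * 587 + b * 114 + 500) / 1000);
}

// Coefficients scaled by 100; the intermediate sum (at most 25550) fits in 16 bits.
inline uint8_t gray_x100(uint8_t r, uint8_t g, uint8_t b)
{
	return static_cast<uint8_t>((r * 30 + g * 59 + b * 11 + 50) / 100);
}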

Integer shift algorithm


  The integer algorithm above is already fast, but one thing still limits its speed: the final division. Shifting is much faster than dividing, so the coefficients can instead be scaled to a power of two.
  By convention 16 bits of precision are used; 2^16 = 65536, so the coefficients are computed as follows:
0.299 * 65536 = 19595.264 ≈ 19595
0.587 * 65536 + (0.264) = 38469.632 + 0.264 = 38469.896 ≈ 38469
0.114 * 65536 + (0.896) =  7471.104 + 0.896 = 7472

  You may notice that the rounding used here is not ordinary round-half-up. Rounding each coefficient independently gives a larger error; instead, the error from the previous results should be carried into the next coefficient, and the final rounding is done by truncation:

  Written as an expression:
Gray = (R*19595 + G*38469 + B*7472) >> 16

  Coefficients for 2- to 20-bit precision:
Gray = (R*1 + G*2 + B*1) >> 2
Gray = (R*2 + G*5 + B*1) >> 3
Gray = (R*4 + G*10 + B*2) >> 4
Gray = (R*9 + G*19 + B*4) >> 5
Gray = (R*19 + G*37 + B*8) >> 6
Gray = (R*38 + G*75 + B*15) >> 7
Gray = (R*76 + G*150 + B*30) >> 8
Gray = (R*153 + G*300 + B*59) >> 9
Gray = (R*306 + G*601 + B*117) >> 10
Gray = (R*612 + G*1202 + B*234) >> 11
Gray = (R*1224 + G*2405 + B*467) >> 12
Gray = (R*2449 + G*4809 + B*934) >> 13
Gray = (R*4898 + G*9618 + B*1868) >> 14
Gray = (R*9797 + G*19235 + B*3736) >> 15
Gray = (R*19595 + G*38469 + B*7472) >> 16
Gray = (R*39190 + G*76939 + B*14943) >> 17
Gray = (R*78381 + G*153878 + B*29885) >> 18
Gray = (R*156762 + G*307757 + B*59769) >> 19
Gray = (R*313524 + G*615514 + B*119538) >> 20

  Looking closely at the table above, several precisions are effectively identical: 3 and 4, 7 and 8, 10 and 11, 13 and 14, 19 and 20.
  So under 16-bit arithmetic the best formula is the 7-bit-precision one, which is both more accurate than the earlier coefficients scaled by 100 and faster:
Gray = (R*38 + G*75 + B*15) >> 7

  The most interesting one, though, is the 2-bit-precision formula, which can be computed with shifts alone:
Gray = (R + (WORD)G<<1 + B) >> 2
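
Applied to an OpenCV BGR image, the 7-bit shift formula might look like the sketch below (in practice cvtColor with COLOR_BGR2GRAY already does this conversion; the function name here is my own):

#include <opencv2/core/core.hpp>

// Grayscale via the 7-bit approximation: Gray = (R*38 + G*75 + B*15) >> 7
cv::Mat bgrToGrayShift7(const cv::Mat& bgr)
{
	CV_Assert(bgr.type() == CV_8UC3);
	cv::Mat gray(bgr.rows, bgr.cols, CV_8UC1);
	for (int y = 0; y < bgr.rows; y++)
	{
		for (int x = 0; x < bgr.cols; x++)
		{
			const cv::Vec3b& px = bgr.at<cv::Vec3b>(y, x);   // OpenCV stores channels as B, G, R
			gray.at<uchar>(y, x) = (uchar)((px[2] * 38 + px[1] * 75 + px[0] * 15) >> 7);
		}
	}
	return gray;
}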

2. Converting Infrared Video to a Rainbow (Pseudo-Color) Image

The following content is reproduced from this blog post: http://blog.sina.com.cn/s/blog_8924265b0101ext1.html
The idea is to map each gray value of the grayscale image to the three RGB primaries, producing a pseudo-color image.
A rainbow map typically uses the following six colors (R, G, B, and the gray level of each anchor):
// red    255, 0,   0     255
// orange 255, 127, 0     204
// yellow 255, 255, 0     153
// green  0,   255, 0     102
// cyan   0,   255, 255   51
// blue   0,   0,   255   0
The 0-255 gray range is split into segments of 51 between these six anchor colors (thresholds at 51, 102, 153, 204).
After each frame has been converted to the rainbow image, CamShift can track it directly; a small helper sketch of the mapping is given below.
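
The pixel loop in the full demo of section 3 implements this mapping inline; pulled out as a helper it might look like the following sketch (the function name is my own):

#include <opencv2/core/core.hpp>

// Map a gray value (0 = blue ... 255 = red) to a BGR rainbow color,
// using the anchors blue/cyan/green/yellow/orange/red at multiples of 51.
cv::Vec3b grayToRainbow(uchar g)
{
	if (g <= 51)  return cv::Vec3b(255, g * 5, 0);                          // blue   -> cyan
	if (g <= 102) { g -= 51;  return cv::Vec3b(255 - g * 5, 255, 0); }      // cyan   -> green
	if (g <= 153) { g -= 102; return cv::Vec3b(0, 255, g * 5); }            // green  -> yellow
	if (g <= 204) { g -= 153; return cv::Vec3b(0, 255 - (uchar)(128.0 * g / 51.0 + 0.5), 255); } // yellow -> orange
	g -= 204;     return cv::Vec3b(0, 127 - (uchar)(127.0 * g / 51.0 + 0.5), 255);               // orange -> red
}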

3. camshift()

The full name of CamShift is "Continuously Adaptive Mean-SHIFT". Its basic idea is to run MeanShift on every frame of the video and to use the result from the previous frame (the center and size of the search window) as the initial search window for MeanShift in the next frame, iterating in this way.
The algorithm proceeds as follows (a minimal sketch of the per-frame core is given after this list):
(1) Initialize the search window.
(2) Compute the color probability distribution of the search window (the back projection).
(3) Run MeanShift to obtain the new size and position of the search window.
(4) Re-initialize the search window in the next frame with the values from (3), then return to step (2).
CamShift copes well with target deformation and occlusion, demands few system resources, and has low time complexity, so it tracks reliably against simple backgrounds. When the background is cluttered, however, or when many background pixels share the target's color, tracking fails: the algorithm considers only the color histogram and ignores the target's spatial distribution, so in such cases a prediction algorithm for the tracked target needs to be added.
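As a rough illustration of steps (2)-(4), here is a minimal per-frame sketch in the OpenCV 2.x API, assuming the target histogram hist, the hue plane hue, the mask, the range pointer phranges, and an initial trackWindow have already been prepared as in the full demo below:

// Per-frame CamShift step (the surrounding variables are assumed to exist).
Mat backproj;
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);   // color probability image
backproj &= mask;                                         // drop low-saturation / out-of-range pixels
RotatedRect trackBox = CamShift(backproj, trackWindow,
	TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
ellipse(image, trackBox, Scalar(0, 0, 255), 3, CV_AA);    // draw the tracked region

The complete program below is based on OpenCV's camshiftdemo sample: it reads the infrared video file, converts each frame to the rainbow pseudo-color image, and then runs CamShift on it.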
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

Mat image;

bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 256, smin = 30;

static void onMouse(int event, int x, int y, int, void*)
{
	if (selectObject)
	{
		selection.x = MIN(x, origin.x);
		selection.y = MIN(y, origin.y);
		selection.width = std::abs(x - origin.x);
		selection.height = std::abs(y - origin.y);

		selection &= Rect(0, 0, image.cols, image.rows);
	}

	switch (event)
	{
	case CV_EVENT_LBUTTONDOWN:
		origin = Point(x, y);
		selection = Rect(x, y, 0, 0);
		selectObject = true;
		break;
	case CV_EVENT_LBUTTONUP:
		selectObject = false;
		if (selection.width > 0 && selection.height > 0)
			trackObject = -1;
		break;
	}
}

static void help()
{
	cout << "\nThis is a demo that shows mean-shift based tracking\n"
		"You select a color objects such as your face and it tracks it.\n"
		"This reads from video camera (0 by default, or the camera number the user enters\n"
		"Usage: \n"
		"   ./camshiftdemo [camera number]\n";

	cout << "\n\nHot keys: \n"
		"\tESC - quit the program\n"
		"\tc - stop the tracking\n"
		"\tb - switch to/from backprojection view\n"
		"\th - show/hide object histogram\n"
		"\tp - pause video\n"
		"To initialize tracking, select the object with mouse\n";
}

const char* keys =
{
	"{1|  | 0 | camera number}"
};

int main(int argc, const char** argv)
{
	help();

	VideoCapture cap;
	VideoCapture capture("C:/Users/Administrator/Desktop/camshift/camshift/Debug/example.mp4");
	Rect trackWindow;
	int hsize = 16;
	float hranges[] = { 0, 180 };
	const float* phranges = hranges;
	CommandLineParser parser(argc, argv, keys);
	int camNum = parser.get<int>("1");

	cap.open(camNum);

	// frames are read from the video file ("capture") below, so check that it opened successfully
	if (!capture.isOpened())
	{
		help();
		cout << "***Could not initialize capturing...***\n";
		cout << "Current parameter's value: \n";
		parser.printParams();
		return -1;
	}

	namedWindow("Histogram", 0);
	namedWindow("CamShift Demo", 0);
	namedWindow("CamShift", 0);
	setMouseCallback("CamShift Demo", onMouse, 0);
	createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
	createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
	createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);

	Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;

	bool paused = false;

	for (;;)
	{
		if (!paused)
		{
			capture >> frame;
			if (frame.empty())
				break;
		}

		frame.copyTo(image);
		/////////
		//cvtColor(frame, test, COLOR_GRAY2BGR);
	


		Mat test(image.rows, image.cols, CV_8UC3); // scratch RGB image (unused; see the commented-out cvtColor above)
		Mat test_grey;
		cvtColor(image, test_grey, COLOR_BGR2GRAY);
		

		int tmp = 0;
		Mat img;
		cvtColor(image, img, COLOR_BGR2GRAY);


		Mat img_color(image.rows, image.cols, CV_8UC3); // build the RGB pseudo-color output image

#define IMG_B(img,y,x) img.at<Vec3b>(y,x)[0]

#define IMG_G(img,y,x) img.at<Vec3b>(y,x)[1]

#define IMG_R(img,y,x) img.at<Vec3b>(y,x)[2]

		uchar tmp2 = 0;

		for (int y = 0; y < img.rows; y++)
		{
			for (int x = 0; x < img.cols; x++)
			{
				tmp2 = img.at<uchar>(y, x);

				if (tmp2 <= 51)

				{

					IMG_B(img_color, y, x) = 255;

					IMG_G(img_color, y, x) = tmp2 * 5;

					IMG_R(img_color, y, x) = 0;

				}

				else if(tmp2 <= 102)

				{

					tmp2 -= 51;

					IMG_B(img_color, y, x) = 255 - tmp2 * 5;

					IMG_G(img_color, y, x) = 255;

					IMG_R(img_color, y, x) = 0;

				}

				else if(tmp2 <= 153)

				{

					tmp2 -= 102;

					IMG_B(img_color, y, x) = 0;

					IMG_G(img_color, y, x) = 255;

					IMG_R(img_color, y, x) = tmp2 * 5;

				}

				else if(tmp2 <= 204)

				{

					tmp2 -= 153;

					IMG_B(img_color, y, x) = 0;

					IMG_G(img_color, y, x) = 255 - uchar(128.0*tmp2 / 51.0 + 0.5);

					IMG_R(img_color, y, x) = 255;

				}

				else

				{

					tmp2 -= 204;

					IMG_B(img_color, y, x) = 0;

					IMG_G(img_color, y, x) = 127 - uchar(127.0*tmp2 / 51.0 + 0.5);

					IMG_R(img_color, y, x) = 255;

				}

			}

		}




		imshow("CamShift", image);
		image = img_color;

		////////
		if (!paused)
		{
			cvtColor(image, hsv, COLOR_BGR2HSV);

			if (trackObject)
			{
				int _vmin = vmin, _vmax = vmax;

				inRange(hsv, Scalar(0, smin, MIN(_vmin, _vmax)),
					Scalar(180, 256, MAX(_vmin, _vmax)), mask);
				int ch[] = { 0, 0 };
				hue.create(hsv.size(), hsv.depth());
				mixChannels(&hsv, 1, &hue, 1, ch, 1);

				if (trackObject < 0)
				{
					Mat roi(hue, selection), maskroi(mask, selection);
					calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
					normalize(hist, hist, 0, 255, CV_MINMAX);

					trackWindow = selection;
					trackObject = 1;

					histimg = Scalar::all(0);
					int binW = histimg.cols / hsize;
					Mat buf(1, hsize, CV_8UC3);
					for (int i = 0; i < hsize; i++)
						buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180. / hsize), 255, 255);
					cvtColor(buf, buf, CV_HSV2BGR);

					for (int i = 0; i < hsize; i++)
					{
						int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows / 255);
						rectangle(histimg, Point(i*binW, histimg.rows),
							Point((i + 1)*binW, histimg.rows - val),
							Scalar(buf.at<Vec3b>(i)), -1, 8);
					}
				}

				calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
				backproj &= mask;
				RotatedRect trackBox = CamShift(backproj, trackWindow,
					TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
				if (trackWindow.area() <= 1)
				{
					int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
					trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
						trackWindow.x + r, trackWindow.y + r) &
						Rect(0, 0, cols, rows);
				}

				if (backprojMode)
					cvtColor(backproj, image, COLOR_GRAY2BGR);
				ellipse(image, trackBox, Scalar(0, 0, 255), 3, CV_AA);
			}
		}
		else if (trackObject < 0)
			paused = false;

		if (selectObject && selection.width > 0 && selection.height > 0)
		{
			Mat roi(image, selection);
			bitwise_not(roi, roi);
		}

		imshow("CamShift Demo", image);
		imshow("Histogram", histimg);

		char c = (char)waitKey(10);
		if (c == 27)
			break;
		switch (c)
		{
		case 'b':
			backprojMode = !backprojMode;
			break;
		case 'c':
			trackObject = 0;
			histimg = Scalar::all(0);
			break;
		case 'h':
			showHist = !showHist;
			if (!showHist)
				destroyWindow("Histogram");
			else
				namedWindow("Histogram", 1);
			break;
		case 'p':
			paused = !paused;
			break;
		default:
			;
		}
	}

	return 0;
}

