《OpenCV》Part7 OpenCV3.1.0 获取图片中光斑坐标(未拟合光斑)

《OpenCV》Part7 OpenCV3.1.0 获取图片中光斑中心坐标(暂未拟合光斑)

1、利用高斯卷积函数和Hessian矩阵处理图像,精度亚像素级

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cassert>
#include <cmath>
#include <ctime>
#include <iostream>
#include <vector>

using namespace std;
using namespace cv;

void ImageConv2D(const IplImage* src, IplImage* dst, const CvMat* kernel);
void ImageGaussianConv2D(const IplImage* src, IplImage* dst, const CvMat* kernel);


int main(int argc, char** argv)
{

	//ppoint.jpg lightspott.jpg 的图像中心为(63 62)(65 227) (298 83)  (281 210)
	IplImage* pSrcImg = cvLoadImage("point.jpg", CV_LOAD_IMAGE_GRAYSCALE);

	if (NULL == pSrcImg)
	{
		cout << "Could not load image!!" << endl;

		exit(1);
	}

	clock_t start_time1 = clock();
	cvNamedWindow("src", CV_WINDOW_AUTOSIZE);
	cvMoveWindow("src", 300, 10);
	cvShowImage("src", pSrcImg);
	CvSize srcImgSize = cvGetSize(pSrcImg);

	IplImage* pSrcImgBin = cvCreateImage(srcImgSize, IPL_DEPTH_8U, 1);

	IplImage* pSrcImgDouble = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	cvConvertScale(pSrcImg, pSrcImgDouble, 1.0 / 255);

	IplImage* pDxImg = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* pDyImg = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* pDxxImg = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* pDyyImg = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* pDxyImg = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);

	const int sigma = 3;
	const int num = 2 * 3 * sigma + 1;
	const int MidVal = num / 2;
	const float PI = 3.14159f;

	//计算高斯卷积模板
	CvMat* gaussianDx = cvCreateMat(num, num, CV_32FC1);
	CvMat* gaussianDy = cvCreateMat(num, num, CV_32FC1);
	CvMat* gaussianDxx = cvCreateMat(num, num, CV_32FC1);
	CvMat* gaussianDyy = cvCreateMat(num, num, CV_32FC1);
	CvMat* gaussianDxy = cvCreateMat(num, num, CV_32FC1);

	for (int i = 0; i < num; i++)
	{
		for (int j = 0; j < num; j++)
		{
			int squareSigma = sigma * sigma;
			int powFourSigma = squareSigma * squareSigma;
			int x_index = i - MidVal;
			int y_index = j - MidVal;
			float expTerm = exp(-(x_index * x_index + y_index * y_index) / 2.0f / squareSigma);
			float tempval = expTerm / PI / powFourSigma / 2;

			((float*)(gaussianDx->data.ptr + gaussianDx->step * i))[j] = -(x_index * tempval);
			((float*)(gaussianDy->data.ptr + gaussianDy->step * i))[j] = -(y_index * tempval);
			((float*)(gaussianDxx->data.ptr + gaussianDxx->step * i))[j] = (x_index * x_index - squareSigma) * tempval / squareSigma;
			((float*)(gaussianDyy->data.ptr + gaussianDyy->step * i))[j] = (y_index * y_index - squareSigma) * tempval / squareSigma;
			((float*)(gaussianDxy->data.ptr + gaussianDxy->step * i))[j] = x_index * y_index * tempval / squareSigma;
		}
	}

	//clock_t start_time = clock();
	long start_time = cvGetTickCount();
	//进行一阶导和二阶导的高斯卷积
	cvFilter2D(pSrcImgDouble, pDxImg, gaussianDx);

	cvFilter2D(pSrcImgDouble, pDyImg, gaussianDy);
	cvFilter2D(pSrcImgDouble, pDxxImg, gaussianDxx);
	cvFilter2D(pSrcImgDouble, pDyyImg, gaussianDyy);
	cvFilter2D(pSrcImgDouble, pDxyImg, gaussianDxy);
	//clock_t end_time = clock();
	//clock_t start_time = clock();
	//ImageConv2D(pSrcImgDouble, pDxImg, gaussianDx);
	//ImageConv2D(pSrcImgDouble, pDyImg, gaussianDy);
	//ImageConv2D(pSrcImgDouble, pDxxImg, gaussianDxx);
	//ImageConv2D(pSrcImgDouble, pDyyImg, gaussianDyy);//ImageGaussianConv2D
	//ImageConv2D(pSrcImgDouble, pDxyImg, gaussianDxy);
	//ImageGaussianConv2D(pSrcImgDouble, pDxImg, gaussianDx);
	//ImageGaussianConv2D(pSrcImgDouble, pDyImg, gaussianDy);
	//ImageGaussianConv2D(pSrcImgDouble, pDxxImg, gaussianDxx);
	//ImageGaussianConv2D(pSrcImgDouble, pDyyImg, gaussianDyy);//ImageGaussianConv2D
	//ImageGaussianConv2D(pSrcImgDouble, pDxyImg, gaussianDxy);
	//clock_t end_time = clock();
	//std::cout << (end_time - start_time)  << endl;

	long end_time = cvGetTickCount();
	std::cout << (end_time - start_time) / cvGetTickFrequency() << endl;//


	//释放高斯卷积模板占用的内存
	cvReleaseMat(&gaussianDx);
	cvReleaseMat(&gaussianDy);
	cvReleaseMat(&gaussianDxx);
	cvReleaseMat(&gaussianDyy);
	cvReleaseMat(&gaussianDxy);

	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours = NULL;

	cvThreshold(pSrcImg, pSrcImgBin, 80, 255, CV_THRESH_BINARY);

	cvFindContours(pSrcImgBin, storage, &contours, sizeof(CvContour),
		CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	vector areaStar;
	CvSeq* _contours = contours;

	while (NULL != _contours)
	{
		areaStar.push_back(cvBoundingRect(_contours));
		_contours = _contours->h_next;
	}

	//求特征值之和以及特征值之积
	CvMat* eigenvalueSum = cvCreateMat(srcImgSize.height, srcImgSize.width, CV_32FC1);
	CvMat* eigenvaluePro = cvCreateMat(srcImgSize.height, srcImgSize.width, CV_32FC1);
	CvMat* eigenvalueTemp_a = cvCreateMat(srcImgSize.height, srcImgSize.width, CV_32FC1);
	CvMat* eigenvalueTemp_b = cvCreateMat(srcImgSize.height, srcImgSize.width, CV_32FC1);

	cvAdd(pDxxImg, pDyyImg, eigenvalueSum);                        //特征值之和

	cvMul(pDxxImg, pDyyImg, eigenvalueTemp_a);
	cvMul(pDxyImg, pDxyImg, eigenvalueTemp_b);
	cvSub(eigenvalueTemp_a, eigenvalueTemp_b, eigenvaluePro);      //特征值之积

	释放临时变量矩阵所占用的内存
	cvReleaseMat(&eigenvalueTemp_a);
	cvReleaseMat(&eigenvalueTemp_b);


	//求每个像素的亚像素值
	IplImage* sVal = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* tVal = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* stValTempA = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* stValTempB = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* stValTempC = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);
	IplImage* stValTempD = cvCreateImage(srcImgSize, IPL_DEPTH_32F, 1);

	cvMul(pDyImg, pDxyImg, stValTempA);

	cvMul(pDxImg, pDyyImg, stValTempB);
	cvMul(pDyImg, pDxxImg, stValTempC);
	cvSub(stValTempA, stValTempB, stValTempD);
	cvDiv(stValTempD, eigenvaluePro, sVal);             //s值
	cvSub(stValTempA, stValTempC, stValTempD);
	cvDiv(stValTempD, eigenvaluePro, tVal);             //t值

	//释放临时变量所占用的内存
	cvReleaseImage(&stValTempA);
	cvReleaseImage(&stValTempB);
	cvReleaseImage(&stValTempC);
	cvReleaseImage(&stValTempD);

	vector pointDetect;
	//定义一些与坐标相关的变量
	float minTemp = 255;
	float _i = 0;
	float _j = 0;
	float _s = 0.0;
	float _t = 0.0;
	float _Subx = 0.0;
	float _Suby = 0.0;
	const float halfPixel = 0.5;

	for (vector::const_iterator iter = areaStar.begin();
		iter != areaStar.end(); iter++)
	{
		for (int i = iter->y; i < (iter->y + iter->height); i++)
		{
			for (int j = iter->x; j < (iter->x + iter->width); j++)
			{
				根据特征值之和最小和[s t]在[[-0.5,0.5] [-0.5,0.5]]区域内的准则进行光点提取
				if ((fabs(((float*)(sVal->imageData + sVal->widthStep*i))[j]) < halfPixel)\
					&& (fabs(((float*)(tVal->imageData + tVal->widthStep*i))[j]) < halfPixel))
				{
					if ((((float*)(eigenvalueSum->data.ptr + eigenvalueSum->step*i))[j] < 0)\
						&& (((float*)(eigenvaluePro->data.ptr + eigenvaluePro->step*i))[j] > 0))
					{
						if (((float*)(eigenvalueSum->data.ptr + eigenvalueSum->step*i))[j] < minTemp)
						{
							minTemp = ((float*)(eigenvalueSum->data.ptr + eigenvalueSum->step*i))[j];

							_s = ((float*)(sVal->imageData + sVal->widthStep*i))[j];
							_t = ((float*)(tVal->imageData + tVal->widthStep*i))[j];

							_i = i;
							_j = j;
						}//end if
					}//end if
				}//end if

				_Subx = _j + _t;
				_Suby = _i + _s;

			}//end for: iter->j
		}//end for:iter->i

		pointDetect.push_back(cvPoint2D32f(_Subx, _Subx));

		std::cout << "右下 x: " << _Subx << " y: " << _Suby << endl;

		minTemp = 255;
		_i = 0;
		_j = 0;
		_s = 0.0;
		_t = 0.0;
		_Subx = 0.0;
		_Suby = 0.0;

	}//end for: iter

	cvReleaseMat(&eigenvalueSum);
	cvReleaseMat(&eigenvaluePro);

	clock_t end_time1 = clock();
	std::cout << (end_time1 - start_time1) << endl;
	cvWaitKey(0);

	cvReleaseImage(&pSrcImg);
	cvReleaseImage(&pSrcImgDouble);

	cvReleaseImage(&pDxImg);
	cvReleaseImage(&pDyImg);
	cvReleaseImage(&pDxxImg);
	cvReleaseImage(&pDyyImg);
	cvReleaseImage(&pDxyImg);



	return 0;
}

//Conv2D (the same as conv2 in matlab); without rotate 180 degree.
//Evaluates the kernel only at pixels brighter than the 80/255 threshold
//(dark pixels keep whatever dst already holds); taps falling outside the
//image are treated as zero.
void ImageConv2D(const IplImage* src, IplImage* dst, const CvMat* kernel)
{
	assert(src);
	assert(dst);
	assert(kernel);

	if ((src->height != dst->height) || (src->width != dst->width))
	{
		std::cout << "source image and destination image no same size" << endl;

		return;
	}

	if (src->nChannels != dst->nChannels)
	{
		std::cout << "source image and destination image has different channels" << endl;

		return;
	}

	int kernelW = kernel->width;
	int kernelH = kernel->height;

	int halfKernelW = kernelW >> 1;
	int halfKernelH = kernelH >> 1;

	double sumTemp = 0.;

	for (int i = 0; i < src->height; i++)
	{
		// FIX: the inner loop header was garbled ("jwidth"); restore the bound.
		for (int j = 0; j < src->width; j++)
		{
			if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
			{
				for (int m = 0; m < kernelH; m++)
				{
					for (int n = 0; n < kernelW; n++)
					{
						int temp_i = i - m + halfKernelH;
						int temp_j = j - n + halfKernelW;

						// Skip taps outside the image (zero padding).
						if ((temp_i >= 0) && (temp_i < src->height)\
							&& (temp_j >= 0) && (temp_j < src->width)
							)
						{
							sumTemp += (((float*)(src->imageData + src->widthStep*temp_i))[temp_j]\
								* cvmGet(kernel, m, n));
						} //end if
					}//end for n
				}//end for m

				((float*)(dst->imageData + dst->widthStep*i))[j] = sumTemp;

				sumTemp = 0.f;
			}

		}//end for j
	}//end for i
}




//Accumulates kernel-weighted src samples centered at (row, col); taps that
//fall outside the image are skipped (zero padding). Shared by the border
//passes of ImageGaussianConv2D.
inline double accumulator(const IplImage* src, int row, int col, const CvMat* kernel)
{
	const int cenX = kernel->width >> 1;
	const int cenY = kernel->height >> 1;

	double acc = 0.0;

	for (int ky = 0; ky < kernel->height; ky++)
	{
		const int sy = row - ky + cenY;

		// Whole tap row outside the image: nothing to add.
		if ((sy < 0) || (sy >= src->height))
		{
			continue;
		}

		const float* pRow = (const float*)(src->imageData + src->widthStep * sy);

		for (int kx = 0; kx < kernel->width; kx++)
		{
			const int sx = col - kx + cenX;

			if ((sx >= 0) && (sx < src->width))
			{
				acc += pRow[sx] * cvmGet(kernel, ky, kx);
			}
		}
	}

	return acc;
}

//Gaussian-kernel-specific 2-D convolution. Border pixels fall back to the
//generic accumulator(); interior pixels exploit the point symmetry of the
//Gaussian derivative kernels (k(m,n) == +/- k(H-1-m, W-1-n)) by pairing each
//tap with its mirror, halving the number of kernel reads/multiplies.
//Only pixels brighter than the 80/255 threshold are processed.
void ImageGaussianConv2D(const IplImage* src, IplImage* dst, const CvMat* kernel)
{
	assert(src);
	assert(dst);
	assert(kernel);

	if ((src->height != dst->height) || (src->width != dst->width))
	{
		std::cout << "source image and destination image no same size" << endl;

		return;
	}

	if (src->nChannels != dst->nChannels)
	{
		std::cout << "source image and destination image has different channels" << endl;

		return;
	}

	int iKernelW = kernel->width;
	int iKernelH = kernel->height;

	int iKernelCenX = iKernelW >> 1;   //kernel center x
	int iKernelCenY = iKernelH >> 1;   //kernel center y

	double sumTemp = 0.;
	const double precision = 0.00000001;
	int temp_i = 0;
	int temp_j = 0;
	int invI = 0;
	int invJ = 0;
	int i = 0;
	int j = 0;
	int m = 0;
	int n = 0;
	int _i = 0;
	int _j = 0;

	//Top border rows. FIX: inner loop bound was garbled ("jwidth").
	for (i = 0; i < iKernelCenY; i++)
	{
		for (j = 0; j < src->width; j++)
		{
			if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
			{
				((float*)(dst->imageData + dst->widthStep*i))[j] = accumulator(src, i, j, kernel);
			}
		}
	}

	//Bottom border rows. FIX: inner loop bound was garbled.
	for (i = (src->height - iKernelCenY); i < src->height; i++)
	{
		for (j = 0; j < src->width; j++)
		{
			if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
			{
				((float*)(dst->imageData + dst->widthStep*i))[j] = accumulator(src, i, j, kernel);
			}
		}
	}

	//Left border columns. FIX: the loop header and body lines were fused in
	//the original listing; restored as j in [0, iKernelCenX).
	for (i = iKernelCenY; i < (src->height - iKernelCenY); i++)
	{
		for (j = 0; j < iKernelCenX; j++)
		{
			if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
			{
				((float*)(dst->imageData + dst->widthStep*i))[j] = accumulator(src, i, j, kernel);
			}
		}
	}

	//Right border columns. FIX: inner loop bound was garbled.
	for (i = iKernelCenY; i < (src->height - iKernelCenY); i++)
	{
		for (j = (src->width - iKernelCenX); j < src->width; j++)
		{
			if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
			{
				((float*)(dst->imageData + dst->widthStep*i))[j] = accumulator(src, i, j, kernel);
			}
		}
	}

	//Interior: decide symmetric vs anti-symmetric by comparing the first and
	//last kernel entries. FIX: cvmGet takes (row, col) — the original passed
	//(iKernelW - 1, iKernelH - 1); harmless only because the kernel is square.
	if (fabs(cvmGet(kernel, 0, 0) - cvmGet(kernel, iKernelH - 1, iKernelW - 1)) < precision)
	{
		//Symmetric kernel: mirrored taps share the same weight, so each
		//weight multiplies the SUM of the pixel pair.
		for (i = iKernelCenY; i < (src->height - iKernelCenY); i++)
		{
			for (j = iKernelCenX; j < (src->width - iKernelCenX); j++)
			{
				if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
				{
					m = 0;
					_i = i + iKernelCenY;
					_j = j + iKernelCenX;

					//Full rows above the kernel center, paired with their
					//point-mirrored counterparts below it.
					for (; m < iKernelCenY; m++)
					{
						for (n = 0; n < iKernelW; n++)
						{
							temp_i = _i - m;
							temp_j = _j - n;
							invI = 2 * i - temp_i;
							invJ = 2 * j - temp_j;

							sumTemp += ((((float*)(src->imageData + src->widthStep*temp_i))[temp_j]
								+ (((float*)(src->imageData + src->widthStep*invI))[invJ]))
								* cvmGet(kernel, m, n));
						} //end for n
					}//end for m

					//Center row, left of center, paired with the right half.
					//FIX: the original ran n <= iKernelCenX, which counted the
					//center tap twice and then read cvmGet(kernel, m,
					//iKernelCenX + 1) out of bounds below.
					for (n = 0; n < iKernelCenX; n++)
					{
						temp_i = _i - m;
						temp_j = _j - n;
						invI = 2 * i - temp_i;
						invJ = 2 * j - temp_j;

						sumTemp += ((((float*)(src->imageData + src->widthStep*temp_i))[temp_j]
							+ (((float*)(src->imageData + src->widthStep*invI))[invJ]))
							* cvmGet(kernel, m, n));
					}//end for

					//Center tap (here m == iKernelCenY, n == iKernelCenX).
					sumTemp += (((float*)(src->imageData + src->widthStep*i))[j] * cvmGet(kernel, m, n));

					((float*)(dst->imageData + dst->widthStep*i))[j] = sumTemp;

					sumTemp = 0.f;
				}
			}//end for j
		}//end for i
	}//end if
	else
	{
		//Anti-symmetric kernel: mirrored taps carry negated weights, so each
		//weight multiplies the DIFFERENCE of the pixel pair.
		for (i = iKernelCenY; i < (src->height - iKernelCenY); i++)
		{
			for (j = iKernelCenX; j < (src->width - iKernelCenX); j++)
			{
				if (((float*)(src->imageData + src->widthStep*i))[j] > 80.0 / 255)
				{
					m = 0;
					_i = i + iKernelCenY;
					_j = j + iKernelCenX;

					for (; m < iKernelCenY; m++)
					{
						for (n = 0; n < iKernelW; n++)
						{
							temp_i = _i - m;
							temp_j = _j - n;
							invI = 2 * i - temp_i;
							invJ = 2 * j - temp_j;

							sumTemp += ((((float*)(src->imageData + src->widthStep*temp_i))[temp_j]
								- (((float*)(src->imageData + src->widthStep*invI))[invJ]))
								* cvmGet(kernel, m, n));
						} //end for n
					}//end for m

					for (n = 0; n < iKernelCenX; n++)
					{
						temp_i = _i - m;
						temp_j = _j - n;
						invI = 2 * i - temp_i;
						invJ = 2 * j - temp_j;

						sumTemp += ((((float*)(src->imageData + src->widthStep*temp_i))[temp_j]
							- (((float*)(src->imageData + src->widthStep*invI))[invJ]))
							* cvmGet(kernel, m, n));
					}//end for

					//Center tap; an anti-symmetric kernel's center weight is
					//(analytically) zero, so this matches the paired terms.
					sumTemp += (((float*)(src->imageData + src->widthStep*i))[j] * cvmGet(kernel, m, n));

					((float*)(dst->imageData + dst->widthStep*i))[j] = sumTemp;

					sumTemp = 0.f;
				}
			}//end for j
		}//end for i
	} //end else
}


你可能感兴趣的:(《OpenCV》)