Computing the Actual Position for Tracking Error Analysis

    When running target-tracking simulation experiments, we need a quantitative measure of tracking accuracy: for each frame, the absolute error between the centroid coordinates reported by the tracker and the target's actual centroid coordinates (a sketch of this comparison step follows the program below).
 
    The centroid of the tracking result can simply be taken as the center of the ellipse marker drawn by the tracker. But how do we obtain the actual centroid? Stereo calibration? Forget it: there is no real camera here.
 
    So we fall back on manual frame-by-frame annotation. (Admittedly a clumsy method: every frame has to be handled by hand.)
 
    The procedure:
    Draw a rectangle around the tracked object with the mouse, compute the centroid inside the rectangle (the formula is given below), and write the centroid coordinates to a file.

    At run time the workflow is simply: select the object -> Enter -> select the object -> Enter ...
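
    The centroid inside the rectangle is just the ratio of spatial moments of the binarized region, which is what cvMoments/cvGetSpatialMoment compute in the code below:

        Mpq = sum over (x, y) of x^p * y^q * b(x, y),   where b(x, y) is 1 for object pixels and 0 otherwise
        x0 = M10 / M00,    y0 = M01 / M00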
 
    The program is as follows:


#include "cv.h"
#include "highgui.h" 
#include  
#include  
#include "iostream.h"
#include "fstream.h"
 
IplImage* image = 0;        // working copy of the current frame
IplImage* gray  = 0;        // grayscale buffer
IplImage* bi    = 0;        // binarized buffer
int select_object = 0;      // nonzero while a selection is being dragged
CvPoint origin;             // corner where the mouse drag started
CvRect selection;           // current selection rectangle

ofstream fout("real_position.txt");   // ground-truth centroids are written here
 
 
void on_mouse( int event, int x, int y, int flags,void* param )
{
    if( !image )
        return;
 
    if( image->origin )            // origin == 1: bottom-left image layout
        y = image->height - y;     // convert to top-left coordinates

    if( select_object )            // while selecting, update the rectangle from the current mouse position
    {
        selection.x = MIN(x,origin.x);
        selection.y = MIN(y,origin.y);
        selection.width = selection.x + CV_IABS(x - origin.x);
        selection.height = selection.y + CV_IABS(y - origin.y);

        // clip the rectangle to the image boundaries
        selection.x = MAX( selection.x, 0 );
        selection.y = MAX( selection.y, 0 );
        selection.width = MIN( selection.width, image->width );
        selection.height = MIN( selection.height, image->height );
        selection.width -= selection.x;
        selection.height -= selection.y;
 
 
    }
    // draw the current selection so the user can see what will be measured
    cvRectangle( image, cvPoint(selection.x, selection.y),
                 cvPoint(selection.x + selection.width, selection.y + selection.height),
                 CV_RGB(255,0,0), 1, 8, 0 );
    cvShowImage( "Demo", image );
 
    switch( event )
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = cvPoint(x,y);
        selection = cvRect(x,y,0,0);
        select_object = 1;
        break;
    case CV_EVENT_LBUTTONUP:
      {
        select_object = 0;

#ifdef _DEBUG
        printf("\n # mouse selection region:");
        printf("\n   X = %d, Y = %d, Width = %d, Height = %d \n",
               selection.x, selection.y, selection.width, selection.height);
#endif
        if( selection.width <= 0 || selection.height <= 0 )
            break;                                 // the click did not define a region

        gray = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 );
        bi   = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 );
        cvCvtColor( image, gray, CV_RGB2GRAY );
        cvThreshold( gray, bi, 60, 255, CV_THRESH_BINARY );

        // compute the target centroid from the spatial moments of the binarized ROI
        double M00 = 0, x0 = 0, y0 = 0;
        CvPoint2D32f center;
        CvMoments m;
        CvMat mat;
        cvMoments( cvGetSubRect(bi, &mat, selection), &m, 1 );
        M00 = cvGetSpatialMoment( &m, 0, 0 );
        x0  = cvGetSpatialMoment( &m, 1, 0 ) / M00;    // centroid, relative to the ROI
        y0  = cvGetSpatialMoment( &m, 0, 1 ) / M00;
        center.x = (float)(x0 + selection.x);          // convert to full-image coordinates
        center.y = (float)(y0 + selection.y);

        fout << "center_x=" << center.x << "  center_y=" << center.y << endl;
        cvReleaseImage( &gray );
        cvReleaseImage( &bi );
        break;
      }
    }
}

int main( int argc, char** argv )
{
    // "test.avi" is only a placeholder path; pass the real sequence on the command line
    CvCapture* capture = cvCaptureFromFile( argc > 1 ? argv[1] : "test.avi" );
    if( !capture )
        return -1;

    cvNamedWindow( "Demo", 1 );
    cvSetMouseCallback( "Demo", on_mouse, 0 );

    int n = 0;                                     // frame counter
    for(;;)
    {
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
            break;
        n++;

        if( !image )                               // allocate the working copy on the first frame
        {
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
        }
        cvCopy( frame, image, 0 );
 
        // overlay the frame number so the annotator knows which frame is being labeled
        CvFont font;
        cvInitFont( &font, CV_FONT_VECTOR0, 0.5, 0.5, 0, 1, 8 );
        char text[1024];
        sprintf( text, "frame %d", n );
        cvPutText( image, text, cvPoint(20,20), &font, CV_RGB(255,0,0) );

        cvShowImage( "Demo", image );
        int c = cvWaitKey(0);          // pause on every frame: select the object, then press a key to advance
 
        if( c == 27 )   // 27 == ESC: quit the program
            break;      // exit the frame loop
    }
   
    cvWaitKey(0);
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Demo" );
 
    return 0;
}
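
    Once real_position.txt exists, the error analysis itself is a per-frame subtraction. Below is a minimal sketch, assuming the tracker writes its own centroids in the same "center_x=...  center_y=..." format to a file hypothetically named tracked_position.txt; it walks both files in parallel and prints the absolute error of each coordinate:

#include <stdio.h>
#include <math.h>

int main( void )
{
    /* tracked_position.txt is a hypothetical file name; the line format matches
       what the annotation program above writes to real_position.txt */
    FILE* freal    = fopen( "real_position.txt",    "r" );
    FILE* ftracked = fopen( "tracked_position.txt", "r" );
    if( !freal || !ftracked )
        return -1;

    float rx, ry, tx, ty;
    int frame = 1;
    while( fscanf( freal,    " center_x=%f center_y=%f", &rx, &ry ) == 2 &&
           fscanf( ftracked, " center_x=%f center_y=%f", &tx, &ty ) == 2 )
    {
        /* absolute error of the two centroid coordinates for this frame */
        printf( "frame %d: |dx| = %.2f  |dy| = %.2f\n",
                frame, fabs( tx - rx ), fabs( ty - ry ) );
        frame++;
    }

    fclose( freal );
    fclose( ftracked );
    return 0;
}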
 

