SLAM: Real-Time 3D Reconstruction Based on Motion and Visual Odometry: Map Management / Coordinate Frame Conversion

Processing performed for each image frame:

Updated per frame: camera parameters, filter state, and per-feature bookkeeping information.

1. Feature extraction

//Map management :(adding and deleting features; and converting inverse depth to Euclidean)
		o_Slamer.map_management(filter, features_info, cam,  im, min_number_of_feats_in_im,  step );
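
For context, a minimal sketch of the per-frame driver loop that this call lives in is shown below. The object and variable names (o_Slamer, filter, features_info, cam, min_number_of_feats_in_im) are taken from the call above; the image naming pattern and the feature-count threshold are assumptions, not the author's actual main loop.

	// Hypothetical per-frame driver (sketch only; names follow the call above).
	#include <cstdio>
	#include <opencv2/opencv.hpp>
	#include "EkfSlam.h"

	void run_sequence()
	{
		CEkfSlam o_Slamer;
		StruEkf  filter;
		std::vector<StruFeaturesInfo> features_info;
		StruCamP cam;
		const int min_number_of_feats_in_im = 25;            // assumed target feature count

		for (int step = 90; step <= 2169; ++step) {           // gInitIm .. gLastIm
			char name[256];
			sprintf(name, "E:/Image/slam/pgm3/%04d.pgm", step);  // assumed file pattern
			cv::Mat im = cv::imread(name, 0);                 // load the frame as grayscale
			if (im.empty()) continue;

			// ... EKF prediction and measurement update run here (not shown) ...

			// Map management: add/delete features, convert inverse depth to Euclidean
			o_Slamer.map_management(filter, features_info, cam, im,
			                        min_number_of_feats_in_im, step);
		}
	}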


#include "StdAfx.h"
#include "EkfSlam.h"

CEkfSlam::CEkfSlam(void)
{
	// Global variables
	// Machine epsilon, used when initializing Eigen matrices
	eps            = numeric_limits<double>::epsilon();
	// Used when generating the random sphere
	gSphereRadiusP = 12.59158724374398;
	// Identifier for the motion model used in the feature update
	gSVOC     = "constant_velocity";
	// Folder containing the image sequence to process
	gSequencePath = "E:/Image/slam/pgm3/";
	// First and last frame indices
	gInitIm =   90;
	gLastIm = 2169;
}


CEkfSlam::~CEkfSlam(void)
{
}


void m012345(){}

//Map management
int  CEkfSlam::map_management(
	StruEkf  &filter, 
	std::vector<StruFeaturesInfo >  &features_info, 
	StruCamP &cam, Mat &im,int min_number_of_features_in_image, int step )
{
	//1. delete features, if necessary
	delete_features( filter, features_info );

	//////////////
	int	measured = 0;
	for (size_t i = 0; i < features_info.size(); ++i){// count the features measured in this frame
		if ( features_info[i].low_innovation_inlier || features_info[i].high_innovation_inlier )
			measured = measured + 1;

	}

	// 2.update features info
	update_features_info( features_info );


	//3. convert features from inverse depth to cartesian, if necessary
	inversedepth_2_cartesian( filter, features_info );// this function still needs rework//wishchin!!!

	//4. initialize features randomly (the new-feature search box is placed at a random image location)
	if (measured == 0){
		initialize_features( step, cam, filter, features_info, min_number_of_features_in_image, im );
	}
	else
	{
		if (measured < min_number_of_features_in_image){
			initialize_features( step, cam,filter, features_info, min_number_of_features_in_image - measured, im );
		}
	}

	return 1;
}


void m011(){}


int  CEkfSlam::update_features_info( std::vector<StruFeaturesInfo>  &features_info )
{
	// Reset the per-feature bookkeeping; porting of this function is complete.
	// reset features_info parameters
	for ( int idx=0;idx< features_info.size(); ++idx){

		if (features_info[idx].h.size() <1 ){
			features_info[idx].times_predicted = features_info[idx].times_predicted + 1;
		}

		if (features_info[idx].low_innovation_inlier||features_info[idx].high_innovation_inlier){
			features_info[idx].times_measured = features_info[idx].times_measured + 1;
		}
		features_info[idx].individually_compatible = 0;
		features_info[idx].low_innovation_inlier   = 0;
		features_info[idx].high_innovation_inlier  = 0;
		features_info[idx].h =  Eigen::MatrixXf::Zero(1,1);
		features_info[idx].z =  Eigen::MatrixXf::Zero(1,1);
		features_info[idx].H =  Eigen::MatrixXf::Zero(1,1);
		features_info[idx].S =  Eigen::MatrixXf::Zero(1,1);

	}
	return 1;
}


int  CEkfSlam::delete_features( 
	StruEkf &filter, std::vector<StruFeaturesInfo >  &features_info )
{
	// Stub used during initialization; feature deletion is not yet ported

	return 1;
}



int CEkfSlam::inversedepth_2_cartesian(
	StruEkf &filter, std::vector<StruFeaturesInfo >  &features_info )
{
	//3. convert features from inverse depth to cartesian, if necessary
	// TODO: still to be finished //wishchin!!!

	double linearity_index_threshold = 0.1;
	Eigen::MatrixXf   X;// X is the full state vector (e.g. 109x1)
	Eigen::MatrixXf   P;
	filter.get_x_k_k(X);
	filter.get_p_k_k(P);

	int initialPositionOfFeature = 14;
	for (size_t i = 0; i < features_info.size(); ++i){

		// Only inverse-depth features are candidates for conversion
		if ( strcmp(features_info[i].type.c_str(), "inversedepth") != 0 )
			continue;

		// Locate the feature inside the state vector: the camera occupies the first 13
		// entries, each earlier Cartesian feature adds 3 and each earlier inverse-depth feature adds 6
		initialPositionOfFeature = 14;
		for (size_t j = 0; j < i; ++j)
		{
			if ( strcmp(features_info[j].type.c_str(), "cartesian") == 0 )
				initialPositionOfFeature = initialPositionOfFeature + 3;
			if ( strcmp(features_info[j].type.c_str(), "inversedepth") == 0 )
				initialPositionOfFeature = initialPositionOfFeature + 6;
		}
			// Zero-based layout of the feature in the state: [x y z] at IPF-1..IPF+1, theta at IPF+2, phi at IPF+3, rho at IPF+4
			double std_rho = sqrt( P(initialPositionOfFeature + 4,initialPositionOfFeature + 4)  );	
			double   rho   = X(initialPositionOfFeature + 4);
			double std_d   = std_rho/(rho*rho);

			// camera-to-point distance (camera 2 = the current camera)
			double   theta =  X(initialPositionOfFeature + 2);
			double     phi =  X(initialPositionOfFeature + 3);

			Eigen::MatrixXf      mi;//mi is 3*1 M
			m_(theta,phi,mi);

			Eigen::MatrixXf   x_c1(3,1);// camera position when the feature was first observed
			x_c1<< X.row(initialPositionOfFeature-1 ),  X.row(initialPositionOfFeature),  X.row(initialPositionOfFeature+1);
			Eigen::MatrixXf   x_c2(3,1);// current camera position
			x_c2<< X.row(0),  X.row(1), X.row(2) ;
			Eigen::MatrixXf   x_c3(6,1);// full inverse-depth parameterization of the feature
			x_c3<<  X.row(initialPositionOfFeature-1 ),  X.row(initialPositionOfFeature),  X.row(initialPositionOfFeature+1), X.row(initialPositionOfFeature+2),  X.row(initialPositionOfFeature+3),  X.row(initialPositionOfFeature+4);

			// Convert to Cartesian coordinates
			Eigen::MatrixXf    pp;// pp is a 3x1 matrix
			inversedepth2cartesian(x_c3, pp);

			double d_c2p = (pp - x_c2).norm();
			// alpha (parallax)
			double  cos_alpha ;
			// The expression yields a 1x1 Eigen matrix; read the scalar out explicitly
			// (assigning it straight to a double does not compile, error C2440).
			Eigen::MatrixXf   MMp= ( (pp-x_c1).transpose() * (pp-x_c2) )/((pp-x_c1).norm() *(pp-x_c2).norm() ) ;
			cos_alpha = MMp(0,0);

			// Linearity index
			double linearity_index = 4 * std_d* cos_alpha / d_c2p;


			if (linearity_index<linearity_index_threshold){

				//int size_X_old =X.rows();// size(X,1);

				//// State Vector
				////X = [X(1:initialPositionOfFeature-1); pp; X(initialPositionOfFeature+6:end)];
				//X = [X(1:initialPositionOfFeature-1); pp; X(initialPositionOfFeature+6:end)];

				//// Covariance
				//Eigen::MatrixXf  dm_dtheta(3,1); 
				//dm_dtheta<< cos(phi) * cos(theta) , 0  , -cos(phi)*sin(theta) ;
				//dm_dtheta.transpose();
				//Eigen::MatrixXf    dm_dphi(1,3); 
				//dm_dphi<< (0 - sin(phi) )*sin(theta),  -cos(phi),   -sin(phi)*cos(theta);

				//Eigen::MatrixXf  J =  [ eye(3) (1/rho)*dm_dtheta (1/rho)*dm_dphi -mi/(rho^2) ];
				////Eigen::MatrixXf     J_all = sparse( [eye(initialPositionOfFeature-1) zeros(initialPositionOfFeature-1,6) zeros(initialPositionOfFeature - 1, size_X_old - initialPositionOfFeature - 5);...
				////	zeros(3, initialPositionOfFeature-1) J zeros(3, size_X_old - initialPositionOfFeature - 5);...
				////	zeros(size_X_old - initialPositionOfFeature - 5, initialPositionOfFeature - 1) zeros(size_X_old - initialPositionOfFeature - 5, 6) eye(size_X_old - initialPositionOfFeature - 5)] );

				
				//Eigen::MatrixXf     J_all ;//= Eigen::MatrixXf sparse( [eye(initialPositionOfFeature-1) zeros(initialPositionOfFeature-1,6) zeros(initialPositionOfFeature - 1, size_X_old - initialPositionOfFeature - 5),
				////Eigen::MatrixXf::setZero(3, initialPositionOfFeature-1) J zeros(3, size_X_old - initialPositionOfFeature - 5),
				////Eigen::MatrixXf::setZero(size_X_old - initialPositionOfFeature - 5, initialPositionOfFeature - 1) zeros(size_X_old - initialPositionOfFeature - 5, 6) eye(size_X_old - initialPositionOfFeature - 5)] );

				//Eigen::MatrixXf P = (J_all*P*J_all).transpose();

				//// Update the filter state and covariance here
				//filter.set_x_k_k(X);
				//filter.set_p_k_k(P);
				//features_info[i].type = "cartesian";
				//// for the moment, only convert one feature per step (sorry!)

			}
	}
		
	return 1;
}



double CEkfSlam::inversedepth2cartesian( 
	Eigen::MatrixXf  &inverse_depth ,Eigen::MatrixXf  &cartesian)
{
	// TODO: still to be verified
	// Input: 6x1 inverse-depth vector [x y z theta phi rho]^T   // Output: 3x1 Cartesian point

	Eigen::MatrixXf rw(1,3);// = inverse_depth(1:3,:);
	rw << inverse_depth(0,0),inverse_depth(1,0),inverse_depth(2,0);
	double theta = inverse_depth(3,0);
	double   phi = inverse_depth(4,0);
	double   rho = inverse_depth(5,0);

	double cphi = cos(phi);
	Eigen::MatrixXf m(3,1);// = [
	m<< cphi*sin(theta),   -sin(phi),  cphi*cos(theta);   

	//cartesian(1,:) = rw(1) + (1./rho).*m(1,:);
	//cartesian(2,:) = rw(2) + (1./rho).*m(2,:);
	//cartesian(3,:) = rw(3) + (1./rho).*m(3,:);
	cartesian.resize(3,1);// make sure the output is sized before assigning
	cartesian(0,0) = rw(0,0) + (1/rho)*m(0,0);
	cartesian(1,0) = rw(0,1) + (1/rho)*m(1,0);
	cartesian(2,0) = rw(0,2) + (1/rho)*m(2,0);

	return cphi;
}
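
As a quick sanity check of the inverse-depth to Cartesian conversion, here is a small illustrative usage sketch (hand-picked values, assuming the member function is publicly callable): a feature anchored at the origin with theta = phi = 0 and inverse depth rho = 0.5 should map to the Cartesian point (0, 0, 2), since the ray direction m is (0, 0, 1) and the depth is 1/rho = 2.

	// Illustrative check of inversedepth2cartesian (values chosen by hand).
	Eigen::MatrixXf inv_depth(6,1), cart(3,1);
	inv_depth << 0, 0, 0,      // camera position when the feature was first seen
	             0, 0,         // azimuth theta and elevation phi
	             0.5f;         // inverse depth rho  ->  depth 1/rho = 2
	CEkfSlam slam;
	slam.inversedepth2cartesian(inv_depth, cart);
	// Expected: cart == (0, 0, 2)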


double  CEkfSlam::m_(double &a, double &b, Eigen::MatrixXf &m)
{
	// Unit direction vector from azimuth-elevation angles
	// m is returned as a 3x1 matrix
	
	double theta = a; 
	double   phi = b;
	double  cphi = cos(phi);

	//Eigen::MatrixXf m(3,1);
	m.resize(3,1);
	m<< cphi*sin(theta), -sin(phi), cphi*cos(theta);// column vector, no transpose needed

	//int nargin =0;
	//if (nargin<2){

	//	theta = a(1,:); 
	//	double  phi = a(2,:);
	//	cphi = cos(phi);
	//	m = [cphi.*sin(theta); -sin(phi); cphi.*cos(theta)];
	//}
	//else
	//{
	//	theta = a; 
	//	double  phi = b;
	//	cphi = cos(phi);
	//	m = [cphi.*sin(theta)   -sin(phi)  cphi.*cos(theta)]';
	//}			
	//	//end

	return 1;
}

void m012(){}


int  CEkfSlam::initialize_features(
	int step,  StruCamP &cam, 
	StruEkf &filter,std::vector<StruFeaturesInfo >  &features_info,
	int  num_features_to_initialize, cv::Mat  &im)
{
	// This function is ported; the helpers it calls still need work.
	// settings
	int max_attempts = 50;
	int attempts     =  0;
	int initialized  =  0;
	Eigen::MatrixXf    uv;

	while( initialized < num_features_to_initialize  &&  attempts< max_attempts    ) 
	{
		attempts = attempts+1;
		initialize_a_feature( step, cam, im, filter, features_info ,uv );

		if( uv.cols() != 0 )                 // a feature was actually initialized
			initialized = initialized + 1;//wishchin!!!

	}
	return 1;
}



int  CEkfSlam::initialize_a_feature(
	int step,  StruCamP &cam,  cv::Mat &im_k,
	StruEkf &filter,std::vector<StruFeaturesInfo>  &features_info ,
	Eigen::MatrixXf &uv)
{
	// Ported; the helper functions below still need work.
	// numerical values
	int 	half_patch_size_when_initialized = 20;
	int 	half_patch_size_when_matching = 6;

	int 	excluded_band = half_patch_size_when_initialized + 1;
	int 	max_initialization_attempts = 1;

	cv::Size initializing_box_size(60,40);// size of the local search box
	cv::Size initializing_box_semisize(initializing_box_size.height/2,initializing_box_size.width/2);

	int 	initial_rho = 1;
	int 	std_rho     = 1;
	int 	std_pxl = filter.std_z;//int 	std_pxl = get_std_z(filter);

	int 	rand_attempt  = 1;
	int 	not_empty_box = 1;
	bool 	detected_new  = 0;

	int total_features_number=0;

	//// Predict where the existing map features project into the current image
	predict_camera_measurements( filter.x_k_k , cam, features_info );

	Eigen::MatrixXf uv_pred;// = [];
	Eigen::MatrixXf features_in_the_box;

	for (size_t i = 0; i < features_info.size(); ++i) {
		uv_pred =  features_info[i].h.transpose();// MATLAB appends columns here: uv_pred = [uv_pred features_info(i).h']
	}

	//	//for i=1:max_initialization_attempts
	for (int i=0;i< max_initialization_attempts;++i){

		// if a feature has been initialized, exit
		if ( detected_new )  break;

		// search_region_center is a 2x1 point drawn uniformly at random
		//Eigen::MatrixXf  search_region_center(2,1)= 1.0/2* Eigen::MatrixXf::Random(2,1)+ 1.0/2 *Eigen::MatrixXf::Ones(2,1)  ;//Eigen::MatrixXf = rand(2,1);
		CvPoint search_region_center;
		int  search_region_centery = rand();
		double search_region_center_y = (search_region_centery %1000 )/1000.0;
		int search_region_centerx = rand();
		double search_region_center_x = (search_region_centerx %1000 )/1000.0;

		// Randomize the search box center; the coordinates are rounded down
		// (CvPoint only holds integers, so floating-point coordinates cannot be used)
		search_region_center.x = 
			floor( (float)(search_region_center_x*  (cam.nCols-  2*excluded_band  - 2*initializing_box_semisize.height ) )  ) 
			+  excluded_band  +  initializing_box_semisize.height;

		search_region_center.y = 
			floor( (float)(search_region_center_y*  (cam.nRows-  2*excluded_band  - 2*initializing_box_semisize.width ) ) )
			+ excluded_band+initializing_box_semisize.width;

		//1. Extract FAST corners
		// In MATLAB this calls fast_corner_detect_9 on the sub-image around the search center:
		//	cs = fast_corner_detect_9(double(im_k(search_region_center(2)-initializing_box_semisize(2):search_region_center(2)+initializing_box_semisize(2), search_region_center(1)-initializing_box_semisize(1):search_region_center(1)+initializing_box_semisize(1))),... // the image,
		//	100);

		//1. Extract FAST corners // here OpenCV is used directly
		// Take the local patch with cv::getRectSubPix (sub-pixel accurate rectangle extraction), then run FAST on it: image, threshold, corners
		std::vector<CvPoint>  cs(0);
		cv::Mat             RectImg; 
		const double     Thres =100;
		CvSize                 Rect;
		Rect.height=initializing_box_semisize.height;
		Rect.width =initializing_box_semisize.width ;
		cv::getRectSubPix(im_k,Rect,search_region_center,RectImg,-1);

		//cv::imshow("im",im_k);cv::imshow("im2",RectImg);cv::waitKey(0);
		// Detect FAST corners in the patch
		fast_corner_detect_9(RectImg,Thres,cs);
		//fast_corner_detect_9(im_k,Thres,cs);

		// c is an n x 2 matrix of corner coordinates
		// Keep only the corners that survive non-maximum suppression
		Eigen::MatrixXf  c;
		fast_nonmax(RectImg,Thres, cs, c);

		Eigen::MatrixXf all_uv = c.transpose(); 

		if (all_uv.cols() >0)
		{
			Eigen::MatrixXf X =Eigen::MatrixXf::Ones(1, all_uv.cols() );
			double  TT = - initializing_box_semisize.height + search_region_center.y - 1;
			double TT2 = - initializing_box_semisize.width  + search_region_center.x - 1;

			all_uv.row(0)    = all_uv.row(0) +  ( TT)*X  ;
			all_uv.row(1)    = all_uv.row(1) +  (TT2)*X  ;
		}

		int nPoints =all_uv.cols();//	nPoints=size(all_uv,2);

		//// Are there corners in the box?
		bool are_there_corners = ( all_uv.cols() > 0 );// MATLAB: ~isempty(all_uv)

		//// Are there other features in the box?
		//	if ~isempty(uv_pred)		total_features_number = size(uv_pred,2);

		bool are_there_features =false;
		if (uv_pred.cols()>0){
			total_features_number =uv_pred.cols();

			Eigen::MatrixXf  Compare00 =  Eigen::MatrixXf::Ones(1,total_features_number);
			Eigen::MatrixXf  Compare001=  Compare00* (search_region_center.y) ;
			Eigen::MatrixXf  Compare01 =  Eigen::MatrixXf::Ones(1,total_features_number);
			int H= initializing_box_semisize.height;
			// Box bounds in y (adding/subtracting a scalar needs .array() in Eigen)
			Compare00 = (Compare001.array() - H).matrix();                                  // lower y bound
			Compare01 = (Compare01.array() * search_region_center.y + H).matrix();          // upper y bound

			Eigen::MatrixXf  Compare10 =Eigen::MatrixXf::Ones(1,total_features_number);
			Eigen::MatrixXf  Compare11 =Eigen::MatrixXf::Ones(1,total_features_number);
			// Box bounds in x
			Compare10 = (Compare10.array() * search_region_center.x - initializing_box_semisize.width).matrix();
			Compare11 = (Compare11.array() * search_region_center.x + initializing_box_semisize.width).matrix();


			//features_in_the_box =...
			//	(uv_pred(1,:)>ones(1,total_features_number)*(search_region_center(1)-initializing_box_semisize(1)))&
			//	(uv_pred(1,:)<ones(1,total_features_number)*(search_region_center(1)+initializing_box_semisize(1)))&
			//	(uv_pred(2,:)>ones(1,total_features_number)*(search_region_center(2)-initializing_box_semisize(2)))&
			//	(uv_pred(2,:)<ones(1,total_features_number)*(search_region_center(2)+initializing_box_semisize(2)));
			Eigen::MatrixXf  Compare0T = uv_pred.row(0);
			Eigen::MatrixXf  Compare1T = uv_pred.row(1);
			// Count the predicted features that fall inside the search box
			// (element-wise comparison; Eigen has no direct matrix<matrix boolean result)
			int features_in_the_box = 0;
			for (int k = 0; k < total_features_number; ++k){
				if ( Compare0T(0,k) > Compare10(0,k) && Compare0T(0,k) < Compare11(0,k) &&
				     Compare1T(0,k) > Compare00(0,k) && Compare1T(0,k) < Compare01(0,k) )
					++features_in_the_box;
			}

			//are_there_features = (sum(features_in_the_box)~=0);
			are_there_features = (features_in_the_box > 0 );
		}

		//	if(are_there_corners&&(~are_there_features))
		// Accept a new feature only when corners were found AND no existing feature already lies in the box
		Eigen::MatrixXf uv_integer;
		if (are_there_corners && !are_there_features)
		{
			uv = all_uv;
			uv_integer = uv;
			uv = uv.col(0);                 // keep only the first detected corner (2x1)
			detected_new = 1;
		}
		else
			uv = Eigen::MatrixXf::Zero(0,0);

		Eigen::MatrixXf x_k_k,p_k_k, X_RES, P_RES, newFeature;
		filter.get_p_k_k(p_k_k);
		filter.get_x_k_k(x_k_k);

		if(uv.rows()>0){//	if(~isempty(uv))
			add_features_inverse_depth( uv,x_k_k,p_k_k, cam, std_pxl, initial_rho, std_rho,X_RES, P_RES, newFeature );

			filter.set_x_k_k(X_RES);
			filter.set_p_k_k(P_RES);

			//// add the feature to the features_info vector
			add_feature_to_info_vector( uv, im_k, X_RES, features_info, step, newFeature );
			
		}
		// MATLAB clears the predicted measurement of every feature here: features_info(i).h = [];
		for (size_t i = 0; i < features_info.size(); ++i){
			features_info[i].h = Eigen::MatrixXf::Zero(0,0);
		}
	}
	return 1;
}
//
void  m0121(){}

int  CEkfSlam::predict_camera_measurements( 
	Eigen::MatrixXf &x_k_k, 
	StruCamP &cam,
	std::vector<StruFeaturesInfo>  &features_info )
{
	// Predict the camera measurements of all map features // port complete
	//int predict_camera_measurements( get_x_k_k(filter), cam, features_info )
	// Pinhole model
	Eigen::MatrixXf t_wc(3,x_k_k.cols()) ;
	t_wc<< x_k_k.row(0),x_k_k.row(1),x_k_k.row(2);

	// Camera orientation quaternion occupies state entries 4..7 (rows 3..6 zero-based)
	Eigen::MatrixXf  TT(4,x_k_k.cols() );
	TT<< x_k_k.row(3), x_k_k.row(4), x_k_k.row(5), x_k_k.row(6);
	Eigen::MatrixXf r_wc;
	q2r(TT,r_wc);

	// The features occupy the state vector after the first 13 camera entries
	Eigen::MatrixXf features = x_k_k.block(13, 0, x_k_k.rows()-13, x_k_k.cols());

	// Walk the feature part of the state vector in step with features_info
	int idx = 0;   // read position inside 'features'
	for (size_t i = 0; i < features_info.size(); ++i){

		if( strcmp(features_info[i].type.c_str(), "cartesian") == 0 ){
			// a Cartesian feature occupies 3 entries of the state
			Eigen::MatrixXf yi = features.block(idx, 0, 3, 1);
			idx += 3;

			Eigen::MatrixXf hi;
			hi_cartesian( yi, t_wc, r_wc, cam, features_info[i], hi );
			if ((hi.size()>0) )
				features_info[i].h = hi.transpose();
		}
		if ( strcmp(features_info[i].type.c_str(), "inversedepth") == 0 ){
			// an inverse-depth feature occupies 6 entries of the state
			Eigen::MatrixXf yi = features.block(idx, 0, 6, 1);
			idx += 6;

			Eigen::MatrixXf hi ;
			hi_inverse_depth( yi, t_wc, r_wc, cam, features_info[i],hi );
			if (hi.size()>0 )
				features_info[i].h = hi.transpose();
		}
	}
	return 1;
}


double  CEkfSlam::q2r(Eigen::MatrixXf &q, Eigen::MatrixXf &R)
{

	double r=q(0,0);
	double x=q(1,0);//double x=q(0,1);
	double y=q(2,0);//double y=q(0,2);
	double z=q(3,0);

	R = Eigen::MatrixXf::Zero(3,3);	
	R<<
		r*r+x*x-y*y-z*z,  2*(x*y -r*z) ,    2*(z*x+r*y),	
		2*(x*y+r*z) ,     r*r-x*x+y*y-z*z , 2*(y*z-r*x),	
		2*(z*x-r*y) ,     2*(y*z+r*x)   ,   r*r-x*x-y*y+z*z;

	return 1.0;

}
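
A minimal check of q2r (assuming the member is publicly callable): the identity quaternion (r, x, y, z) = (1, 0, 0, 0) must map to the 3x3 identity rotation, and any unit quaternion should give an orthonormal R with determinant 1.

	// Quick check of q2r: the identity quaternion gives the identity rotation.
	Eigen::MatrixXf q(4,1), R;
	q << 1, 0, 0, 0;          // (r, x, y, z)
	CEkfSlam slam;
	slam.q2r(q, R);           // R should come back as the 3x3 identity matrix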



int  CEkfSlam::hi_cartesian(
	Eigen::MatrixXf & yi,Eigen::MatrixXf & t_wc,Eigen::MatrixXf & r_wc,
	StruCamP &cam,StruFeaturesInfo  &features_info ,
	Eigen::MatrixXf &zi)
	//function zi = hi_cartesian( yi, t_wc, r_wc, cam, features_info )
{
	// Port complete
	// Compute a single measurement
	// 3D point in camera coordinates
	Eigen::MatrixXf r_cw  = r_wc.transpose();// inv( r_wc ): for a rotation matrix the inverse is its transpose
	Eigen::MatrixXf hrl   = r_cw* ( yi - t_wc );

	// Is the point in front of the camera? Check the viewing angles,
	// MATLAB: atan2(hrl(1),hrl(3))*180/pi and atan2(hrl(2),hrl(3))*180/pi
	double N1 = atan2( hrl(0,0), hrl(2,0) ) * 180.0/M_PI;
	double N2 = atan2( hrl(1,0), hrl(2,0) ) * 180.0/M_PI;

	if(N1< -60 || N1>60 || N2 <-60 || N2>60) {
		zi =Eigen::MatrixXf::Zero(0,0);// [];
		return 0;
	}
	// Image coordinates (undistorted pinhole projection)
	Eigen::MatrixXf uv_u;// = hu( hrl, cam );
	hu( hrl, cam,uv_u );

	// Add distortion
	// Distort the image coordinates according to the camera model
	Eigen::MatrixXf uv_d;// = distort_fm( uv_u , cam );
	distort_fm( uv_u , cam, uv_d );

	// Is it visible in the image?
	//if ( uv_d(1)>0 ) && ( uv_d(1)<cam.nCols ) && ( uv_d(2)>0 ) && ( uv_d(2)<cam.nRows )
	if(  ( uv_d(0,0)>0 ) && ( uv_d(0,0)<cam.nCols ) && ( uv_d(1,0)>0 ) && ( uv_d(1,0)<cam.nRows )  ) {
		zi = uv_d;
		return 1;
	}
	else
		zi = Eigen::MatrixXf::Zero(0,0);// [];
	//return;
	//end

	return 1;
}


int  CEkfSlam::hu(Eigen::MatrixXf &yi, StruCamP &cam, Eigen::MatrixXf &uv_u)
	//function uv_u = hu( yi, cam)
{
	// Image coordinates from the pinhole model
	// yi is 3x1; uv_u is 2x1
	double	u0 = cam.Cx;
	double	v0 = cam.Cy;
	double	f  = cam.f;
	double	ku = 1/cam.dx;
	double	kv = 1/cam.dy;

	uv_u =Eigen::MatrixXf::Zero( 2, yi.cols() );//uv_u = zeros( 2, size( yi, 2 ) );

	//for i = 1:size( yi, 2 )
	for (int i = 0;i< yi.cols();++i){
		uv_u( 0, i ) = u0 + (yi(0,i)/ yi(2,i) ) *f *ku;//uv_u( 1, i ) = u0 + (yi(1,i)/yi(3,i))*f*ku;
		uv_u( 1, i ) = v0 + (yi(1,i)/ yi(2,i) ) *f *kv;
		//end
	}


	return 1;
}

int  CEkfSlam::distort_fm(Eigen::MatrixXf &uv, StruCamP &camera , Eigen::MatrixXf &uvd)
	//function uvd = distort_fm( uv, camera )
{

	//  Distort image coordinates according to the camera model.
	//  The function deals with two models:
	//  1.- Real-Time 3D SLAM with Wide-Angle Vision, 
	//      Andrew J. Davison, Yolanda Gonzalez Cid and Nobuyuki Kita, IAV 2004.
	//  2.- Photomodeler full distortion model.
	// input
	//    camera -  camera calibration parameters
	//    uv     -  undistorted image points in pixels   (2x1)
	// output
	//    uvd    -  distorted coordinate points          (2x1)
	double	Cx = camera.Cx;
	double	Cy = camera.Cy;
	double	k1 = camera.k1;
	double	k2 = camera.k2;
	double	dx = camera.dx;
	double	dy = camera.dy;

	// xu, yu are scalars (a single point)
	double xu = (uv(0,0)-Cx )*dx;//=(uv(1,:)-Cx)*dx;
	double yu = (uv(1,0)-Cy )*dy;//=(uv(2,:)-Cy)*dy;

	double ru = sqrt(xu*xu  +yu*yu);//sqrt(xu.*xu+yu.*yu);
	double rd = ru/(1+    k1*ru*ru+   k2*ru*ru*ru*ru);//ru./(1+k1*ru.^2+k2*ru.^4);
	// rd_km1 = rd;


	for (int k=1;k<10;++k){
		double   f = rd +  k1*rd*rd*rd +   k2*rd*rd*rd*rd*rd - ru;
		double f_p =  1 +   3*k1*rd*rd +    5*k2*rd*rd*rd*rd;

		rd = rd - f/f_p;
	}

	// disp(k);

	double D = 1 + k1*rd*rd + k2*rd*rd*rd*rd;
	double xd = xu/D;
	double yd = yu/D;

	//uvd = [ xd/dx+Cx; yd/dy+Cy ];
	uvd.resize(2,1);// size the output before the comma initializer
	uvd << xd/dx+Cx, yd/dy+Cy ;

	return 1;
}
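
The radial model used here maps the distorted radius r_d to the undistorted radius through r_u = r_d (1 + k1 r_d^2 + k2 r_d^4); distort_fm inverts that relation with a few Newton steps. Below is a standalone scalar sketch of the same iteration with made-up calibration values (k1 and k2 are assumptions chosen only for illustration):

	// Standalone sketch of the Newton inversion used by distort_fm (illustrative values).
	#include <cstdio>

	int main()
	{
		const double k1 = 6e-2, k2 = 1e-2;           // assumed distortion coefficients
		double ru = 0.8;                              // undistorted radius

		// initial guess, then Newton steps on f(r_d) = r_d + k1*r_d^3 + k2*r_d^5 - r_u
		double rd = ru / (1 + k1*ru*ru + k2*ru*ru*ru*ru);
		for (int k = 0; k < 10; ++k) {
			double f  = rd + k1*rd*rd*rd + k2*rd*rd*rd*rd*rd - ru;
			double fp = 1  + 3*k1*rd*rd  + 5*k2*rd*rd*rd*rd;
			rd -= f / fp;
		}
		printf("r_u = %.4f  ->  r_d = %.4f  (residual %.2e)\n",
		       ru, rd, rd + k1*rd*rd*rd + k2*rd*rd*rd*rd*rd - ru);
		return 0;
	}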




int  CEkfSlam::hi_inverse_depth( 
	Eigen::MatrixXf &yinit,   Eigen::MatrixXf &t_wc,   Eigen::MatrixXf &r_wc,
	StruCamP &cam,  StruFeaturesInfo  & features_info ,
	Eigen::MatrixXf  &zi )
	//function zi = hi_inverse_depth( yinit, t_wc, r_wc, cam, features_info )
{
	// TODO: the shape of uv_d still needs to be checked at runtime //wishchin !!!
	// Compute a single measurement
	// Javier Civera, 17/11/05
	// yinit is 6x1; t_wc is 3x1; r_wc is the 3x3 camera rotation;
	// zi (output) is the 2x1 predicted image measurement

	// Points 3D in camera coordinates
	Eigen::MatrixXf r_cw = r_wc.transpose();

	Eigen::MatrixXf yi(3,1);// = yinit(1:3);
	yi<< yinit(0,0) ,yinit(1,0), yinit(2,0);

	double  theta = yinit(3,0);
	double    phi = yinit(4,0);
	double    rho = yinit(5,0);

	Eigen::MatrixXf   mi;// = m_( theta,phi );
	m_( theta,phi ,mi);

	Eigen::MatrixXf hrl;
	hrl= r_cw*( 
		(yi - t_wc)*rho 
		+ mi );

	// // Angle limit condition: 
	// // If seen 45�from the first time it was seen, do not predict it
	// v_corig_p = rho*(features_info.r_wc_when_initialized - t_wc) + mi;
	// v_c_p = mi;
	// alpha = acos(v_corig_p'*v_c_p/(norm(v_corig_p)*norm(v_c_p)));
	// if abs(alpha)>pi/4
	//     zi = [];
	//     return;
	// end
	// 
	// // Scale limit condition:
	// // If seen from double or half the scale, do not predict it
	// scale = norm(v_corig_p)/norm(v_c_p);
	// if (scale>2)||(scale<1/2)
	//     zi = [];
	//     return;
	// end

	// Is in front of the camera?
	//if ((atan2( hrl( 1, : ), hrl( 3, : ) )*180/pi < -60) ||...
	//	(atan2( hrl( 1, : ), hrl( 3, : ) )*180/pi > 60)  ||...
	//	(atan2( hrl( 2, : ), hrl( 3, : ) )*180/pi < -60) ||...
	//	(atan2( hrl( 2, : ), hrl( 3, : ) )*180/pi > 60))

	// Eigen has no vectorized arctangent here, so use std::atan2 on the scalars directly
	// (same front-of-camera check as the commented MATLAB above) //wishchin!!!
	double N1 = atan2( hrl(0,0), hrl(2,0) ) * 180.0/M_PI;
	double N2 = atan2( hrl(1,0), hrl(2,0) ) * 180.0/M_PI;

	if(N1< -60 || N1>60 || N2 <-60 || N2>60) {
		zi = Eigen::MatrixXf::Zero(0,0);
		return 0;
	}

	// Image coordinates
	Eigen::MatrixXf uv_u ;
	hu( hrl, cam, uv_u );
	// Add distortion
	Eigen::MatrixXf uv_d;// = distort_fm( uv_u , cam );
	distort_fm( uv_u , cam ,uv_d);

	// Is it visible in the image?
	// (MATLAB indexes uv_d(1), uv_d(2); the 0-based uv_d(0), uv_d(1) are used here)
	if (  uv_d(0)>0  &&  uv_d(0)<cam.nCols  &&  uv_d(1)>0  &&  uv_d(1) <cam.nRows  )
	{
		zi = uv_d;	
		return 1;
	}
	else{
		zi = Eigen::MatrixXf::Zero(0,0);
		return 0;
	}
	//end
	return 1;
}



void m0122(){}


int CEkfSlam::fast_corner_detect_9(
	cv::Mat &im,  double  threshold, std::vector<CvPoint>  &coords)	
{
	// Extract FAST corners; a call may return zero, one, or several corners
	//function coords = fast_corner_detect_9(im, threshold)	

	//cv::Mat  image, image1 = cv::imread ("test.jpg"); 
	cv::Mat  image(im.rows, im.cols, CV_8UC1);
	if (3== im.channels() ){
		cv::cvtColor (im,image,CV_BGR2GRAY);  
	}
	else{
		//cvCopyImage(im,image);
		image =im.clone();
		}
	
	// FAST corner detection  
	std::vector<cv::KeyPoint>  keypoints;  
	//cv::FastFeatureDetector fast(threshold , true);  
	//fast .detect (image,keypoints);  
	cv::FAST(image,keypoints,threshold,true);
	//cv::drawKeypoints (image ,keypoints,  image,cv::Scalar::all(255),  cv::DrawMatchesFlags::DRAW_OVER_OUTIMG);  
	coords.resize(keypoints.size() );
	//cv::imshow("kp",image);cv::waitKey(0);
	for (int i=0;i< keypoints.size();++i){
		coords[i].x =keypoints[i].pt.x;
		coords[i].y =keypoints[i].pt.y;
	}

	return 1;
}


int  CEkfSlam::fast_nonmax(
	cv::Mat &im,double barrier, std::vector<CvPoint> &KPs, Eigen::MatrixXf &ret)
{
	if (KPs.size()<1 )
	{
		ret = Eigen::MatrixXf::Zero(0 , 2);	
		return 0;
	}

	// First-pass port; dynamically growing the result would need an overloaded helper
	//function ret = fast_nonmax(im, barrier, c)
	CvSize sz = im.size();
	int ysize = sz.height;
	int LSize = KPs.size();

	// x, y
	Eigen::MatrixXf dir0(16,2);
	Eigen::MatrixXf  dir(16,1);
	dir0<< 0,3,
		1,3,
		2,2,
		3,1,
		3,0,
		3,-1,
		2,-2,
		1,-3,
		0,-3,
		-1,-3,
		-2,-2,
		-3,-1,
		-3,0,
		-3,1,
		-2,2,
		-1,3;

	Eigen::MatrixXf dir01 =dir0.col(0);
	dir.col(0)  = dir0.col(1) + dir01 * ysize;

	// Copy the keypoints into an Eigen matrix so matrix arithmetic can be used
	Eigen::MatrixXf  c( KPs.size(), 2 );
	for (size_t i=0;i<KPs.size();++i){
		c(i,0) =  KPs[i].x;
		c(i,1) =  KPs[i].y;
	}

	// Linear (column-major) pixel indices of the corner centres
	Eigen::MatrixXf  centres( KPs.size(),1);
	Eigen::MatrixXf  c1( KPs.size(),1);
	Eigen::MatrixXf  c2( KPs.size(),1);
	c1 = (c.col(0).array() - 1).matrix();// subtracting a scalar needs .array() in Eigen
	c2 = c.col(1);
	centres = c2 + c1* ysize;//centres = c(:,2) + (c(:,1)-1) * ysize;

	//First pass: compute scores for all of the pixels:
	Eigen::MatrixXf scores = Eigen::MatrixXf::Zero(sz.height * sz.width, 1);//scores = zeros(sz(1) * sz(2), 1);

	Eigen::MatrixXf   p1(16,1);//16*1 double
	Eigen::MatrixXf  pos(1,1);//1*1  double
	Eigen::MatrixXf   n1(16,1);//16*1 double
	Eigen::MatrixXf  neg(1,1);//1*1  double

	//for i=1:length(centres)
	// First pass: compute the corner score of every candidate centre
	for (int i=0; i<LSize ; ++i){

		// An OpenCV Mat cannot be indexed linearly like a MATLAB image; the pixel access below is still a TODO
		for (int j=0;j< dir.rows();++j ){

			//p1(j,0) = im(
			//	(int)(centres(i,0)+ dir(j,0))/im.cols, 
			//	(int)(centres(i,0)+ dir(j,0))%im.cols) -im((int)centres(i,0)/im.cols ,
			//	(int)centres(i,0)%im.cols )
			//	+ barrier;
			//p1 = im(centres(i)+dir) -( im(centres(i)) + barrier);//im(0,0)=1;
		}
		pos(0,0) =( p1.transpose() *p1 ).sum();//sum(p1 .* (p1 > 0));

		for (int j=0;j< dir.rows();++j ){
			//n1(j,0)
			//	= im( (int)(centres(i,0) ) /im.cols , (int)(centres(i,0) )%im.cols )
			//	- barrier 
			//	- im((int)(centres(i,0) + dir(j,0) ) /im.cols ,(int)(centres(i,0)+ dir(j,0) )% im.cols  );
		}

		neg(0,0) =(n1.transpose()* n1).sum() ;//sum(n1 .* (n1 > 0));

		scores(centres(i,0) ,0) = std::max(pos(0,0) ,neg(0,0));//scores(centres(i)) = max([pos neg]);
		//end
	}

	// Second pass bookkeeping: cs marks which candidate centres are local maxima
	//Eigen::MatrixXf cs = Eigen::MatrixXf::Zero((centres.size() ),1 );
	Eigen::VectorXf cs = Eigen::VectorXf::Zero((centres.size() ) );
	int up    = -1;
	int down  =  1;
	int left  =-ysize;
	int right = ysize;

	//second pass: get local maxima
	Eigen::MatrixXf p(1,1);
	Eigen::MatrixXf square(1,8);
	//for i=1:length(centres)
	for (int i=0; i< LSize ; ++i){

		p(0,0) = centres(i,0);

		square <<
			p(0,0)  + up, p(0,0)  +  down, p(0,0)  +  left, p(0,0)  +  right, 
			p(0,0)  + up+left , 
			p(0,0)  + up+right , 
			p(0,0)  + down+left ,
			p(0,0)  + down+right;

		// MATLAB: cs(i) = (sum(scores(p) >= scores(square)) == 8), i.e. p is a local maximum
		int num_not_greater = 0;
		for (int j=0; j<square.cols(); ++j ){
			if ( scores( (int)p(0,0), 0 ) >= scores( (int)square(0,j), 0 ) ){
				++num_not_greater;
			}
		}
		cs(i) = (num_not_greater == 8) ? 1.0f : 0.0f;
		//end
	}

	//Get the maxima positions (there may be several)
	// MATLAB find() returns the linear indices of the non-zero elements,
	// which are then used to index centres: maximas = centres(find(cs))
	//Eigen::VectorXf maximas = centres(find(cs));
	Eigen::VectorXf  maximas(0);
	find(cs, maximas, 0);// TODO: this helper still needs checking //wishchin!

	//Eigen::MatrixXf 
	ret = Eigen::MatrixXf::Zero(maximas.size() , 2);	
	for (int i=0;i< maximas.size();++i){
		double centre = centres( (int)maximas(i), 0 );// maximas holds indices into centres
		ret(i,0)   = 1 + (int)(centre) / ysize;
		ret(i,1)   = (int)(centre) % ysize;
	}
	//ret(:,1) = 1 + floor(maximas / ysize);
	//ret(:,2) = mod(maximas, ysize);

	return 1;
}





int  CEkfSlam::add_features_inverse_depth( 
	Eigen::MatrixXf &uvd,    Eigen::MatrixXf  &X,   Eigen::MatrixXf &P,
	StruCamP &cam,    
	double std_pxl,   double  initial_rho,   double  std_rho,
	Eigen::MatrixXf  &X_RES, Eigen::MatrixXf &P_RES,
	Eigen::MatrixXf &newFeature )
{
	// Outputs:
	// X_RES (augmented state, e.g. 19x1), P_RES (augmented covariance, e.g. 19x19), newFeature (6x1)

	int nNewFeat = uvd.cols();//size( uvd, 2 );
	//Eigen::MatrixXf  Xv(1,13);
	Eigen::VectorXf  Xv(13);
	Eigen::MatrixXf  XM(1,13);

	if (nNewFeat == 0){
		X_RES = X;
		P_RES = P;
		return 0;
	}

	else{
		// Camera state: the first 13 entries of the state vector
		Xv = X.block(0,0,13,1);
		XM = Xv.transpose();// 1x13 row copy passed to the helpers below
		X_RES = X;
		P_RES = P;

		// Add each new feature (the MATLAB 1-based loop becomes 0-based here)
		Eigen::MatrixXf  uvdi;
		for (int i = 0;i<nNewFeat;++i){
			uvdi =uvd.col(i);
			hinv(uvdi , XM, cam, initial_rho, newFeature );//newFeature = hinv( uvd(:,i), Xv, cam, initial_rho );
			//X_RES = [ X_RES; newFeature ];

			//P_RES = add_a_feature_covariance_inverse_depth( P_RES, uvd(:,i), Xv, std_pxl, std_rho, cam );
			add_a_feature_covariance_inverse_depth( P_RES, uvdi, XM, std_pxl, std_rho, cam,P_RES );
		}

		//end
		//	end
		return 1;
	}
}
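
The commented-out line X_RES = [ X_RES; newFeature ] is the MATLAB state-vector augmentation that still has to be wired in above. In Eigen it could look roughly like the sketch below; append_feature_to_state is a hypothetical helper name, not part of the original class:

	// Sketch only: append a 6x1 inverse-depth feature to the state vector,
	// i.e. the MATLAB  X_RES = [ X_RES; newFeature ];
	static void append_feature_to_state(Eigen::MatrixXf &X_RES, const Eigen::MatrixXf &newFeature)
	{
		const Eigen::Index oldRows = X_RES.rows();
		X_RES.conservativeResize(oldRows + newFeature.rows(), 1);
		X_RES.block(oldRows, 0, newFeature.rows(), 1) = newFeature;
		// The covariance P_RES is grown separately by add_a_feature_covariance_inverse_depth.
	}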

int CEkfSlam::add_feature_to_info_vector(
	Eigen::MatrixXf  &uv,cv::Mat  &im_k, Eigen::MatrixXf  &X_RES,
	std::vector<StruFeaturesInfo> &features_info,
	int step, Eigen::MatrixXf &newFeature )
{
	int half_patch_size_when_initialized = 20;
	int half_patch_size_when_matching    =  6;
	int new_position =   features_info.size();// length(features_info)+1;
	features_info.resize(new_position + 1);// grow the vector before filling the new entry

	cv::Mat       RectImg; 
	CvSize           Rect;
	CvPoint region_center;
	Rect.height= half_patch_size_when_initialized*2;
	Rect.width = Rect.height ;
	region_center.x = uv(1,0);
	region_center.y = uv(0,0);

	cv::getRectSubPix(im_k,Rect,region_center,RectImg,-1);
	Eigen::MatrixXf  RectMat;
	copyCvMat(RectImg,RectMat);

	features_info[new_position].patch_when_initialized  = RectMat;
		//im_k( 
		//uv(2,1)- half_patch_size_when_initialized:uv(2,1)+ half_patch_size_when_initialized,
		//uv(1,1)- half_patch_size_when_initialized:uv(1,1)+ half_patch_size_when_initialized);

	features_info[new_position].patch_when_matching 
		= Eigen::MatrixXf::Zero( 2*half_patch_size_when_matching+1, 2*half_patch_size_when_matching+1 );

	features_info[new_position].r_wc_when_initialized.resize(3,X_RES.cols() );// =X_RES(1:3);
	features_info[new_position].r_wc_when_initialized << X_RES.row(0),X_RES.row(1),X_RES.row(2);

	//features_info[new_position].R_wc_when_initialized;//=
	Eigen::MatrixXf XRES(4, X_RES.cols() );
	XRES<< X_RES.row(3),X_RES.row(4),X_RES.row(5),X_RES.row(6);
	q2r(XRES, features_info[new_position].R_wc_when_initialized );

	features_info[new_position].uv_when_initialized               =                              uv;

	features_info[new_position].half_patch_size_when_initialized  = half_patch_size_when_initialized;

	features_info[new_position].half_patch_size_when_matching     =    half_patch_size_when_matching;

	features_info[new_position].times_predicted                   =                                0;

	features_info[new_position].times_measured                    =                                0;

	features_info[new_position].init_frame                        =                             step;

	features_info[new_position].init_measurement                  =                               uv;

	features_info[new_position].type                              =                   "inversedepth";

	features_info[new_position].yi                                =                       newFeature;

	features_info[new_position].individually_compatible           = 0;

	features_info[new_position].low_innovation_inlier             = 0;

	features_info[new_position].high_innovation_inlier            = 0;

	features_info[new_position].z = Eigen::MatrixXf::Zero(1,1);
	features_info[new_position].h = Eigen::MatrixXf::Zero(1,1);
	features_info[new_position].H = Eigen::MatrixXf::Zero(1,1);
	features_info[new_position].S = Eigen::MatrixXf::Zero(1,1);

	features_info[new_position].state_size                       = 6;
	features_info[new_position].measurement_size                 = 2;
	features_info[new_position].R                                = Eigen::MatrixXf::Identity(features_info[new_position].measurement_size, features_info[new_position].measurement_size);
	//eye(features_info[new_position].measurement_size);

	return 1;
}

void m0123(){}

//
int CEkfSlam::hinv(
	Eigen::MatrixXf  &uvd, Eigen::MatrixXf   &Xv, 
	StruCamP &camera,
	double  initial_rho ,Eigen::MatrixXf   &newFeature )
{

	double fku =  camera.K(0,0);// zero-based indexing of the calibration matrix K
	double fkv =  camera.K(1,1);
	double U0  =  camera.K(0,2);
	double V0  =  camera.K(1,2);

	Eigen::MatrixXf  uv ;//= 
	undistort_fm( uvd,camera ,uv);
	Eigen::MatrixXf  u = uv.row(0);
	Eigen::MatrixXf  v = uv.row(1);

	Eigen::MatrixXf   r_W(3,1); //= Xv(1:3);  camera position
	r_W << Xv(0),Xv(1),Xv(2);
	Eigen::MatrixXf  q_WR(4,1);// = Xv(4:7);  camera orientation quaternion
	q_WR<< Xv(3),Xv(4),Xv(5),Xv(6);

	// Eigen needs .array() for matrix-scalar subtraction
	Eigen::MatrixXf  h_LR_x = ((u.array() - U0)/fku).matrix();// = -(U0-u)/fku
	Eigen::MatrixXf  h_LR_y = ((v.array() - V0)/fkv).matrix();// = -(V0-v)/fkv
	double h_LR_z =1;

	Eigen::MatrixXf  h_LR(3,h_LR_x.cols() );// =[h_LR_x; h_LR_y; h_LR_z]; ray direction in the camera frame //wishchin// TODO
	h_LR << h_LR_x, h_LR_y, h_LR_z;

	Eigen::MatrixXf   n;// = 
	q2r( q_WR, n );
	n=n*h_LR;

	Eigen::MatrixXf  nx = n.row(0);
	Eigen::MatrixXf  ny = n.row(1);
	Eigen::MatrixXf  nz = n.row(2);

	// newFeature = [ r_W; azimuth; elevation; initial_rho ]  (angles in radians)
	double nxs = nx(0,0), nys = ny(0,0), nzs = nz(0,0);
	newFeature.resize(6,1);
	newFeature <<  
		r_W, 
		atan2( nxs, nzs ),
		atan2( -nys, sqrt(nxs*nxs + nzs*nzs) ), 
		initial_rho;

	return 1;

}



int  CEkfSlam::undistort_fm(Eigen::MatrixXf  &uvd, StruCamP& camera,Eigen::MatrixXf  &uvu  )
{
	//
	// Undistort image coordinates
	double Cx = camera.Cx;
	double Cy = camera.Cy;
	double k1 = camera.k1;
	double k2 = camera.k2;
	double dx = camera.dx;
	double dy = camera.dy;

	Eigen::MatrixXf  xdM,ydM;
	xdM = uvd.row(0);ydM = uvd.row(1);
	// subtracting a scalar needs .array() in Eigen
	Eigen::MatrixXf  xd = (( xdM.array() - Cx )*dx).matrix();
	Eigen::MatrixXf  yd = (( ydM.array() - Cy )*dy).matrix();// ( uvd(2,:) - Cy )*dy;

	double  rd = sqrt( ( xd.transpose() *xd + yd.transpose()*yd ).sum() );

	double D = 1 + k1*rd*rd + k2*rd*rd*rd*rd;
	Eigen::MatrixXf  xu = xd*D;// keep the 1xN row shape (no transpose needed)
	Eigen::MatrixXf  yu = yd*D;

	//uvu = [ xu/dx + Cx; yu/dy + Cy ];
	uvu =Eigen::MatrixXf::Zero(2, xu.cols());
	uvu.row(0) = (xu.array()/dx + Cx).matrix();
	uvu.row(1) = (yu.array()/dy + Cy).matrix();

	return 1;
}


int CEkfSlam::add_a_feature_covariance_inverse_depth( 
	Eigen::MatrixXf  &P, Eigen::MatrixXf  &uvd,Eigen::MatrixXf  & Xv,
	double std_pxl, double std_rho, StruCamP &cam,
	Eigen::MatrixXf  &P_RES)
{
	//// Output: P_RES
	// Still needs substantial work (the Jacobian assembly below is left commented out)
	double fku =  cam.K(0,0);
	double fkv =  cam.K(1,1);
	double U0  =  cam.K(0,2);
	double V0  =  cam.K(1,2);

	Eigen::MatrixXf  q_wc(4,1);// = Xv(4:7);
	q_wc<< Xv(3),Xv(4),Xv(5),Xv(6);
	Eigen::MatrixXf  R_wc; 
	q2r(q_wc,R_wc);

	Eigen::MatrixXf  uvu;
	undistort_fm( uvd, cam, uvu );
	Eigen::MatrixXf  uu = uvu.row(0);// undistorted u (MATLAB uvu(1,:))
	Eigen::MatrixXf  vu = uvu.row(1);// undistorted v (MATLAB uvu(2,:))

	// Eigen needs .array() for matrix-scalar subtraction
	Eigen::MatrixXf  X_c = ((uu.array() - U0)/fku).matrix();// = -(U0-uu)/fku
	Eigen::MatrixXf  Y_c = ((vu.array() - V0)/fkv).matrix();// = -(V0-vu)/fkv
	Eigen::MatrixXf  Z_c(1,1);
	Z_c(0,0)= 1;

	Eigen::MatrixXf  XYZ_c(3,1);
	XYZ_c<< X_c, Y_c, Z_c;

	Eigen::MatrixXf  XYZ_w = R_wc*XYZ_c;
	Eigen::MatrixXf  X_w = XYZ_w.row(0);
	Eigen::MatrixXf  Y_w = XYZ_w.row(1);
	Eigen::MatrixXf  Z_w = XYZ_w.row(2);

	// Derivatives

	Eigen::MatrixXf  dtheta_dgw;// = [ Z_w/(X_w^2+Z_w^2) 0 -X_w/(X_w^2+Z_w^2) ];
	
	Eigen::MatrixXf    dphi_dgw;// = [ (X_w*Y_w)/((X_w^2+Y_w^2+Z_w^2)*sqrt(X_w^2+Z_w^2))-sqrt(X_w^2+Z_w^2)/(X_w^2+Y_w^2+Z_w^2) (Z_w*Y_w)/((X_w^2+Y_w^2+Z_w^2)*sqrt(X_w^2+Z_w^2)) ];
	
	Eigen::MatrixXf  dgw_dqwr(0,0); //= 
	dRq_times_a_by_dq( q_wc, XYZ_c ,dgw_dqwr);

	//Eigen::MatrixXf  dtheta_dqwr = dtheta_dgw*dgw_dqwr;
	//Eigen::MatrixXf  dphi_dqwr = dphi_dgw*dgw_dqwr;
	//Eigen::MatrixXf  dy_dqwr = sparse( [ zeros(3,4); dtheta_dqwr; dphi_dqwr; zeros(1,4) ] );
	//Eigen::MatrixXf  dy_drw = sparse( [ eye(3); zeros(3,3) ] );

	//Eigen::MatrixXf  dy_dxv = sparse( [ dy_drw dy_dqwr zeros(6,6) ] );

	//Eigen::MatrixXf  dyprima_dgw = sparse( [ zeros(3,3); dtheta_dgw; dphi_dgw ] );
	//Eigen::MatrixXf  dgw_dgc = R_wc;

	//Eigen::MatrixXf  dgc_dhu
	//	= sparse( [ +1/fku      0       0;
	//0   +1/fkv      0].transpose() );

	//Eigen::MatrixXf  dhu_dhd;// = 
	//jacob_undistor_fm( cam , uvd,dhu_dhd );

	//Eigen::MatrixXf  dyprima_dhd = dyprima_dgw*dgw_dgc*dgc_dhu*dhu_dhd;

	//Eigen::MatrixXf  dy_dhd
	//	= sparse( [ dyprima_dhd zeros(5,1);
	//    zeros(1,2)      1 ] );

	//Eigen::MatrixXf  Ri = speye(2)*std_pxl^2;

	//Eigen::MatrixXf  Padd 
	//	= sparse( [Ri  zeros(2,1);
	//    zeros(1,2)  std_rho^2] );

	////
	Eigen::MatrixXf  P_xv(13,13); //= P( 1:13, 1:13 );
	P_xv=P.block(0,0,13,13);
	Eigen::MatrixXf  P_yxv = P.block(13, 0,P.rows()-13,         13 );// P( 14:end, 1:13 );
	Eigen::MatrixXf  P_y   = P.block(13,13,P.rows()-13,P.cols()-13 );//P( 14:end, 14:end );
	Eigen::MatrixXf  P_xvy = P.block( 0,13,P.rows()-13,P.cols()-13 );//P( 1:13, 14:end );
	//Eigen::MatrixXf  P_RES;
	//P_RES<< 

	//	P_xv  ,    P_xvy  ,    P_xv*dy_dxv.transpose(),
	//	P_yxv ,     P_y   ,   P_yxv*dy_dxv.transpose(),
	//	dy_dxv*P_xv ,  dy_dxv*P_xvy  ,  dy_dxv*P_xv*dy_dxv.transpose()
	//+dy_dhd*Padd*dy_dhd.transpose();
	return 1;
}


void m05(){}
int CEkfSlam::dRq_times_a_by_dq(
	Eigen::MatrixXf &q, Eigen::MatrixXf &aMat, Eigen::MatrixXf &dRq_times_a_by_dqRES)
{

	//Eigen::MatrixXf	 
	dRq_times_a_by_dqRES=Eigen::MatrixXf::Zero(3,4);

	Eigen::MatrixXf TempR;
	Eigen::MatrixXf Temp31;

	dR_by_dq0(q,TempR);
	Temp31= TempR * aMat;
	dRq_times_a_by_dqRES.col(0)=Temp31;// MATLAB: dRq_times_a_by_dqRES(1:3,1)=Temp31

	dR_by_dqx(q,TempR);
	Temp31 = TempR * aMat;
	dRq_times_a_by_dqRES.col(1)=Temp31;// (1:3,2)

	dR_by_dqy(q,TempR);
	Temp31 = TempR * aMat;
	dRq_times_a_by_dqRES.col(2)=Temp31;// (1:3,3)

	dR_by_dqz(q,TempR);
	Temp31 = TempR * aMat;
	dRq_times_a_by_dqRES.col(3)=Temp31;// (1:3,4)

	return 1;

}

int  CEkfSlam::dR_by_dq0(Eigen::MatrixXf &q, Eigen::MatrixXf &dR_by_dq0RES)
{
	//function dR_by_dq0RES=dR_by_dq0(q)
	double q0 = q(0,0);
	double qx = q(1,0);// q is a 4x1 quaternion column vector
	double qy = q(2,0);
	double qz = q(3,0);

	//Eigen::MatrixXf	  
	dR_by_dq0RES.resize(3,3);
	dR_by_dq0RES<<
		2*q0, -2*qz,  2*qy,
		2*qz,  2*q0, -2*qx,
		-2*qy,  2*qx,  2*q0;

	return 1;
}

int CEkfSlam::dR_by_dqx(Eigen::MatrixXf &q, Eigen::MatrixXf &dR_by_dqxRES)
{
	//function dR_by_dqxRES=dR_by_dqx(q)
	double q0 = q(0,0);
	double qx = q(1,0);// q is a 4x1 quaternion column vector
	double qy = q(2,0);
	double qz = q(3,0);

	//Eigen::MatrixXf	  
	dR_by_dqxRES.resize(3,3);
	dR_by_dqxRES<<
		2*qx, 2*qy,   2*qz,
		2*qy, -2*qx, -2*q0,
		2*qz, 2*q0,  -2*qx;

	return 1;

}

int CEkfSlam::dR_by_dqy(Eigen::MatrixXf &q, Eigen::MatrixXf &dR_by_dqyRES)
{
	//	function dR_by_dqyRES=dR_by_dqy(q)
	double q0 = q(0,0);
	double qx = q(1,0);// q is a 4x1 quaternion column vector
	double qy = q(2,0);
	double qz = q(3,0);

	//Eigen::MatrixXf	  
	dR_by_dqyRES.resize(3,3);

	dR_by_dqyRES<<
		-2*qy, 2*qx,  2*q0,
		2*qx, 2*qy,  2*qz,
		-2*q0, 2*qz, -2*qy;

	return 1;
}

int CEkfSlam::dR_by_dqz(Eigen::MatrixXf &q, Eigen::MatrixXf &dR_by_dqzRES)
{
	//function dR_by_dqzRES=dR_by_dqz(q)
	double q0 = q(0,0);
	double qx = q(1,0);// q is a 4x1 quaternion column vector
	double qy = q(2,0);
	double qz = q(3,0);

	//Eigen::MatrixXf	  
	dR_by_dqzRES.resize(3,3);
	dR_by_dqzRES<<
		-2*qz, -2*q0, 2*qx,
		2*q0, -2*qz, 2*qy,
		2*qx,  2*qy, 2*qz;

	return 1;
}


void m06(){}
int CEkfSlam::find(Eigen::VectorXf &cs,  Eigen::VectorXf  &maximas, double Value)
{
	// Return the indices of the entries of cs that are greater than Value
	// (pushing one element at a time is slow, so traverse twice instead)
	int Counter =0;
	for ( int i=0; i< cs.size(); ++i){
		if (cs(i)>Value){
			++Counter;
		}
	}
	//Eigen::VectorXf maximas(Counter);
	maximas.resize(Counter);

	Counter =0;
	for ( int i=0; i< cs.size(); ++i){
		if (cs(i)>Value){
			maximas(Counter) = (float)i;// store the index, as MATLAB find() does
			++Counter;
		}
	}

	return Counter;
}

int CEkfSlam::copyCvMat(Eigen::MatrixXf &Mat  ,cv::Mat &Img)
{
	Img.create(Mat.rows(), Mat.cols(), CV_32F);// cv::Mat::resize cannot change the number of columns

	for (int i =0;i< Mat.rows();++i){
		for (int j=0;j<Mat.cols();++j){
			Img.at<float>(i,j)=Mat(i,j);
		}
	}

	return 1;
}

int CEkfSlam::copyCvMat(cv::Mat &Img,Eigen::MatrixXf &Mat )
{
	Mat.resize(Img.rows, Img.cols );

	for (int i =0;i< Mat.rows();++i){
		for (int j=0;j<Mat.cols();++j){
			Mat(i,j) =Img.at<float>(i,j);
		}
	}


	return 1;
}

int CEkfSlam::getRect(
	cv::Mat &Img,	CvSize   &Rect, CvPoint  &region_center,cv::Mat &RectImg)
{
	//cv::Mat             RectImg; 
	//CvSize   Rect;
	//CvPoint   region_center;
	cv::getRectSubPix(Img,Rect,region_center,RectImg,-1);

	return 1;
}

int CEkfSlam::getRect(
	cv::Mat &Img, CvRect  &Rect, cv::Mat &RectImg)
{
	//cv::Mat             RectImg; 
	CvSize   RSize;
	RSize.height = Rect.height;
	RSize.width  = Rect.width ;
	CvPoint   region_center;
	region_center.x =Rect.x;
	region_center.y =Rect.y;
	cv::getRectSubPix(Img,RSize,region_center,RectImg,-1);

	return 1;
}

int CEkfSlam::getRect(
	cv::Mat &Img,cv::Mat &RectImg,int xs,int xe,int ys,int ye)
{
	CvSize   RSize;
	RSize.height = ye-ys;
	RSize.width  = xe-xs;
	CvPoint   region_center;
	region_center.x =(xs+xe)/2;
	region_center.y =(ys+ye)/2;
	cv::getRectSubPix(Img,RSize,region_center,RectImg,-1);

	return 1;
}


你可能感兴趣的:(SLAM: 基于运动和视觉里程方法的实时三维构建:Map-Management/坐标系转换)