【slam十四讲第二版】【课本例题代码向】【第三~四讲刚体运动、李群和李代数】【eigen3.3.4和pangolin安装,Sophus及fim的安装使用】【绘制轨迹】【计算轨迹误差】
【slam十四讲第二版】【课本例题代码向】【第五讲~相机与图像】【OpenCV、图像去畸变、双目和RGB-D、遍历图像像素14种】
【slam十四讲第二版】【课本例题代码向】【第六讲~非线性优化】【安装对应版本ceres2.0.0和g2o教程】【手写高斯牛顿、ceres2.0.0、g2o拟合曲线及报错解决方案】
不得不吐槽一句,进度太慢,效率也太慢
所需要的依赖包的安装请看我之前的博客【slam十四讲第二版】【课本例题代码向】【第六讲~非线性优化】【安装对应版本ceres2.0.0和g2o教程】【手写高斯牛顿、ceres2.0.0、g2o拟合曲线及报错解决方案】
我的整个代码功能附上来了,另外需要的两张图片也在里面,如果需要的自取:链接:https://pan.baidu.com/s/1obCQ0Yb2hGOTK9f0Whvwfw
提取码:kfs1
#include
using namespace std;
#include
#include
#include
#include
#include
int main(int argc,char ** argv)
{
cv::Mat img_1,img_2;
if(argc != 3)
{
img_1 = cv::imread("../src/1.png",CV_LOAD_IMAGE_COLOR);
img_2 = cv::imread("../src/2.png",CV_LOAD_IMAGE_COLOR);
// 判断图像文件是否正确读取
if (img_1.data == nullptr || img_2.data == nullptr) { //数据不存在,可能是文件不存在
cout << "usage: feature_extraction ima1 img2" << endl;
return 0;
}
}
else
{
//读取图像
img_1 = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
img_2 = cv::imread(argv[2], CV_LOAD_IMAGE_COLOR);
assert(img_1.data != nullptr && img_2.data != nullptr);
}
//初始化
std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
cv::Mat descriptors_1, descriptors_2;
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
//第一步:检测Oriented FAST角点位置
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
//第二步:根据角点位置计算BRIEF描述子
descriptor->compute(img_1,keypoints_1,descriptors_1);
descriptor->compute(img_2,keypoints_2,descriptors_2);
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2-t1);
cout << "extract ORB cost = " << time_used.count() << " seconds." << endl;
cv::Mat outimg1;
cv::drawKeypoints(img_1,keypoints_1,outimg1,cv::Scalar::all(-1),cv::DrawMatchesFlags::DEFAULT);
cv::imshow("ORB features", outimg1);
cv::waitKey(0);//
if(cv::imwrite("../src/ORB_features.png",outimg1) == false)
{
cout << "Failed to save the image" << endl;
}
//--第三步:对两幅图像中的BRIEF描述子进行匹配,使用Hamming距离
vector<cv::DMatch> matches; //cv::DMatch 用于匹配关键点描述子的类
t1 = chrono::steady_clock::now();
matcher->match(descriptors_1,descriptors_2,matches);
t2 = chrono::steady_clock::now();
time_used = chrono::duration_cast<chrono::duration<double>>(t2-t1);
cout << "match ORB cost = " << time_used.count() << " seconds." << endl;
//--第四步:匹配点对筛选
//计算最小距离和最大距离
auto min_max = minmax_element(matches.begin(),matches.end(),
[](const cv::DMatch &m1, const cv::DMatch &m2){return m1.distance<m2.distance;});
double min_dist = min_max.first->distance;
double max_dist = min_max.second->distance;
cout << "--Min dist : " << min_dist << endl;
cout << "--Max dist : " << max_dist << endl;
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误。但有时最小距离会非常小,所以要设置一个经验值30作为下限
std::vector<cv::DMatch> good_matches;
for(int i = 0; i < descriptors_1.rows;i++)
{
if(matches[i].distance <= max(2 * min_dist, 30.0))
{
good_matches.push_back(matches[i]);
}
}
//--第五步:绘制匹配结果
cv::Mat img_match;
cv::Mat img_goodmatch;
cv::drawMatches(img_1, keypoints_1, img_2,keypoints_2,matches,img_match);
cv::drawMatches(img_1, keypoints_1, img_2,keypoints_2,good_matches,img_goodmatch);
if(cv::imwrite("../src/all_matches.png",img_match) == false)
{
cout << "Failed to save the image" << endl;
}
if(cv::imwrite("../src/good_matches.png",img_goodmatch) == false)
{
cout << "Failed to save the image" << endl;
}
cv::imshow("all matches", img_match);
cv::imshow("good matches", img_goodmatch);
cv::waitKey(0);
return 0;
}
cmake_minimum_required(VERSION 2.8)
project(orb_cv)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE "Release")
find_package(OpenCV 3 REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(orb_cv src/orb_cv.cpp)
target_link_libraries(orb_cv ${OpenCV_LIBRARIES})
/home/bupo/my_study/slam14/slam14_my/cap7/orb_cv/cmake-build-debug/orb_cv
[ INFO:0] Initialize OpenCL runtime...
extract ORB cost = 0.0135794 seconds.
match ORB cost = 0.000664683 seconds.
--Min dist : 7
--Max dist : 95
链接:https://pan.baidu.com/s/1npVLlLt3WBjAAKhOAZ3t1w
提取码:dy1p
#include
using namespace std;
#include
#include
#include
#include
//global variables定义图片路径全局变量
string first_file = "./src/1.png";
string second_file = "./src/2.png";
// 32 bit unsigned int, will have 8, 8x32=256
typedef vector<uint32_t> DescType; // Descriptor type
/**
* compute descriptor of orb keypoints
* @param img input image
* @param keypoints detected fast keypoints
* @param descriptors descriptors
*
* NOTE: if a keypoint goes outside the image boundary (8 pixels),
* descriptors will not be computed and will be left as empty
* 注意:如果一个关键点超出了图像边界(8个像素),描述子将不会被计算,并将作为空
*/
void ComputeORB(const cv::Mat & img,vector<cv::KeyPoint> &keypoints, vector<DescType> & descriptors);
/**
* brute-force match two sets of descriptors
* @param desc1 the first descriptor
* @param desc2 the second descriptor
* @param matches matches of two images
*/
void BfMatch(const vector<DescType> & desc1, const vector<DescType> &desc2, vector<cv::DMatch> &matches);
int main(int argc, char ** argv)
{
// 加载图片
//从全局变量路径中读取图片
cv::Mat first_image = cv::imread(first_file, 0);
cv::Mat second_image = cv::imread(second_file,0);
//检查图片指针是否为空
assert(first_image.data != nullptr && second_image.data != nullptr);
//ORB-Oriented FAST and Rotated BRIEF
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
//ORB使用FAST算法检测特征点
//OpenCV中的ORB采用了图像金字塔来解决尺度变换一致性
//****自定义ComputeORB函数来描述ORB特征点,并旋转使其具备旋转尺度不变性
vector<cv::KeyPoint> keypoints1;
// ORB提取图1特征threshold=40
//利用FAST从图1中提取关键点keypoints1
cv::FAST(first_image,keypoints1,40);
//定义图1的描述子
vector<DescType> descriptor1;
//根据图1和FAST提取的关键点,通过ORB设置描述子descriptor1
ComputeORB(first_image,keypoints1,descriptor1);
//ORB提取图2特征
vector<cv::KeyPoint> keypoints2;
vector<DescType> descriptor2;
cv::FAST(second_image,keypoints2,40);
ComputeORB(second_image,keypoints2,descriptor2);
//计算特征提取耗时
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "extract ORB cost = " << time_used.count() << " seconds. " << endl;
// 进行匹配
vector<cv::DMatch> matches;
t1 = chrono::steady_clock::now();
BfMatch(descriptor1, descriptor2, matches);
t2 = chrono::steady_clock::now();
time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "match ORB cost = " << time_used.count() << " seconds. " << endl;
cout << "matches: " << matches.size() << endl;
// plot the matches
cv::Mat image_show;
cv::drawMatches(first_image, keypoints1,second_image,keypoints2,matches,image_show);
cv::imshow("matches", image_show);
cv::imwrite("../src/matches.png", image_show);
cv::waitKey(0);
cout << "done" <<endl;
return 0;
}
#pragma region ORB_pattern[256 * 4]相当于在以关键点为中心[-13,12]的范围内,随机选点对p,q;进行关键点的向量构建
//这个变量里的数字,在ORBSLAM的代码中总共是256行,代表了256个点对儿,也就是每一个都代表了一对点的坐标,
//如第一行表示点q1(8,-3) 和点 q2(9,5), 接下来就是要对比这两个坐标对应的像素值的大小;
int ORB_pattern[256 * 4] = {
8, -3, 9, 5/*mean (0), correlation (0)*/,
4, 2, 7, -12/*mean (1.12461e-05), correlation (0.0437584)*/,
-11, 9, -8, 2/*mean (3.37382e-05), correlation (0.0617409)*/,
7, -12, 12, -13/*mean (5.62303e-05), correlation (0.0636977)*/,
2, -13, 2, 12/*mean (0.000134953), correlation (0.085099)*/,
1, -7, 1, 6/*mean (0.000528565), correlation (0.0857175)*/,
-2, -10, -2, -4/*mean (0.0188821), correlation (0.0985774)*/,
-13, -13, -11, -8/*mean (0.0363135), correlation (0.0899616)*/,
-13, -3, -12, -9/*mean (0.121806), correlation (0.099849)*/,
10, 4, 11, 9/*mean (0.122065), correlation (0.093285)*/,
-13, -8, -8, -9/*mean (0.162787), correlation (0.0942748)*/,
-11, 7, -9, 12/*mean (0.21561), correlation (0.0974438)*/,
7, 7, 12, 6/*mean (0.160583), correlation (0.130064)*/,
-4, -5, -3, 0/*mean (0.228171), correlation (0.132998)*/,
-13, 2, -12, -3/*mean (0.00997526), correlation (0.145926)*/,
-9, 0, -7, 5/*mean (0.198234), correlation (0.143636)*/,
12, -6, 12, -1/*mean (0.0676226), correlation (0.16689)*/,
-3, 6, -2, 12/*mean (0.166847), correlation (0.171682)*/,
-6, -13, -4, -8/*mean (0.101215), correlation (0.179716)*/,
11, -13, 12, -8/*mean (0.200641), correlation (0.192279)*/,
4, 7, 5, 1/*mean (0.205106), correlation (0.186848)*/,
5, -3, 10, -3/*mean (0.234908), correlation (0.192319)*/,
3, -7, 6, 12/*mean (0.0709964), correlation (0.210872)*/,
-8, -7, -6, -2/*mean (0.0939834), correlation (0.212589)*/,
-2, 11, -1, -10/*mean (0.127778), correlation (0.20866)*/,
-13, 12, -8, 10/*mean (0.14783), correlation (0.206356)*/,
-7, 3, -5, -3/*mean (0.182141), correlation (0.198942)*/,
-4, 2, -3, 7/*mean (0.188237), correlation (0.21384)*/,
-10, -12, -6, 11/*mean (0.14865), correlation (0.23571)*/,
5, -12, 6, -7/*mean (0.222312), correlation (0.23324)*/,
5, -6, 7, -1/*mean (0.229082), correlation (0.23389)*/,
1, 0, 4, -5/*mean (0.241577), correlation (0.215286)*/,
9, 11, 11, -13/*mean (0.00338507), correlation (0.251373)*/,
4, 7, 4, 12/*mean (0.131005), correlation (0.257622)*/,
2, -1, 4, 4/*mean (0.152755), correlation (0.255205)*/,
-4, -12, -2, 7/*mean (0.182771), correlation (0.244867)*/,
-8, -5, -7, -10/*mean (0.186898), correlation (0.23901)*/,
4, 11, 9, 12/*mean (0.226226), correlation (0.258255)*/,
0, -8, 1, -13/*mean (0.0897886), correlation (0.274827)*/,
-13, -2, -8, 2/*mean (0.148774), correlation (0.28065)*/,
-3, -2, -2, 3/*mean (0.153048), correlation (0.283063)*/,
-6, 9, -4, -9/*mean (0.169523), correlation (0.278248)*/,
8, 12, 10, 7/*mean (0.225337), correlation (0.282851)*/,
0, 9, 1, 3/*mean (0.226687), correlation (0.278734)*/,
7, -5, 11, -10/*mean (0.00693882), correlation (0.305161)*/,
-13, -6, -11, 0/*mean (0.0227283), correlation (0.300181)*/,
10, 7, 12, 1/*mean (0.125517), correlation (0.31089)*/,
-6, -3, -6, 12/*mean (0.131748), correlation (0.312779)*/,
10, -9, 12, -4/*mean (0.144827), correlation (0.292797)*/,
-13, 8, -8, -12/*mean (0.149202), correlation (0.308918)*/,
-13, 0, -8, -4/*mean (0.160909), correlation (0.310013)*/,
3, 3, 7, 8/*mean (0.177755), correlation (0.309394)*/,
5, 7, 10, -7/*mean (0.212337), correlation (0.310315)*/,
-1, 7, 1, -12/*mean (0.214429), correlation (0.311933)*/,
3, -10, 5, 6/*mean (0.235807), correlation (0.313104)*/,
2, -4, 3, -10/*mean (0.00494827), correlation (0.344948)*/,
-13, 0, -13, 5/*mean (0.0549145), correlation (0.344675)*/,
-13, -7, -12, 12/*mean (0.103385), correlation (0.342715)*/,
-13, 3, -11, 8/*mean (0.134222), correlation (0.322922)*/,
-7, 12, -4, 7/*mean (0.153284), correlation (0.337061)*/,
6, -10, 12, 8/*mean (0.154881), correlation (0.329257)*/,
-9, -1, -7, -6/*mean (0.200967), correlation (0.33312)*/,
-2, -5, 0, 12/*mean (0.201518), correlation (0.340635)*/,
-12, 5, -7, 5/*mean (0.207805), correlation (0.335631)*/,
3, -10, 8, -13/*mean (0.224438), correlation (0.34504)*/,
-7, -7, -4, 5/*mean (0.239361), correlation (0.338053)*/,
-3, -2, -1, -7/*mean (0.240744), correlation (0.344322)*/,
2, 9, 5, -11/*mean (0.242949), correlation (0.34145)*/,
-11, -13, -5, -13/*mean (0.244028), correlation (0.336861)*/,
-1, 6, 0, -1/*mean (0.247571), correlation (0.343684)*/,
5, -3, 5, 2/*mean (0.000697256), correlation (0.357265)*/,
-4, -13, -4, 12/*mean (0.00213675), correlation (0.373827)*/,
-9, -6, -9, 6/*mean (0.0126856), correlation (0.373938)*/,
-12, -10, -8, -4/*mean (0.0152497), correlation (0.364237)*/,
10, 2, 12, -3/*mean (0.0299933), correlation (0.345292)*/,
7, 12, 12, 12/*mean (0.0307242), correlation (0.366299)*/,
-7, -13, -6, 5/*mean (0.0534975), correlation (0.368357)*/,
-4, 9, -3, 4/*mean (0.099865), correlation (0.372276)*/,
7, -1, 12, 2/*mean (0.117083), correlation (0.364529)*/,
-7, 6, -5, 1/*mean (0.126125), correlation (0.369606)*/,
-13, 11, -12, 5/*mean (0.130364), correlation (0.358502)*/,
-3, 7, -2, -6/*mean (0.131691), correlation (0.375531)*/,
7, -8, 12, -7/*mean (0.160166), correlation (0.379508)*/,
-13, -7, -11, -12/*mean (0.167848), correlation (0.353343)*/,
1, -3, 12, 12/*mean (0.183378), correlation (0.371916)*/,
2, -6, 3, 0/*mean (0.228711), correlation (0.371761)*/,
-4, 3, -2, -13/*mean (0.247211), correlation (0.364063)*/,
-1, -13, 1, 9/*mean (0.249325), correlation (0.378139)*/,
7, 1, 8, -6/*mean (0.000652272), correlation (0.411682)*/,
1, -1, 3, 12/*mean (0.00248538), correlation (0.392988)*/,
9, 1, 12, 6/*mean (0.0206815), correlation (0.386106)*/,
-1, -9, -1, 3/*mean (0.0364485), correlation (0.410752)*/,
-13, -13, -10, 5/*mean (0.0376068), correlation (0.398374)*/,
7, 7, 10, 12/*mean (0.0424202), correlation (0.405663)*/,
12, -5, 12, 9/*mean (0.0942645), correlation (0.410422)*/,
6, 3, 7, 11/*mean (0.1074), correlation (0.413224)*/,
5, -13, 6, 10/*mean (0.109256), correlation (0.408646)*/,
2, -12, 2, 3/*mean (0.131691), correlation (0.416076)*/,
3, 8, 4, -6/*mean (0.165081), correlation (0.417569)*/,
2, 6, 12, -13/*mean (0.171874), correlation (0.408471)*/,
9, -12, 10, 3/*mean (0.175146), correlation (0.41296)*/,
-8, 4, -7, 9/*mean (0.183682), correlation (0.402956)*/,
-11, 12, -4, -6/*mean (0.184672), correlation (0.416125)*/,
1, 12, 2, -8/*mean (0.191487), correlation (0.386696)*/,
6, -9, 7, -4/*mean (0.192668), correlation (0.394771)*/,
2, 3, 3, -2/*mean (0.200157), correlation (0.408303)*/,
6, 3, 11, 0/*mean (0.204588), correlation (0.411762)*/,
3, -3, 8, -8/*mean (0.205904), correlation (0.416294)*/,
7, 8, 9, 3/*mean (0.213237), correlation (0.409306)*/,
-11, -5, -6, -4/*mean (0.243444), correlation (0.395069)*/,
-10, 11, -5, 10/*mean (0.247672), correlation (0.413392)*/,
-5, -8, -3, 12/*mean (0.24774), correlation (0.411416)*/,
-10, 5, -9, 0/*mean (0.00213675), correlation (0.454003)*/,
8, -1, 12, -6/*mean (0.0293635), correlation (0.455368)*/,
4, -6, 6, -11/*mean (0.0404971), correlation (0.457393)*/,
-10, 12, -8, 7/*mean (0.0481107), correlation (0.448364)*/,
4, -2, 6, 7/*mean (0.050641), correlation (0.455019)*/,
-2, 0, -2, 12/*mean (0.0525978), correlation (0.44338)*/,
-5, -8, -5, 2/*mean (0.0629667), correlation (0.457096)*/,
7, -6, 10, 12/*mean (0.0653846), correlation (0.445623)*/,
-9, -13, -8, -8/*mean (0.0858749), correlation (0.449789)*/,
-5, -13, -5, -2/*mean (0.122402), correlation (0.450201)*/,
8, -8, 9, -13/*mean (0.125416), correlation (0.453224)*/,
-9, -11, -9, 0/*mean (0.130128), correlation (0.458724)*/,
1, -8, 1, -2/*mean (0.132467), correlation (0.440133)*/,
7, -4, 9, 1/*mean (0.132692), correlation (0.454)*/,
-2, 1, -1, -4/*mean (0.135695), correlation (0.455739)*/,
11, -6, 12, -11/*mean (0.142904), correlation (0.446114)*/,
-12, -9, -6, 4/*mean (0.146165), correlation (0.451473)*/,
3, 7, 7, 12/*mean (0.147627), correlation (0.456643)*/,
5, 5, 10, 8/*mean (0.152901), correlation (0.455036)*/,
0, -4, 2, 8/*mean (0.167083), correlation (0.459315)*/,
-9, 12, -5, -13/*mean (0.173234), correlation (0.454706)*/,
0, 7, 2, 12/*mean (0.18312), correlation (0.433855)*/,
-1, 2, 1, 7/*mean (0.185504), correlation (0.443838)*/,
5, 11, 7, -9/*mean (0.185706), correlation (0.451123)*/,
3, 5, 6, -8/*mean (0.188968), correlation (0.455808)*/,
-13, -4, -8, 9/*mean (0.191667), correlation (0.459128)*/,
-5, 9, -3, -3/*mean (0.193196), correlation (0.458364)*/,
-4, -7, -3, -12/*mean (0.196536), correlation (0.455782)*/,
6, 5, 8, 0/*mean (0.1972), correlation (0.450481)*/,
-7, 6, -6, 12/*mean (0.199438), correlation (0.458156)*/,
-13, 6, -5, -2/*mean (0.211224), correlation (0.449548)*/,
1, -10, 3, 10/*mean (0.211718), correlation (0.440606)*/,
4, 1, 8, -4/*mean (0.213034), correlation (0.443177)*/,
-2, -2, 2, -13/*mean (0.234334), correlation (0.455304)*/,
2, -12, 12, 12/*mean (0.235684), correlation (0.443436)*/,
-2, -13, 0, -6/*mean (0.237674), correlation (0.452525)*/,
4, 1, 9, 3/*mean (0.23962), correlation (0.444824)*/,
-6, -10, -3, -5/*mean (0.248459), correlation (0.439621)*/,
-3, -13, -1, 1/*mean (0.249505), correlation (0.456666)*/,
7, 5, 12, -11/*mean (0.00119208), correlation (0.495466)*/,
4, -2, 5, -7/*mean (0.00372245), correlation (0.484214)*/,
-13, 9, -9, -5/*mean (0.00741116), correlation (0.499854)*/,
7, 1, 8, 6/*mean (0.0208952), correlation (0.499773)*/,
7, -8, 7, 6/*mean (0.0220085), correlation (0.501609)*/,
-7, -4, -7, 1/*mean (0.0233806), correlation (0.496568)*/,
-8, 11, -7, -8/*mean (0.0236505), correlation (0.489719)*/,
-13, 6, -12, -8/*mean (0.0268781), correlation (0.503487)*/,
2, 4, 3, 9/*mean (0.0323324), correlation (0.501938)*/,
10, -5, 12, 3/*mean (0.0399235), correlation (0.494029)*/,
-6, -5, -6, 7/*mean (0.0420153), correlation (0.486579)*/,
8, -3, 9, -8/*mean (0.0548021), correlation (0.484237)*/,
2, -12, 2, 8/*mean (0.0616622), correlation (0.496642)*/,
-11, -2, -10, 3/*mean (0.0627755), correlation (0.498563)*/,
-12, -13, -7, -9/*mean (0.0829622), correlation (0.495491)*/,
-11, 0, -10, -5/*mean (0.0843342), correlation (0.487146)*/,
5, -3, 11, 8/*mean (0.0929937), correlation (0.502315)*/,
-2, -13, -1, 12/*mean (0.113327), correlation (0.48941)*/,
-1, -8, 0, 9/*mean (0.132119), correlation (0.467268)*/,
-13, -11, -12, -5/*mean (0.136269), correlation (0.498771)*/,
-10, -2, -10, 11/*mean (0.142173), correlation (0.498714)*/,
-3, 9, -2, -13/*mean (0.144141), correlation (0.491973)*/,
2, -3, 3, 2/*mean (0.14892), correlation (0.500782)*/,
-9, -13, -4, 0/*mean (0.150371), correlation (0.498211)*/,
-4, 6, -3, -10/*mean (0.152159), correlation (0.495547)*/,
-4, 12, -2, -7/*mean (0.156152), correlation (0.496925)*/,
-6, -11, -4, 9/*mean (0.15749), correlation (0.499222)*/,
6, -3, 6, 11/*mean (0.159211), correlation (0.503821)*/,
-13, 11, -5, 5/*mean (0.162427), correlation (0.501907)*/,
11, 11, 12, 6/*mean (0.16652), correlation (0.497632)*/,
7, -5, 12, -2/*mean (0.169141), correlation (0.484474)*/,
-1, 12, 0, 7/*mean (0.169456), correlation (0.495339)*/,
-4, -8, -3, -2/*mean (0.171457), correlation (0.487251)*/,
-7, 1, -6, 7/*mean (0.175), correlation (0.500024)*/,
-13, -12, -8, -13/*mean (0.175866), correlation (0.497523)*/,
-7, -2, -6, -8/*mean (0.178273), correlation (0.501854)*/,
-8, 5, -6, -9/*mean (0.181107), correlation (0.494888)*/,
-5, -1, -4, 5/*mean (0.190227), correlation (0.482557)*/,
-13, 7, -8, 10/*mean (0.196739), correlation (0.496503)*/,
1, 5, 5, -13/*mean (0.19973), correlation (0.499759)*/,
1, 0, 10, -13/*mean (0.204465), correlation (0.49873)*/,
9, 12, 10, -1/*mean (0.209334), correlation (0.49063)*/,
5, -8, 10, -9/*mean (0.211134), correlation (0.503011)*/,
-1, 11, 1, -13/*mean (0.212), correlation (0.499414)*/,
-9, -3, -6, 2/*mean (0.212168), correlation (0.480739)*/,
-1, -10, 1, 12/*mean (0.212731), correlation (0.502523)*/,
-13, 1, -8, -10/*mean (0.21327), correlation (0.489786)*/,
8, -11, 10, -6/*mean (0.214159), correlation (0.488246)*/,
2, -13, 3, -6/*mean (0.216993), correlation (0.50287)*/,
7, -13, 12, -9/*mean (0.223639), correlation (0.470502)*/,
-10, -10, -5, -7/*mean (0.224089), correlation (0.500852)*/,
-10, -8, -8, -13/*mean (0.228666), correlation (0.502629)*/,
4, -6, 8, 5/*mean (0.22906), correlation (0.498305)*/,
3, 12, 8, -13/*mean (0.233378), correlation (0.503825)*/,
-4, 2, -3, -3/*mean (0.234323), correlation (0.476692)*/,
5, -13, 10, -12/*mean (0.236392), correlation (0.475462)*/,
4, -13, 5, -1/*mean (0.236842), correlation (0.504132)*/,
-9, 9, -4, 3/*mean (0.236977), correlation (0.497739)*/,
0, 3, 3, -9/*mean (0.24314), correlation (0.499398)*/,
-12, 1, -6, 1/*mean (0.243297), correlation (0.489447)*/,
3, 2, 4, -8/*mean (0.00155196), correlation (0.553496)*/,
-10, -10, -10, 9/*mean (0.00239541), correlation (0.54297)*/,
8, -13, 12, 12/*mean (0.0034413), correlation (0.544361)*/,
-8, -12, -6, -5/*mean (0.003565), correlation (0.551225)*/,
2, 2, 3, 7/*mean (0.00835583), correlation (0.55285)*/,
10, 6, 11, -8/*mean (0.00885065), correlation (0.540913)*/,
6, 8, 8, -12/*mean (0.0101552), correlation (0.551085)*/,
-7, 10, -6, 5/*mean (0.0102227), correlation (0.533635)*/,
-3, -9, -3, 9/*mean (0.0110211), correlation (0.543121)*/,
-1, -13, -1, 5/*mean (0.0113473), correlation (0.550173)*/,
-3, -7, -3, 4/*mean (0.0140913), correlation (0.554774)*/,
-8, -2, -8, 3/*mean (0.017049), correlation (0.55461)*/,
4, 2, 12, 12/*mean (0.01778), correlation (0.546921)*/,
2, -5, 3, 11/*mean (0.0224022), correlation (0.549667)*/,
6, -9, 11, -13/*mean (0.029161), correlation (0.546295)*/,
3, -1, 7, 12/*mean (0.0303081), correlation (0.548599)*/,
11, -1, 12, 4/*mean (0.0355151), correlation (0.523943)*/,
-3, 0, -3, 6/*mean (0.0417904), correlation (0.543395)*/,
4, -11, 4, 12/*mean (0.0487292), correlation (0.542818)*/,
2, -4, 2, 1/*mean (0.0575124), correlation (0.554888)*/,
-10, -6, -8, 1/*mean (0.0594242), correlation (0.544026)*/,
-13, 7, -11, 1/*mean (0.0597391), correlation (0.550524)*/,
-13, 12, -11, -13/*mean (0.0608974), correlation (0.55383)*/,
6, 0, 11, -13/*mean (0.065126), correlation (0.552006)*/,
0, -1, 1, 4/*mean (0.074224), correlation (0.546372)*/,
-13, 3, -9, -2/*mean (0.0808592), correlation (0.554875)*/,
-9, 8, -6, -3/*mean (0.0883378), correlation (0.551178)*/,
-13, -6, -8, -2/*mean (0.0901035), correlation (0.548446)*/,
5, -9, 8, 10/*mean (0.0949843), correlation (0.554694)*/,
2, 7, 3, -9/*mean (0.0994152), correlation (0.550979)*/,
-1, -6, -1, -1/*mean (0.10045), correlation (0.552714)*/,
9, 5, 11, -2/*mean (0.100686), correlation (0.552594)*/,
11, -3, 12, -8/*mean (0.101091), correlation (0.532394)*/,
3, 0, 3, 5/*mean (0.101147), correlation (0.525576)*/,
-1, 4, 0, 10/*mean (0.105263), correlation (0.531498)*/,
3, -6, 4, 5/*mean (0.110785), correlation (0.540491)*/,
-13, 0, -10, 5/*mean (0.112798), correlation (0.536582)*/,
5, 8, 12, 11/*mean (0.114181), correlation (0.555793)*/,
8, 9, 9, -6/*mean (0.117431), correlation (0.553763)*/,
7, -4, 8, -12/*mean (0.118522), correlation (0.553452)*/,
-10, 4, -10, 9/*mean (0.12094), correlation (0.554785)*/,
7, 3, 12, 4/*mean (0.122582), correlation (0.555825)*/,
9, -7, 10, -2/*mean (0.124978), correlation (0.549846)*/,
7, 0, 12, -2/*mean (0.127002), correlation (0.537452)*/,
-1, -6, 0, -11/*mean (0.127148), correlation (0.547401)*/
};
#pragma endregion
// 计算关键点的描述子向量,
/*
* 1、因为以关键点为中心,边长为16×16的方形区域灰度计算关键点角度信息;
* 所以先去掉所有在离图像边缘在8个像素范围内的像素,否则计算关键点角度会出错;
*2、计算关键点周围16×16的领域质心,并计算关键点角度的cos,和sin
*3、根据指定随机规则,选择关键点周围的随机点对计算随机点对的亮度强弱,
* 如果第一个像素pp比第二个qq亮,则为描述符中的相应位分配值 1,否则分配值 0
*4、将计算的关键点描述子向量加入到描述子向量集合中
*/
void ComputeORB(const cv::Mat & img,vector<cv::KeyPoint> &keypoints, vector<DescType> & descriptors)
{
const int half_patch_size = 8;
const int half_boundary = 16;
int bad_points = 0;
for(auto &kp:keypoints)
{
//剔除边缘关键点,边缘的像素值为16,即将图像上靠近四边16个像素的边框区域内所有特征点都去掉,
//因为要以关键点为中心,边长为16×16的方形区域灰度计算关键点角度信息;
if(kp.pt.x < half_boundary || kp.pt.y < half_boundary ||
kp.pt.x > img.cols - half_boundary || kp.pt.y > img.cols - half_boundary)
{
// outside
bad_points++;
descriptors.push_back({});
continue;
}
//计算灰度质心
float m01 = 0, m10 = 0;
//从左到右遍历以关键点为中心的,半径为8的像素点,共256个像素点
for(int dx = -half_patch_size; dx < half_patch_size; dx++)
{
//从上到下遍历
for(int dy = -half_patch_size; dy < half_patch_size; ++dy)
{
//img.at(y,x),参数是点所在的行列而不是点的坐标
//计算区域内的像素坐标,关键点坐标(x,y)+偏移坐标(dx,dy)
uchar pixel = img.at<uchar>(kp.pt.y+dy,kp.pt.x+dx);
//计算x方向灰度总权重,注意:此处使用dx,非X方向坐标值
m01 += dx * pixel;
//计算y方向灰度总权重
m10 += dy * pixel;
}
}
// 计算关键点角度信息 arc tan(m01/m10);
//这里计算灰度质心与关键点构成的直角三角形的长边,用于后面计算角度;没有通过除总灰度值计算质心位置 avoid divide by zero
float m_sqrt = sqrt(m01 * m01 + m10 * m10) + 1e-18;
//计算关键点角度信息sin cos
float sin_theta = m01/m_sqrt;
float cos_theta = m10/m_sqrt;
// 计算关键点描述子向量,原理参考https://www.cnblogs.com/alexme/p/11345701.html
//8个描述子向量,每个向量中的元素占据32位,初始化为0;每个描述子使用256位二进制数进行描述
DescType desc(8,0);
for (int i = 0; i < 8; i++) //处理每个向量
{
uint32_t d = 0;
for(int k = 0; k < 32; k++) //处理每一位
{
//在256*4的随机数中随机选一行作为p(idx1,idx2),q(idx3,idx4),,i,k递增,所以所有点选择特征向量的规制一致,才能比较
int idx_pq = i * 8 + k;
//ORB_pattern含义是在16*16图像块中按高斯分布选取点对,选出来的p、q是未经过旋转的
//相当于在以关键点为中心[-13,12]的范围内,随机选点对p,q;进行关键点的向量构建
cv::Point2f p(ORB_pattern[idx_pq * 4],ORB_pattern[idx_pq * 4 +1]);
cv::Point2f q(ORB_pattern[idx_pq * 4 + 2], ORB_pattern[idx_pq * 4 + 3]);
// 计算关键点随机选择的特征点对旋转后的位置
//pp和qq利用了特征点的方向,计算了原始随机选出的p,q点旋转后的位置pp,qq,体现了ORB的旋转不变性
cv::Point2f pp = kp.pt + cv::Point2f(cos_theta * p.x - sin_theta * p.y, sin_theta * p.x + cos_theta * p.y);
//之所以此处需要+ kp.pt的原因是上述随机点对选择是以关键点为中心选择的,需要加关键点坐标获取按照指定规则随机选择后的随机点对绝对图像坐标
cv::Point2f qq = cv::Point2f(cos_theta * q.x - sin_theta * q.y, sin_theta * q.x + cos_theta * q.y)
+ kp.pt;
//计算两个旋转后的关键点对应随机点对的灰度值大小,构建关键点的特征向量
if(img.at<uchar>(pp.y,pp.x) < img.at<uchar>(qq.y, qq.x))
{
//如果第一个像素pp比第二个qq亮,则为描述符中的相应位分配值 1,否则分配值 0
d |= 1 << k;
}
}
desc[i] = d;
}
//将获取的关键点描述子向量添加进描述子集合中
descriptors.push_back(desc);
}
cout << "bad/total: " << bad_points << "/" << keypoints.size() << endl;
}
// brute-force matching
void BfMatch(const vector<DescType> &desc1, const vector<DescType> &desc2, vector<cv::DMatch> &matches)
{
const int d_max = 40;
for(int i1 = 0; i1 < desc1.size(); ++i1)
{
if(desc1[i1].empty()) continue;
cv::DMatch m{i1, 0, 256};
for(size_t i2 = 0; i2 < desc2.size(); i2++)
{
if(desc2[i2].empty()) continue;
int distance = 0;
//此处k8×下行u32=256个二进制描述子,即一个关键点的描述子
for (int k = 0; k < 8; k++)
{
//将选择的两个随机特征的描述子按位进行异或运算,从而计算两个描述子之间的汉明顿距离
distance += _mm_popcnt_u32(desc1[i1][k] ^ desc2[i2][k]);//第一个是第几个特征点,第二个是第几个描述子
}
if(distance < d_max && distance < m.distance)
{
m.distance = distance;
m.trainIdx = i2;
}
}
if(m.distance < d_max)
{
matches.push_back(m);
}
}
}
cmake_minimum_required(VERSION 2.8)
project(orb_self)
set(CMAKE_CXX_FLAGS "-std=c++14 -mfma")
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(orb_self src/orb_self.cpp)
target_link_libraries(orb_self ${OpenCV_LIBRARIES})
echo "SSE 4.2 supported" || echo "SSE 4.2 not supported“"
可以查看,参考文章关于虚拟化中cpu的指令集SSE 4.2的不支持/usr/lib/gcc/x86_64-linux-gnu/7/include/popcntintrin.h:35:1: error: inlining failed in call to always_inline ‘int _mm_popcnt_u32(unsigned int)’: target specific option mismatch _mm_popcnt_u32 (unsigned int __X) ^~~~~~~~~~~~~~
set(CMAKE_CXX_FLAGS "-std=c++11 -O2 ${SSE_FLAGS} -msse4")
set(CMAKE_CXX_FLAGS "-std=c++14 -mfma")
/home/bupo/my_study/slam14/slam14_my/cap7/orb_self/cmake-build-debug/orb_self
bad/total: 43/638
bad/total: 7/595
extract ORB cost = 0.016009 seconds.
match ORB cost = 0.0256946 seconds.
matches: 51
done
项目工程和所需图片都在链接:https://pan.baidu.com/s/1oENApqlZ6i5Oz7Copexswg
提取码:c1cu
另外运行很简单,如果你没有clion,只需要在项目路径下创建一个build文件夹,然后cmake… 然后make 然后运行可执行文件就可以了
#include
using namespace std;
#include
#include
#include
#include
#include
// #include "extra.h" // use this if in OpenCV2
/****************************************************
* 本程序演示了如何使用2D-2D的特征匹配估计相机运动
* **************************************************/
string first_file = "../src/1.png";
string second_file = "../src/2.png";
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches);
void pose_estimation_2d2d(std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches,
cv::Mat& R, cv::Mat& t);
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam (const cv::Point2d& p, const cv::Mat& K);
int main(int argc, char ** argv)
{
//-- 读取图像
cout << "读取src文件下的图片" << endl;
cv::Mat img_1 = cv::imread(first_file,CV_LOAD_IMAGE_COLOR);
cv::Mat img_2 = cv::imread(second_file, CV_LOAD_IMAGE_COLOR);
assert(img_1.data != nullptr && img_2.data != nullptr);
vector<cv::KeyPoint> keypoints_1, keypoints_2;
vector<cv::DMatch> matches;
find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
cout<<"一共找到了"<<matches.size() <<"组匹配点"<<endl;
//-- 估计两张图像间运动
cv::Mat R,t;
pose_estimation_2d2d(keypoints_1,keypoints_2,matches,R,t);
//-- 验证E=t^R*scale
cv::Mat t_x = (cv::Mat_<double>(3,3) <<
0, -t.at<double>(2,0),t.at<double>(1,0),
t.at<double>(2,0), 0, -t.at<double>(0,0)
-t.at<double>(1,0),t.at<double>(0,0),0
);
cout << "t^R = " << endl << t_x * R << endl;
//-- 验证对极约束 // 相机内参
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0,0,1);
for(cv::DMatch m:matches)
{
cv::Point2d pt1 = pixel2cam(keypoints_1[m.queryIdx].pt,K);
cv::Mat y1 = (cv::Mat_<double>(3,1) << pt1.x,pt1.y,1);
cv::Point2d pt2 = pixel2cam ( keypoints_2[ m.trainIdx ].pt, K );
cv::Mat y2 = ( cv::Mat_<double> ( 3,1 ) << pt2.x, pt2.y, 1 );
cv::Mat d = y2.t() * t_x * R * y1;
cout << "epipolar constraint = " << d << endl;
}
return 0;
}
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches)
{
//-- 初始化
cv::Mat descriptors_1, descriptors_2;
// used in OpenCV3
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
// use this if you are in OpenCV2
// Ptr detector = FeatureDetector::create ( "ORB" );
// Ptr descriptor = DescriptorExtractor::create ( "ORB" );
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
//-- 第一步:检测 Oriented FAST 角点位置
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
//-- 第二步:根据角点位置计算 BRIEF 描述子
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
//-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
vector<cv::DMatch> match;
matcher->match(descriptors_1,descriptors_2,match);
//-- 第四步:匹配点对筛选
double min_dist = 10000, max_dist = 0;
//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = match[i].distance;
if(dist < min_dist) min_dist = dist;
if(dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f\n", max_dist);
printf("-- Min dist : %f\n", min_dist);
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
for(int i = 0; i < descriptors_1.rows; i++)
{
if(match[i].distance <= max(2*min_dist, 30.0))
{
matches.push_back(match[i]);
}
}
}
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam (const cv::Point2d& p, const cv::Mat& K)
{
return cv::Point2d( //at是内参数矩阵
(p.x - K.at<double> (0,2)) / K.at<double>(0,0),
(p.y - K.at<double> (1,2)) / K.at<double>(1,1)
);
}
void pose_estimation_2d2d(std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches,
cv::Mat& R, cv::Mat& t)
{
// 相机内参,TUM Freiburg2
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
//-- 把匹配点转换为vector的形式
vector<cv::Point2f> points1;
vector<cv::Point2f> points2;
for(int i = 0; i < (int) matches.size(); i++)
{
points1.push_back(keypoints_1[matches[i].queryIdx].pt);//匹配点对中第一张图片上的点
points2.push_back(keypoints_2[matches[i].trainIdx].pt);//匹配点对中第二张图片上的点
}
//-- 计算基础矩阵
cv::Mat fundamental_matrix;
fundamental_matrix = cv::findFundamentalMat(points1, points2,CV_FM_8POINT);//计算给定一组对应点的基本矩阵 八点法
cout << "fundamental_matrix is" << endl << fundamental_matrix << endl;
//-- 计算本质矩阵
cv::Point2d principal_point (325.1, 249.7); //相机光心, TUM dataset标定值
double focal_length = 521; //相机焦距, TUM dataset标定值
cv::Mat essential_matrix;
essential_matrix = cv::findEssentialMat(points1,points2,focal_length,principal_point);
cout<<"essential_matrix is "<<endl<< essential_matrix<<endl;
//-- 计算单应矩阵
cv::Mat homography_matrix;
homography_matrix = cv::findHomography(points1, points2, cv::RANSAC, 3);
cout<<"homography_matrix is "<<endl<<homography_matrix<<endl;
//-- 从本质矩阵中恢复旋转和平移信息.
cv::recoverPose(essential_matrix, points1, points2, R,t,
focal_length,principal_point);
cout<<"R is "<<endl<<R<<endl;
cout<<"t is "<<endl<<t<<endl;
}
cmake_minimum_required(VERSION 2.8)
project(pose_estimation_2d2d)
set(CMAKE_BUILD_TYPE "Release")
set( CMAKE_CXX_STANDARD 14)
# 添加cmake模块以使用g2o
#list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)
find_package(OpenCV 3.1 REQUIRED)
# find_package( OpenCV REQUIRED ) # use this if in OpenCV2
include_directories("/usr/include/eigen3")
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(pose_estimation_2d2d src/pose_estimation_2d2d.cpp)
target_link_libraries(pose_estimation_2d2d ${OpenCV_LIBRARIES})
/home/bupo/my_study/slam14/slam14_my/cap7/pose_estimation_2d2d/cmake-build-debug/pose_estimation_2d2d
读取src文件下的图片
[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000
-- Min dist : 7.000000
一共找到了81组匹配点
fundamental_matrix is
[5.435453065936294e-06, 0.0001366043242989641, -0.02140890086948122;
-0.0001321142229824704, 2.339475702778057e-05, -0.006332906454396256;
0.02107630352202776, -0.00366683395295285, 1]
essential_matrix is
[0.01724015832721737, 0.3280543357941316, 0.04737477831442286;
-0.3243229585962945, 0.03292958445202431, -0.6262554366073029;
-0.005885857752317864, 0.6253830041920339, 0.01531678649092664]
homography_matrix is
[0.91317517918067, -0.1092435315821776, 29.95860009981271;
0.02223560352310949, 0.9826008005061946, 6.50891083956826;
-0.0001001560381023939, 0.0001037779436396116, 1]
R is
[0.9985534106102478, -0.05339308467584829, 0.006345444621109364;
0.05321959721496342, 0.9982715997131746, 0.02492965459802013;
-0.007665548311698023, -0.02455588961730218, 0.9996690690694515]
t is
[-0.8829934995085557;
-0.05539655431450295;
0.4661048182498381]
t^R =
[-0.02438126572381069, -0.4639388908753586, -0.06699805400667588;
0.4582372816724479, -0.04792977660828776, 0.9410371538411662;
-0.8831734264150997, 0.04247755931090476, 0.1844386941457087]
epipolar constraint = [-0.07304359960899821]
epipolar constraint = [0.2707830639560381]
epipolar constraint = [0.7394658183384351]
...(这里省略的使用所有有效的匹配对验证对极约束的结果,总共应该是81对,也就是所有的匹配点对数)
epipolar constraint = [0.1199788747639846]
epipolar constraint = [0.2506340185166976]
进程已结束,退出代码0
代码自取:链接:https://pan.baidu.com/s/1IVHnjF_KhvjuyMpkPQSpLg
提取码:0fg8
或者直接取图片链接: https://pan.baidu.com/s/1XxodUt9AOBpytMwsab4slA 提取码: 5b14
其实就是在对极约束求解相机运动的基础上加了一个三角代码测量,演示
#include
using namespace std;
#include
#include
#include
#include
#include
// #include "extra.h" // use this if in OpenCV2
/****************************************************
* 本程序演示了如何使用2D-2D的特征匹配估计相机运动
* **************************************************/
string first_file = "../src/1.png";
string second_file = "../src/2.png";
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches);
void pose_estimation_2d2d(std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches,
cv::Mat& R, cv::Mat& t);
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam (const cv::Point2d& p, const cv::Mat& K);
//加入了三角测量部分
void triangulation(const vector<cv::KeyPoint>& keypoint_1, const vector<cv::KeyPoint>& keypoint_2,
const std::vector<cv::DMatch>& matches, const cv::Mat& R, const cv::Mat& t,
vector<cv::Point3d>& points);
int main(int argc, char ** argv)
{
//-- 读取图像
cout << "读取src文件下的图片" << endl;
cv::Mat img_1 = cv::imread(first_file,CV_LOAD_IMAGE_COLOR);
cv::Mat img_2 = cv::imread(second_file, CV_LOAD_IMAGE_COLOR);
assert(img_1.data != nullptr && img_2.data != nullptr);
vector<cv::KeyPoint> keypoints_1, keypoints_2;
vector<cv::DMatch> matches;
find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
cout<<"一共找到了"<<matches.size() <<"组匹配点"<<endl;
//-- 估计两张图像间运动
cv::Mat R,t;
pose_estimation_2d2d(keypoints_1,keypoints_2,matches,R,t);
//-- 三角化
vector<cv::Point3d> points;
triangulation(keypoints_1,keypoints_2,matches,R,t,points);
//-- 验证三角化点与特征点的重投影关系
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
for (int i = 0; i < matches.size(); i++)
{
cv::Point2d pt1_cam = pixel2cam(keypoints_1[matches[i].queryIdx].pt,K);
cv::Point2d pt1_cam_3d(points[i].x/points[i].z, points[i].y/points[i].z);
cout<<"point in the first camera frame: "<<pt1_cam<<endl;
cout<<"point projected from 3D "<<pt1_cam_3d<<", d="<<points[i].z<<endl;
// 第二个图
cv::Point2f pt2_cam = pixel2cam(keypoints_2[matches[i].trainIdx].pt,K);
cv::Mat pt2_trans = R * (cv::Mat_<double>(3,1) << points[i].x,points[i].y,points[i].z) + t;
pt2_trans /= pt2_trans.at<double>(2,0);
cout<<"point in the second camera frame: "<<pt2_cam<<endl;
cout<<"point reprojected from second frame: "<<pt2_trans.t()<<endl;
cout<<endl;
}
return 0;
}
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches)
{
//-- 初始化
cv::Mat descriptors_1, descriptors_2;
// used in OpenCV3
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
// use this if you are in OpenCV2
// Ptr detector = FeatureDetector::create ( "ORB" );
// Ptr descriptor = DescriptorExtractor::create ( "ORB" );
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
//-- 第一步:检测 Oriented FAST 角点位置
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
//-- 第二步:根据角点位置计算 BRIEF 描述子
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
//-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
vector<cv::DMatch> match;
matcher->match(descriptors_1,descriptors_2,match);
//-- 第四步:匹配点对筛选
double min_dist = 10000, max_dist = 0;
//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = match[i].distance;
if(dist < min_dist) min_dist = dist;
if(dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f\n", max_dist);
printf("-- Min dist : %f\n", min_dist);
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
for(int i = 0; i < descriptors_1.rows; i++)
{
if(match[i].distance <= max(2*min_dist, 30.0))
{
matches.push_back(match[i]);
}
}
}
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam (const cv::Point2d& p, const cv::Mat& K)
{
return cv::Point2d( //at是内参数矩阵
(p.x - K.at<double> (0,2)) / K.at<double>(0,0),
(p.y - K.at<double> (1,2)) / K.at<double>(1,1)
);
}
void pose_estimation_2d2d(std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches,
cv::Mat& R, cv::Mat& t)
{
// 相机内参,TUM Freiburg2
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
//-- 把匹配点转换为vector的形式
vector<cv::Point2f> points1;
vector<cv::Point2f> points2;
for(int i = 0; i < (int) matches.size(); i++)
{
points1.push_back(keypoints_1[matches[i].queryIdx].pt);//匹配点对中第一张图片上的点
points2.push_back(keypoints_2[matches[i].trainIdx].pt);//匹配点对中第二张图片上的点
}
//-- 计算基础矩阵
cv::Mat fundamental_matrix;
fundamental_matrix = cv::findFundamentalMat(points1, points2,CV_FM_8POINT);//计算给定一组对应点的基本矩阵 八点法
cout << "fundamental_matrix is" << endl << fundamental_matrix << endl;
//-- 计算本质矩阵
cv::Point2d principal_point (325.1, 249.7); //相机光心, TUM dataset标定值
double focal_length = 521; //相机焦距, TUM dataset标定值
cv::Mat essential_matrix;
essential_matrix = cv::findEssentialMat(points1,points2,focal_length,principal_point);
cout<<"essential_matrix is "<<endl<< essential_matrix<<endl;
//-- 计算单应矩阵
cv::Mat homography_matrix;
homography_matrix = cv::findHomography(points1, points2, cv::RANSAC, 3);
cout<<"homography_matrix is "<<endl<<homography_matrix<<endl;
//-- 从本质矩阵中恢复旋转和平移信息.
cv::recoverPose(essential_matrix, points1, points2, R,t,
focal_length,principal_point);
cout<<"R is "<<endl<<R<<endl;
cout<<"t is "<<endl<<t<<endl;
}
//加入了三角测量部分
void triangulation(const vector<cv::KeyPoint>& keypoint_1, const vector<cv::KeyPoint>& keypoint_2,
const std::vector<cv::DMatch>& matches, const cv::Mat& R, const cv::Mat& t,
vector<cv::Point3d>& points)
{
cv::Mat T1 = (cv::Mat_<float> (3,4) <<
1,0,0,0,
0,1,0,0,
0,0,1,0);
cv::Mat T2 = (cv::Mat_<float> (3,4) <<
R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0,0),
R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1,0),
R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2,0));
cv::Mat K = (cv::Mat_<double> (3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<cv::Point2f> pts_1,pts_2;
for(cv::DMatch m : matches)
{
// 将像素坐标转换至相机坐标
pts_1.push_back(pixel2cam(keypoint_1[m.queryIdx].pt, K));
pts_2.push_back(pixel2cam(keypoint_2[m.trainIdx].pt, K));
}
cv::Mat pts_4d;
//第一个相机的3x4投影矩阵。
//第2个相机的3x4投影矩阵。
cv::triangulatePoints(T1,T2,pts_1,pts_2,pts_4d);
// 转换成非齐次坐标
for(int i = 0; i < pts_4d.cols; i++)
{
cv::Mat x = pts_4d.col(i);
x /= x.at<float>(3,0);// 归一化
cv::Point3d p(x.at<float>(0,0), x.at<float>(1,0), x.at<float>(2,0));
points.push_back(p);
}
}
cmake_minimum_required(VERSION 2.8)
project(triangulation)
set(CMAKE_BUILD_TYPE "Release")
set( CMAKE_CXX_STANDARD 14)
# 添加cmake模块以使用g2o
#list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)
find_package(OpenCV 3.1 REQUIRED)
# find_package( OpenCV REQUIRED ) # use this if in OpenCV2
include_directories("/usr/include/eigen3")
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(triangulation src/triangulation.cpp)
target_link_libraries(triangulation ${OpenCV_LIBRARIES})
/home/bupo/my_study/slam14/slam14_my/cap7/triangulation/cmake-build-debug/triangulation
读取src文件下的图片
[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000
-- Min dist : 7.000000
一共找到了81组匹配点
fundamental_matrix is
[5.435453065936294e-06, 0.0001366043242989641, -0.02140890086948122;
-0.0001321142229824704, 2.339475702778057e-05, -0.006332906454396256;
0.02107630352202776, -0.00366683395295285, 1]
essential_matrix is
[0.01724015832721737, 0.3280543357941316, 0.04737477831442286;
-0.3243229585962945, 0.03292958445202431, -0.6262554366073029;
-0.005885857752317864, 0.6253830041920339, 0.01531678649092664]
homography_matrix is
[0.91317517918067, -0.1092435315821776, 29.95860009981271;
0.02223560352310949, 0.9826008005061946, 6.50891083956826;
-0.0001001560381023939, 0.0001037779436396116, 1]
R is
[0.9985534106102478, -0.05339308467584829, 0.006345444621109364;
0.05321959721496342, 0.9982715997131746, 0.02492965459802013;
-0.007665548311698023, -0.02455588961730218, 0.9996690690694515]
t is
[-0.8829934995085557;
-0.05539655431450295;
0.4661048182498381]
point in the first camera frame: [-0.0136303, -0.302687]
point projected from 3D [-0.0133075, -0.300258], d=66.0186
point in the second camera frame: [-0.00403148, -0.270058]
point reprojected from second frame: [-0.004225950933477142, -0.2724864380718128, 1]
point in the first camera frame: [-0.153772, -0.0742802]
point projected from 3D [-0.153775, -0.0744559], d=21.0728
point in the second camera frame: [-0.180649, -0.0589251]
point reprojected from second frame: [-0.1806548929627975, -0.05875349311330996, 1]
...(一样这里应该有81个输出,这里就省去了,只留下三个参考)
point in the first camera frame: [0.111923, 0.146583]
point projected from 3D [0.111961, 0.146161], d=13.3843
point in the second camera frame: [0.0431343, 0.167215]
point reprojected from second frame: [0.04307433511399977, 0.1676208349311905, 1]
进程已结束,退出代码0
所需要的文件以及我的工程自取:链接:https://pan.baidu.com/s/1NJ-hd2O2Jo8RhcH90iXv-A
提取码:xwc3
注释参考文章SLAM十四讲-ch7(2)-位姿估计(包含2d-2d、3d-2d、3d-3d、以及三角化实现代码的注释)
// Created by nnz on 2022/4/5
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
//该程序用了三种方法实现位姿估计
//第一种,调用cv的函数pnp求解 R ,t
//第二种,手写高斯牛顿进行位姿优化
//第三种,利用g2o进行位姿优化
using namespace cv;
using namespace std;
//VecVector2d可以定义存放二维向量的容器
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
//VecVector3d可以定义存放三维向量的容器
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches);
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam(const cv::Point2d& p, const cv::Mat& K);
// 利用g2o优化位姿和特征点
void bundleAdjustment(const vector<cv::Point3f> points_3d,
const vector<cv::Point2f> points_2d,
const cv::Mat& K,
cv::Mat& R, cv::Mat& t);
// BA by gauss-newton 手写高斯牛顿进行位姿优化
void bundleAdjustmentGaussNewton(const VecVector3d& points_3d,
const VecVector2d& points_2d,
const cv::Mat & K,
Sophus::SE3d& pose);
//对于用g2o来进行优化的话,首先要定义顶点和边的模板
//顶点,也就是咱们要优化的pose 用李代数表示它 6维
class Vertexpose: public g2o::BaseVertex<6,Sophus::SE3d>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW//必须写,我也不知道为什么
//重载setToOriginImpl函数 这个应该就是把刚开的待优化的pose放进去
virtual void setToOriginImpl() override
{
_estimate = Sophus::SE3d();
}
//重载oplusImpl函数,用来更新pose(待优化的系数)
virtual void oplusImpl(const double *update) override
{
Eigen::Matrix<double,6,1> update_eigen;//更新的量,就是增量呗,dx
update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
_estimate=Sophus::SE3d::exp(update_eigen)* _estimate;//更新pose 李代数要转换为李群,这样才可以左乘
}
//存盘 读盘 :留空
virtual bool read(istream &in) override {}
virtual bool write(ostream &out) const override {}
};
//定义边模板 边也就是误差,二维 并且把顶点也放进去
class EdgeProjection : public g2o::BaseUnaryEdge<2,Eigen::Vector2d,Vertexpose>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW;//必须写,我也不知道为什么
//有参构造,初始化 图1中的3d点 以及相机内参K
EdgeProjection(const Eigen::Vector3d &pos, const Eigen::Matrix3d &K) : _pos3d(pos),_K(K) {}
//计算误差
virtual void computeError() override
{
const Vertexpose *v=static_cast<const Vertexpose *>(_vertices[0]);
Sophus::SE3d T=v->estimate();
Eigen::Vector3d pos_pixel = _K * (T * _pos3d);//T * _pos3d是图2的相机坐标下的3d点
pos_pixel /= pos_pixel[2];//得到了像素坐标的齐次形式
_error = _measurement - pos_pixel.head<2>();
}
//计算雅克比矩阵
virtual void linearizeOplus() override
{
const Vertexpose *v = static_cast<Vertexpose *> (_vertices[0]);
Sophus::SE3d T = v->estimate();
Eigen::Vector3d pos_cam=T*_pos3d;//图2的相机坐标下的3d点
double fx = _K(0, 0);
double fy = _K(1, 1);
double cx = _K(0, 2);
double cy = _K(1, 2);
double X = pos_cam[0];
double Y = pos_cam[1];
double Z = pos_cam[2];
double Z2 = Z * Z;
//雅克比矩阵见 书 p187 公式7.46
_jacobianOplusXi
<< -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
0, -fy / Z, fy * Y / (Z * Z), fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
}
//存盘 读盘 :留空
virtual bool read(istream &in) override {}
virtual bool write(ostream &out) const override {}
private:
Eigen::Vector3d _pos3d;
Eigen::Matrix3d _K;
};
//利用g2o优化pose
void bundleAdjustmentG2O(const VecVector3d& points_3d,
const VecVector2d& points_2d,
const cv::Mat & K,
Sophus::SE3d &pose);
int main(int argc, char ** argv)
{
cv::Mat img_1,img_2,depth1,depth2;
if(argc!=5)
{
//读取图片
img_1 = cv::imread("../src/1.png", CV_LOAD_IMAGE_COLOR);//读取彩色图
img_2 = cv::imread("../src/2.png",1);
//接下来的是建立3d点 利用深度图可以获取深度信息
//depth1是图1对应的深度图 depth2是图2对应的深度图
depth1 = cv::imread("../src/1_depth.png", CV_LOAD_IMAGE_UNCHANGED);// 深度图为16位无符号数,单通道图像
depth2 = cv::imread("../src/2_depth.png", -1);
}
else
{
//读取图片
img_1 = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);//读取彩色图
img_2 = cv::imread(argv[2],1);
//接下来的是建立3d点 利用深度图可以获取深度信息
//depth1是图1对应的深度图 depth2是图2对应的深度图
depth1 = cv::imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);// 深度图为16位无符号数,单通道图像
depth2 = cv::imread(argv[4], -1);
}
vector<cv::KeyPoint> keypoints_1, keypoints_2;
vector<cv::DMatch> matches;
find_feature_matches (img_1, img_2, keypoints_1,keypoints_2, matches);//得到两个图片的特征匹配点
cout<<"一共找到了"<<matches.size() <<"组匹配点"<<endl;
// 建立3D点,把深度图信息读进来,构造三维点
//d1 = cv::imread(argv[3], -1);// 深度图为16位无符号数,单通道图像
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
vector<cv::Point3f> pts_3d;//创建容器pts_3d存放3d点(图1对应的特征点的相机坐标下的3d点)
vector<cv::Point2f> pts_2d;//创建容器pts_2d存放图2的特征点
for(cv::DMatch m : matches)
{
//把对应的图1的特征点的深度信息拿出来
ushort d = d1.ptr<unsigned short> (int (keypoints_1[m.queryIdx].pt.y))[int (keypoints_1[m.queryIdx].pt.x)];
if(d == 0) // bad depth
continue;
float dd = d/5000.0;//用dd存放换算过尺度的深度信息
cv::Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);//p1里面放的是图1特征点在相机坐标下的归一化坐标(只包含 x,y)
pts_3d.push_back(cv::Point3f (p1.x *dd, p1.y*dd,dd));//得到图1特征点在相机坐标下的3d坐标
pts_2d.push_back( keypoints_2[m.trainIdx].pt );//得到图2特张点的像素坐标
}
cout << "3d-2d pairs: " << pts_3d.size() << endl;//3d-2d配对个数得用pts_3d的size
cout<<"使用cv求解 位姿"<<endl;
cout<<"***********************************opencv***********************************"<<endl;
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
cv::Mat r,t;
//Mat()这个参数指的是畸变系数向量
cv::solvePnP( pts_3d, pts_2d, K, cv::Mat(), r, t, false ); // 调用OpenCV 的 PnP 求解,可选择EPNP,DLS等方法
cv::Mat R;
cv::Rodrigues(r, R);//r为旋转向量形式,利用cv的Rodrigues()函数将旋转向量转换为旋转矩阵
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2-t1);
cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;
cout<<"R="<<endl<<R<<endl;
cout<<"t="<<endl<<t<<endl;
cout<<"calling bundle adjustment"<<endl;
bundleAdjustment(pts_3d, pts_2d, K, R, t);
cout<<"***********************************opencv***********************************"<<endl;
cout<<"手写高斯牛顿优化位姿"<<endl;
cout<<"***********************************手写高斯牛顿***********************************"<<endl;
VecVector3d pts_3d_eigen; //存放3d点(图1对应的特征点的相机坐标下的3d点)
VecVector2d pts_2d_eigen; //存放图2的特征点
for (size_t i =0; i < pts_3d.size(); i++)//size_t
{
pts_3d_eigen.push_back(Eigen::Vector3d(pts_3d[i].x, pts_3d[i].y,pts_3d[i].z));
pts_2d_eigen.push_back(Eigen::Vector2d(pts_2d[i].x, pts_2d[i].y));
}
Sophus::SE3d pose_gn;//位姿(李群)
t1 = chrono::steady_clock::now();
bundleAdjustmentGaussNewton(pts_3d_eigen, pts_2d_eigen,K,pose_gn);
t2 = chrono::steady_clock::now();
time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl;
cout << "R = \n" << pose_gn.rotationMatrix() << endl;
cout << "t = " << pose_gn.translation().transpose() << endl;
cout<<"***********************************手写高斯牛顿***********************************"<<endl;
cout<<"g2o优化位姿"<<endl;
cout << "***********************************g2o***********************************" << endl;
Sophus::SE3d pose_g2o;
t1 = chrono::steady_clock::now();
bundleAdjustmentG2O(pts_3d_eigen,pts_2d_eigen,K, pose_g2o);
t2 = chrono::steady_clock::now();
time_used = chrono::duration_cast<chrono::duration<double>>(t2-t1);
cout << "solve pnp by g2o cost time: " << time_used.count() << " seconds." << endl;
cout << "***********************************g2o***********************************" << endl;
return 0;
}
//实现特征匹配
void find_feature_matches(const cv::Mat& img_1, const cv::Mat& img_2,
std::vector<cv::KeyPoint>& keypoints_1, std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch>& matches)
{
//-- 初始化
cv::Mat descriptors_1, descriptors_2;
// used in OpenCV3
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
// use this if you are in OpenCV2
// Ptr detector = FeatureDetector::create ( "ORB" );
// Ptr descriptor = DescriptorExtractor::create ( "ORB" );
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
//-- 第一步:检测 Oriented FAST 角点位置
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
//-- 第二步:根据角点位置计算 BRIEF 描述子
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
//-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
vector<cv::DMatch> match;
// BFMatcher matcher ( NORM_HAMMING );
matcher->match(descriptors_1, descriptors_2, match);
//-- 第四步:匹配点对筛选
double min_dist = 10000, max_dist = 0;
//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
for (int i = 0; i < descriptors_1.rows;i++)
{
double dist = match[i].distance;
if(dist > max_dist) max_dist = dist;
if(dist < min_dist) min_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);;
printf("-- Min dist : %f \n", min_dist);
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
for ( int i = 0; i < descriptors_1.rows; i++ )
{
if ( match[i].distance <= max ( 2*min_dist, 30.0 ))
{
matches.push_back ( match[i] );
}
}
}
//实现像素坐标到相机坐标的转换(求出来的只是包含相机坐标下的x,y的二维点)
cv::Point2d pixel2cam(const cv::Point2d& p, const cv::Mat& K)
{
return cv::Point2d(
((p.x - K.at<double>(0,2))/K.at<double>(0,0)),
((p.y - K.at<double>(1,2))/K.at<double>(1,1))
);
}
//手写高斯牛顿
// BA by gauss-newton 手写高斯牛顿进行位姿优化
void bundleAdjustmentGaussNewton(const VecVector3d& points_3d,
const VecVector2d& points_2d,
const cv::Mat & K,
Sophus::SE3d& pose)
{
typedef Eigen::Matrix<double, 6, 1> Vector6d;
const int iters = 10;//迭代次数
double cost = 0, lastcost = 0;//代价函数(目标函数)
//拿出内参
double fx = K.at<double>(0,0);
double fy = K.at<double>(1,1);
double cx = K.at<double>(0,2);
double cy = K.at<double>(1,2);
//进入迭代
for (int iter = 0; iter < iters; iter++)
{
Eigen::Matrix<double,6,6> H = Eigen::Matrix<double,6,6>::Zero();//初始化H矩阵
Vector6d b = Vector6d::Zero();//对b矩阵初始化
cost = 0;
// 遍历所有的特征点 计算cost
for(int i=0;i<points_3d.size();i++)
{
Eigen::Vector3d pc=pose*points_3d[i];//利用待优化的pose得到图2的相机坐标下的3d点
double inv_z=1.0/pc[2];//得到图2的相机坐标下的3d点的z的倒数,也就是1/z
double inv_z2 = inv_z * inv_z;//(1/z)^2
//定义投影
Eigen::Vector2d proj(fx * pc[0] / pc[2] + cx, fy * pc[1] / pc[2] + cy);
//定义误差
Eigen::Vector2d e=points_2d[i]-proj;
cost += e.squaredNorm();//cost=e*e
//定义雅克比矩阵J
Eigen::Matrix<double, 2, 6> J;
J << -fx * inv_z,
0,
fx * pc[0] * inv_z2,
fx * pc[0] * pc[1] * inv_z2,
-fx - fx * pc[0] * pc[0] * inv_z2,
fx * pc[1] * inv_z,
0,
-fy * inv_z,
fy * pc[1] * inv_z2,
fy + fy * pc[1] * pc[1] * inv_z2,
-fy * pc[0] * pc[1] * inv_z2,
-fy * pc[0] * inv_z;
H += J.transpose() * J;
b += -J.transpose() * e;
}
//出了这个内循环,表述结束一次迭代的计算,接下来,要求pose了
Vector6d dx;//P129页 公式6.33 计算增量方程 Hdx=b
dx = H.ldlt().solve(b);//算出增量dx
//判断dx这个数是否有效
if(isnan(dx[0]))
{
cout << "result is nan!" << endl;
break;
}
//如果我们进行了迭代,且最后的cost>=lastcost的话,那就表明满足要求了,可以停止迭代了
if(iter > 0 && cost >= lastcost)
{
// cost increase, update is not good
cout << "cost: " << cost << ", last cost: " << lastcost << endl;
break;
}
//优化pose 也就是用dx更新pose
pose = Sophus::SE3d::exp(dx) * pose; //dx是李代数,要转换为李群
lastcost=cost;
cout << "iteration " << iter << " cost="<< std::setprecision(12) << cost << endl;
//std::setprecision(12)浮点数控制位数为12位
//如果误差特别小了,也结束迭代
if (dx.norm() < 1e-6)
{
// converge
break;
}
}
cout<<"pose by g-n \n"<<pose.matrix()<<endl;
}
//利用g2o优化pose
void bundleAdjustmentG2O(const VecVector3d& points_3d,
const VecVector2d& points_2d,
const cv::Mat & K,
Sophus::SE3d &pose)
{
// 构建图优化,先设定g2o
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;// pose 维度为 6, landmark 维度为 3
Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>(); // 线性方程求解器
Block* solver_ptr = new Block (unique_ptr<Block::LinearSolverType>(linearSolver)); // 矩阵块求解器
// 梯度下降方法,可以从GN, LM, DogLeg 中选
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(unique_ptr<Block>(solver_ptr));
g2o::SparseOptimizer optimizer; // 图模型
optimizer.setAlgorithm(solver); // 设置求解器 算法g-n
optimizer.setVerbose(false); // 取消调试输出
//加入顶点
Vertexpose *v=new Vertexpose();
v->setEstimate(Sophus::SE3d());
v->setId(0);
optimizer.addVertex(v);
//K
// K
Eigen::Matrix3d K_eigen;
K_eigen <<
K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);
//加入边
int index=1;
for(size_t i=0;i<points_2d.size();++i)
{
//遍历 把3d点和像素点拿出来
auto p2d = points_2d[i];
auto p3d = points_3d[i];
EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);//有参构造
edge->setId(index);
edge->setVertex(0,v);
edge->setMeasurement(p2d);//设置观测值,其实就是图2 里的匹配特征点的像素位置
edge->setInformation(Eigen::Matrix2d::Identity());//信息矩阵是二维方阵,因为误差是二维
optimizer.addEdge(edge);//加入边
index++;//边的编号++
}
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
optimizer.setVerbose(false);
optimizer.initializeOptimization();//开始 初始化
optimizer.optimize(10);//迭代次数
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
cout << "pose estimated by g2o =\n" << v->estimate().matrix() << endl;
pose = v->estimate();
}
void bundleAdjustment(const vector<cv::Point3f> points_3d,
const vector<cv::Point2f> points_2d,
const cv::Mat& K,
cv::Mat& R, cv::Mat& t)
{
// 初始化g2o
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;// pose 维度为 6, landmark 维度为 3
Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>(); // 线性方程求解器
Block* solver_ptr = new Block (unique_ptr<Block::LinearSolverType>(linearSolver)); // 矩阵块求解器
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(unique_ptr<Block>(solver_ptr));
g2o::SparseOptimizer optimizer; // 图模型
optimizer.setAlgorithm( solver ); // 设置求解器
// vertex 有两个顶点,一个是优化位姿,一个是优化特征点的空间位置
g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap(); // camera pose
Eigen::Matrix3d R_mat;
R_mat <<
R.at<double> ( 0,0 ), R.at<double> ( 0,1 ), R.at<double> ( 0,2 ),
R.at<double> ( 1,0 ), R.at<double> ( 1,1 ), R.at<double> ( 1,2 ),
R.at<double> ( 2,0 ), R.at<double> ( 2,1 ), R.at<double> ( 2,2 );
pose->setId(0);
pose->setEstimate(g2o::SE3Quat(R_mat, Eigen::Vector3d(t.at<double>(0,0), t.at<double>(1,0), t.at<double>(2,0))));
optimizer.addVertex(pose);
int index = 1;
for (const cv::Point3f p:points_3d) // landmarks
{
g2o::VertexPointXYZ* point = new g2o::VertexPointXYZ();
point->setId(index++);
point->setEstimate(Eigen::Vector3d(p.x, p.y, p.z));
point->setMarginalized(true);// g2o 中必须设置 marg 参见第十讲内容
optimizer.addVertex(point);
}
// parameter: camera intrinsics
g2o::CameraParameters* camera = new g2o::CameraParameters (
K.at<double> ( 0,0 ), Eigen::Vector2d ( K.at<double> ( 0,2 ), K.at<double> ( 1,2 ) ), 0
);
camera->setId ( 0 );
optimizer.addParameter ( camera );
// edges
index = 1;
for ( const cv::Point2f p:points_2d )
{
g2o::EdgeProjectXYZ2UV* edge = new g2o::EdgeProjectXYZ2UV();
edge->setId ( index );
edge->setVertex ( 0, dynamic_cast<g2o::VertexPointXYZ*> ( optimizer.vertex ( index ) ) );
edge->setVertex ( 1, pose );
edge->setMeasurement ( Eigen::Vector2d ( p.x, p.y ) );
edge->setParameterId ( 0,0 );
// 信息矩阵:协方差矩阵之逆 这里为1表示加权为1
edge->setInformation ( Eigen::Matrix2d::Identity() );//这里的信息矩阵可以参考:http://www.cnblogs.com/gaoxiang12/p/5244828.html 里面有说明
optimizer.addEdge ( edge );
index++;
}
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
optimizer.setVerbose ( false );// 打开调试输出,也就是输出迭代信息的设置
optimizer.initializeOptimization();
optimizer.optimize ( 100 );
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>> ( t2-t1 );
cout<<"optimization costs time: "<<time_used.count() <<" seconds."<<endl;
cout<<endl<<"after optimization:"<<endl;
cout<<"T="<<endl<<Eigen::Isometry3d ( pose->estimate() ).matrix() <<endl;
}
cmake_minimum_required(VERSION 2.8)
project(pose_estimation_3d2d)
set( CMAKE_CXX_STANDARD 14)
#SET(G2O_LIBS g2o_cli g2o_ext_freeglut_minimal g2o_simulator g2o_solver_slam2d_linear g2o_types_icp g2o_types_slam2d g2o_core g2o_interface g2o_solver_csparse g2o_solver_structure_only g2o_types_sba g2o_types_slam3d g2o_csparse_extension g2o_opengl_helper g2o_solver_dense g2o_stuff g2o_types_sclam2d g2o_parser g2o_solver_pcg g2o_types_data g2o_types_sim3 cxsparse )
set( CMAKE_BUILD_TYPE Release )
# 添加cmake模块以使用g2o
list( APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules )
find_package(OpenCV 3.1 REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRECTORIES})
include_directories("/usr/include/eigen3")
find_package(G2O REQUIRED)
include_directories(${G2O_INCLUDE_DIRECTORIES})
find_package(CSparse REQUIRED)
include_directories(${CSPARSE_INCLUDE_DIR})
find_package(Sophus REQUIRED)
include_directories(${Sophus_INCLUDE_DIRS})
add_executable(pose_estimation_3d2d src/pose_estimation_3d2d.cpp)
target_link_libraries( pose_estimation_3d2d
${OpenCV_LIBS}
g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension
g2o_types_slam3d
#${G2O_LIBS}
${CSPARSE_LIBRARY}
${Sophus_LIBRARIES} fmt
)
/home/bupo/shenlan/zuoye/cap7/pose_estimation_3d2d/cmake-build-debug/pose_estimation_3d2d 1.png 2.png 1_depth.png 2_depth.png
[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000
-- Min dist : 7.000000
一共找到了81组匹配点
3d-2d pairs: 77
使用cv求解 位姿
***********************************opencv***********************************
solve pnp in opencv cost time: 0.00274987 seconds.
R=
[0.9979193252225089, -0.05138618904650331, 0.03894200717386666;
0.05033852907733834, 0.9983556574295412, 0.02742286944793203;
-0.04028712992734059, -0.02540552801469367, 0.9988651091656532]
t=
[-0.1255867099750398;
-0.007363525258777434;
0.06099926588678889]
calling bundle adjustment
optimization costs time: 0.0012403 seconds.
after optimization:
T=
0.997841 -0.0518393 0.0403291 -0.127516
0.0507013 0.9983 0.028745 -0.00947167
-0.0417507 -0.0266382 0.998773 0.0595037
0 0 0 1
***********************************opencv***********************************
手写高斯牛顿优化位姿
***********************************手写高斯牛顿***********************************
iteration 0 cost=44765.3537799
iteration 1 cost=431.695366816
iteration 2 cost=319.560037493
iteration 3 cost=319.55886789
pose by g-n
0.997919325221 -0.0513861890122 0.0389420072614 -0.125586710123
0.0503385290405 0.998355657431 0.0274228694545 -0.00736352527141
-0.0402871300142 -0.0254055280183 0.998865109162 0.0609992659219
0 0 0 1
solve pnp by gauss newton cost time: 0.000125385 seconds.
R =
0.997919325221 -0.0513861890122 0.0389420072614
0.0503385290405 0.998355657431 0.0274228694545
-0.0402871300142 -0.0254055280183 0.998865109162
t = -0.125586710123 -0.00736352527141 0.0609992659219
***********************************手写高斯牛顿***********************************
g2o优化位姿
***********************************g2o***********************************
optimization costs time: 0.000207278 seconds.
pose estimated by g2o =
0.997919325223 -0.0513861890471 0.0389420071732 -0.125586709974
0.0503385290779 0.998355657429 0.0274228694493 -0.007363525261
-0.0402871299268 -0.0254055280161 0.998865109166 0.0609992658862
0 0 0 1
solve pnp by g2o cost time: 0.000303667 seconds.
***********************************g2o***********************************
进程已结束,退出代码0
set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )
调整为:
set( CMAKE_CXX_STANDARD 14)
然后是g2o使用问题,会报错FAILED: pose_estimation_3d2d /usr/bin/ld: CMakeFiles/pose_estimation_3d2d.dir/src/pose_estimation_3d2d.cpp.o: undefined reference to symbol '_ZTVN3g2o14VertexPointXYZE' //usr/local/lib/libg2o_types_slam3d.so: 无法添加符号: DSO missing from command line collect2: error: ld returned 1 exit status ninja: build stopped: subcommand failed.
这里DSO missing的原因基本上是因为有库没有连接上,导致未识别。最保险的方法是在最前端将所有库设置为G2O_LIBS:
SET(G2O_LIBS g2o_cli g2o_ext_freeglut_minimal g2o_simulator g2o_solver_slam2d_linear g2o_types_icp g2o_types_slam2d g2o_core g2o_interface g2o_solver_csparse g2o_solver_structure_only g2o_types_sba g2o_types_slam3d g2o_csparse_extension g2o_opengl_helper g2o_solver_dense g2o_stuff g2o_types_sclam2d g2o_parser g2o_solver_pcg g2o_types_data g2o_types_sim3 cxsparse )
并在随后对应的链接中加入${G2O_LIBS}:
target_link_libraries( pose_estimation_3d2d
${OpenCV_LIBS}
g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension ${G2O_LIBS}
${CSPARSE_LIBRARY}
)
当然其实不需要把所有的都链接上,这样编译也会很慢,不set,然后只添加g2o_types_slam3d
,随便编译会警告,但是可以运行:
target_link_libraries( pose_estimation_3d2d
${OpenCV_LIBS}
g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension g2o_types_slam3d#${G2O_LIBS}
${CSPARSE_LIBRARY}
)
bundleAdjustment
函数中的 // 初始化g2o
typedef g2o::BlockSolver< g2o::BlockSolverTraits<6,3> > Block; // pose 维度为 6, landmark 维度为 3
Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>(); // 线性方程求解器
Block* solver_ptr = new Block ( linearSolver ); // 矩阵块求解器
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr );
调整为:
// 初始化g2o
typedef g2o::BlockSolver< g2o::BlockSolverTraits<6,3> > Block; // pose 维度为 6, landmark 维度为 3
Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>(); // 线性方程求解器
Block* solver_ptr = new Block ( linearSolver ); // 矩阵块求解器
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr );
g2o::VertexSBAPointXYZ
替换为g2o::EdgeProjectXYZ2UV
bundleAdjustmentG2O()
函数),出现报错FAILED: pose_estimation_3d2d
: && /usr/bin/c++ -g -rdynamic CMakeFiles/pose_estimation_3d2d.dir/src/pose_estimation_3d2d.cpp.o -o pose_estimation_3d2d -Wl,-rpath,/usr/local/lib /usr/local/lib/libopencv_dnn.so.3.4.1 /usr/local/lib/libopencv_ml.so.3.4.1 /usr/local/lib/libopencv_objdetect.so.3.4.1 /usr/local/lib/libopencv_shape.so.3.4.1 /usr/local/lib/libopencv_stitching.so.3.4.1 /usr/local/lib/libopencv_superres.so.3.4.1 /usr/local/lib/libopencv_videostab.so.3.4.1 /usr/local/lib/libopencv_viz.so.3.4.1 -lg2o_core -lg2o_stuff -lg2o_types_sba -lg2o_csparse_extension -lg2o_cli -lg2o_ext_freeglut_minimal -lg2o_simulator -lg2o_solver_slam2d_linear -lg2o_types_icp -lg2o_types_slam2d -lg2o_core -lg2o_interface -lg2o_solver_csparse -lg2o_solver_structure_only -lg2o_types_sba -lg2o_types_slam3d -lg2o_csparse_extension -lg2o_opengl_helper -lg2o_solver_dense -lg2o_stuff -lg2o_types_sclam2d -lg2o_parser -lg2o_solver_pcg -lg2o_types_data -lg2o_types_sim3 -lcxsparse -lcxsparse -lfmt /usr/local/lib/libopencv_calib3d.so.3.4.1 /usr/local/lib/libopencv_features2d.so.3.4.1 /usr/local/lib/libopencv_flann.so.3.4.1 /usr/local/lib/libopencv_highgui.so.3.4.1 /usr/local/lib/libopencv_photo.so.3.4.1 /usr/local/lib/libopencv_video.so.3.4.1 /usr/local/lib/libopencv_videoio.so.3.4.1 /usr/local/lib/libopencv_imgcodecs.so.3.4.1 /usr/local/lib/libopencv_imgproc.so.3.4.1 /usr/local/lib/libopencv_core.so.3.4.1 -lg2o_core -lg2o_types_sba -lg2o_csparse_extension -lg2o_cli -lg2o_ext_freeglut_minimal -lg2o_simulator -lg2o_solver_slam2d_linear -lg2o_types_icp -lg2o_types_slam2d -lg2o_interface -lg2o_solver_csparse -lg2o_solver_structure_only -lg2o_types_slam3d -lg2o_opengl_helper -lg2o_solver_dense -lg2o_types_sclam2d -lg2o_parser -lg2o_solver_pcg -lg2o_types_data -lg2o_types_sim3 -lcxsparse -lcxsparse -lfmt && :
CMakeFiles/pose_estimation_3d2d.dir/src/pose_estimation_3d2d.cpp.o:在函数‘ceres::internal::FixedArray<double, 6ul, std::allocator<double> >::operator[](unsigned long)’中:
/usr/local/include/ceres/internal/fixed_array.h:215:对‘google::LogMessageFatal::LogMessageFatal(char const*, int, google::CheckOpString const&)’未定义的引用
/usr/local/include/ceres/internal/fixed_array.h:215:对‘google::LogMessage::stream()’未定义的引用
/usr/local/include/ceres/internal/fixed_array.h:215:对‘google::LogMessageFatal::~LogMessageFatal()’未定义的引用
/usr/local/include/ceres/internal/fixed_array.h:215:对‘google::LogMessageFatal::~LogMessageFatal()’未定义的引用
CMakeFiles/pose_estimation_3d2d.dir/src/pose_estimation_3d2d.cpp.o:在函数‘std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >* google::MakeCheckOpString<unsigned long, unsigned long>(unsigned long const&, unsigned long const&, char const*)’中:
/usr/include/glog/logging.h:692:对‘google::base::CheckOpMessageBuilder::CheckOpMessageBuilder(char const*)’未定义的引用
/usr/include/glog/logging.h:694:对‘google::base::CheckOpMessageBuilder::ForVar2()’未定义的引用
/usr/include/glog/logging.h:695:对‘google::base::CheckOpMessageBuilder::NewString[abi:cxx11]()’未定义的引用
/usr/include/glog/logging.h:692:对‘google::base::CheckOpMessageBuilder::~CheckOpMessageBuilder()’未定义的引用
/usr/include/glog/logging.h:692:对‘google::base::CheckOpMessageBuilder::~CheckOpMessageBuilder()’未定义的引用
collect2: error: ld returned 1 exit status
ninja: build stopped: subcommand failed.
set( CMAKE_BUILD_TYPE Release )
./pose_estimation_3d2d: error while loading shared libraries: libg2o_opengl_helper.so: cannot open shared object file: No such file or directory
sudo gedit /etc/ld.so.conf
加入内容:
/usr/local/lib
然后执行命令:
sudo ldconfig
参考:
error while loading shared libraries: libg2o_core.so: cannot open shared object file: No such file o
G2O
库,还要注意CSparse
库//
// Created by czy on 2022/4/6.
//
#include
using namespace std;
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
void find_feature_matches(const cv::Mat &img_1, const cv::Mat &img_2,
std::vector<cv::KeyPoint>& keypoints_1,
std::vector<cv::KeyPoint>& keypoints_2,
std::vector<cv::DMatch> &matches);
// 像素坐标转相机归一化坐标
cv::Point2d pixel2cam(const cv::Point2d &p, const cv::Mat &K);
void pose_estimation_3d3d(const vector<cv::Point3f> &pts1,
const vector<cv::Point3f> &pts2,
cv::Mat &R, cv::Mat &t);
void bundleAdjustment(const vector<cv::Point3f> &pts1,
const vector<cv::Point3f> &pts2,
cv::Mat &R, cv::Mat &t);
int main(int argc, char** argv)
{
if(argc!=5)
{
cout<<" usage: pose_estimation_3d3d img1 img2 depth1 depth2 "<<endl;
return 1;
}
//读取图片
cv::Mat img_1 = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);//读取彩色图
cv::Mat img_2 = cv::imread(argv[2],1);
vector<cv::KeyPoint> keypoints_1, keypoints_2;//容器keypoints_1, keypoints_2分别存放图1和图2的特征点
vector<cv::DMatch> matches;
//得到图1与图2的特征匹配点
find_feature_matches(img_1,img_2,keypoints_1,keypoints_2,matches);
cout << "一共找到了" << matches.size() << "组匹配点" << endl;
//接下来的是建立3d点 利用深度图可以获取深度信息
//depth1是图1对应的深度图 depth2是图2对应的深度图
cv::Mat depth1 = cv::imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);// 深度图为16位无符号数,单通道图像
cv::Mat depth2 = cv::imread(argv[4], -1);
//内参矩阵
cv::Mat K = (cv::Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector<cv::Point3f> pts1,pts2;
for(cv::DMatch m:matches)
{
//先把两图特征匹配点对应的深度拿出来
ushort d1 = depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
ushort d2 = depth2.ptr<unsigned short>(int(keypoints_1[m.trainIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
if(d1 == 0 || d2 == 0)//深度无效
continue;
cv::Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt,K);//得到图1的特征匹配点在其相机坐标下的x ,y
cv::Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt,K);//得到图2的特征匹配点在其相机坐标下的x ,y
//对深度进行尺度变化得到真正的深度
float dd1 = float(d1)/5000.0;
float dd2 = float(d2)/5000.0;
//容器 pts_1与pts_2分别存放 图1中的特征匹配点其相机坐标下的3d点 和 图2中的特征匹配点其相机坐标下的3d点
pts1.push_back(cv::Point3f(p1.x*dd1, p1.y*dd1,dd1));
pts2.push_back(cv::Point3f(p2.x*dd2, p2.y*dd2,dd2));
}
//这样就可以得到 3d-3d的匹配点
cout << "3d-3d pairs: " << pts1.size() << endl;
cv::Mat R,t;
cout<<" ************************************ICP 3d-3d by SVD**************************************** "<<endl;
pose_estimation_3d3d(pts1,pts2,R,t);
cout << "ICP via SVD results: " << endl;
cout << "R = " << R << endl;
cout << "t = " << t << endl;
cout << "R^T = " << R.t() << endl;
cout << "t^T = " << -R.t() * t << endl;
cout<<" ************************************ICP 3d-3d by SVD**************************************** "<<endl;
cout<<endl;
cout<<" ************************************ICP 3d-3d by g2o**************************************** "<<endl;
bundleAdjustment(pts1, pts2, R, t);
cout<<"R= \n"<<R<<endl;
cout<<"t = "<< t.t() <<endl;
cout<<"验证 p2 = R*P1 +t "<<endl;
for (int i = 0; i < 5; i++) {
cout << "p1 = " << pts1[i] << endl;
cout << "p2 = " << pts2[i] << endl;
cout << "(R*p1+t) = " <<
R * (cv::Mat_<double>(3,1) << pts1[i].x, pts1[i].y, pts1[i].z) + t
<< endl;
cout << endl;
}
cout<<" ************************************ICP 3d-3d by g2o**************************************** "<<endl;
return 0;
}
void find_feature_matches(const cv::Mat &img_1, const cv::Mat &img_2,
std::vector<cv::KeyPoint> &keypoints_1,
std::vector<cv::KeyPoint> &keypoints_2,
std::vector<cv::DMatch> &matches) {
//-- 初始化
cv::Mat descriptors_1, descriptors_2;
// used in OpenCV3
cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create();
cv::Ptr<cv::DescriptorExtractor> descriptor = cv::ORB::create();
// use this if you are in OpenCV2
// Ptr detector = FeatureDetector::create ( "ORB" );
// Ptr descriptor = DescriptorExtractor::create ( "ORB" );
cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
//-- 第一步:检测 Oriented FAST 角点位置
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
//-- 第二步:根据角点位置计算 BRIEF 描述子
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
//-- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离
vector<cv::DMatch> match;
// BFMatcher matcher ( NORM_HAMMING );
matcher->match(descriptors_1, descriptors_2, match);
//-- 第四步:匹配点对筛选
double min_dist = 10000, max_dist = 0;
//找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = match[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
for (int i = 0; i < descriptors_1.rows; i++) {
if (match[i].distance <= max(2 * min_dist, 30.0)) {
matches.push_back(match[i]);
}
}
}
//实现像素坐标到相机坐标的转换(求出来的只是包含相机坐标下的x,y的二维点)
cv::Point2d pixel2cam(const cv::Point2d &p, const cv::Mat &K) {
return cv::Point2d
(
(p.x - K.at<double>(0, 2)) / K.at<double>(0, 0),
(p.y - K.at<double>(1, 2)) / K.at<double>(1, 1)
);
}
void pose_estimation_3d3d(const vector<cv::Point3f> &pts1,
const vector<cv::Point3f> &pts2,
cv::Mat &R, cv::Mat &t)
{
int N = pts1.size();//匹配的3d点个数
cv::Point3f p1,p2;//质心
for (int i = 0; i < N; i++)
{
p1+=pts1[i];
p2+=pts2[2];
}
p1 = cv::Point3f(cv::Vec3f(p1)/N);//得到质心
p2 = cv::Point3f(cv::Vec3f(p2)/N);
vector<cv::Point3f> q1(N), q2(N);
for(int i = 0; i < N; i++)
{
//去质心
q1[i] = pts1[i] - p1;
q2[i] = pts2[i] - p2;
}
//计算 W+=q1*q2^T(求和)
Eigen::Matrix3d W = Eigen::Matrix3d::Zero();//初始化
for(int i = 0; i < N; i++)
{
W += Eigen::Vector3d(q1[i].x,q1[i].y,q1[i].z) * Eigen::Vector3d(q2[i].x,q2[i].y,q2[i].z).transpose();
}
cout << "W = " << endl << W << endl;
//利用svd分解 W=U*sigema*V
//Eigen::ComputeFullU : 在JacobiSVD中使用,表示要计算方阵U
//Eigen::ComputeFullV : 在JacobiSVD中使用,表示要计算方阵V
Eigen::JacobiSVD<Eigen::Matrix3d> svd(W,Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::Matrix3d U = svd.matrixU();//得到U矩阵
Eigen::Matrix3d V = svd.matrixV();//得到V矩阵
cout << "U=" << U << endl;
cout << "V=" << V << endl;
Eigen::Matrix3d R_ = U * (V.transpose());
if(R_.determinant() < 0)//若旋转矩阵R_的行列式<0 则取负号
{
R_ = -R_;
}
//得到平移向量
Eigen::Vector3d t_ = Eigen::Vector3d (p1.x,p1.y,p1.z) - R_ * Eigen::Vector3d(p2.x,p2.y,p2.z);
//把 Eigen形式的 r 和 t_ 转换为CV 中的Mat格式
R = (cv::Mat_<double>(3,3) <<
R_(0, 0), R_(0, 1), R_(0, 2),
R_(1, 0), R_(1, 1), R_(1, 2),
R_(2, 0), R_(2, 1), R_(2, 2)
);
t = (cv::Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}
//对于用g2o来进行优化的话,首先要定义顶点和边的模板
//顶点,也就是咱们要优化的pose 用李代数表示它 6维
class Vertexpose : public g2o::BaseVertex<6, Sophus::SE3d>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW;//必须写,我也不知道为什么
//重载setToOriginImpl函数 这个应该就是把刚开的待优化的pose放进去
virtual void setToOriginImpl() override
{
_estimate = Sophus::SE3d();
}
//重载oplusImpl函数,用来更新pose(待优化的系数)
virtual void oplusImpl(const double *update) override
{
Eigen::Matrix<double, 6,1> update_eigen;//更新的量,就是增量呗,dx
update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
_estimate = Sophus::SE3d::exp(update_eigen) * _estimate;//更新pose 李代数要转换为李群,这样才可以左乘
}
//存盘 读盘 :留空
virtual bool read(istream &in) override {}
virtual bool write(ostream &out) const override {}
};
//定义边
class EdgeProjectXYZRGBD : public g2o::BaseUnaryEdge<3, Eigen::Vector3d, Vertexpose>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
EdgeProjectXYZRGBD(const Eigen::Vector3d &point) : _point(point) {}//赋值这个是图1坐标下的3d点
//计算误差
virtual void computeError() override
{
const Vertexpose *v = static_cast<const Vertexpose*>(_vertices[0]);//顶点v
_error = _measurement - v->estimate() * _point;
}
//计算雅克比
virtual void linearizeOplus() override
{
const Vertexpose *v = static_cast<const Vertexpose *>(_vertices[0]);//顶点v
Sophus::SE3d T = v->estimate();//把顶点的待优化系数拿出来
Eigen::Vector3d xyz_trans = T * _point;//变换到图2下的坐标点
//课本2 p86 and p199
_jacobianOplusXi.block<3,3>(0,0) = -Eigen::Matrix3d::Identity();
_jacobianOplusXi.block<3,3>(0,3) = Sophus::SO3d::hat(xyz_trans);
}
//存盘 读盘 :留空
virtual bool read(istream &in) override {}
virtual bool write(ostream &out) const override {}
protected:
Eigen::Vector3d _point;
};
//利用g2o
void bundleAdjustment(const vector<cv::Point3f> &pts1,
const vector<cv::Point3f> &pts2,
cv::Mat &R, cv::Mat &t)
{
// 构建图优化,先设定g2o
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,3>> Block;// pose 维度为 6, landmark 维度为 3
Block::LinearSolverType* linearSolver = new g2o::LinearSolverCSparse<Block::PoseMatrixType>(); // 线性方程求解器
Block* solver_ptr = new Block (unique_ptr<Block::LinearSolverType>(linearSolver)); // 矩阵块求解器
g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(unique_ptr<Block>(solver_ptr));
g2o::SparseOptimizer optimizer;// 图模型
optimizer.setAlgorithm(solver);// 设置求解器
optimizer.setVerbose(false); // 调试输出
//加入顶点
Vertexpose *v = new Vertexpose();
v->setEstimate(Sophus::SE3d());
v->setId(0);
optimizer.addVertex(v);
//加入边
int index = 1;
for (size_t i = 0; i < pts1.size();i++)
{
EdgeProjectXYZRGBD *edge = new EdgeProjectXYZRGBD(Eigen::Vector3d(pts1[i].x,pts1[i].y,pts1[i].z));
edge->setId(index);//边的编号
edge->setVertex(0,v);//设置顶点 顶点编号
edge->setMeasurement(Eigen::Vector3d(pts2[i].x,pts2[i].y,pts2[i].z));
edge->setInformation(Eigen::Matrix3d::Identity());//set信息矩阵为单位矩阵 信息矩阵是3维方阵,因为误差是3维
optimizer.addEdge(edge);//加入边
index++;//边的编号++
}
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
optimizer.setVerbose(false);
optimizer.initializeOptimization();//开始 初始化
optimizer.optimize(10);//迭代次数
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
cout << endl << "after optimization:" << endl;
cout << "T=\n" << v->estimate().matrix() << endl;
// 把位姿转换为Mat类型
Eigen::Matrix3d R_ = v->estimate().rotationMatrix();
Eigen::Vector3d t_ = v->estimate().translation();
R = (cv::Mat_<double>(3,3) << R_(0, 0), R_(0, 1), R_(0, 2),
R_(1, 0), R_(1, 1), R_(1, 2),
R_(2, 0), R_(2, 1), R_(2, 2));
t = (cv::Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}
cmake_minimum_required(VERSION 2.8)
project(pose_estimation_3d3d)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_BUILD_TYPE Release )
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake_modules)
# eigen3
include_directories("/usr/include/eigen3")
#opencv2
find_package(OpenCV 3.1 REQUIRED)
#g2o
find_package(G2O REQUIRED)
find_package(CSparse REQUIRED)
#Sophus
find_package(Sophus REQUIRED)
include_directories(
${OpenCV_INCLUDE_DIRECTORIES}
${G2O_INCLUDE_DIRECTORIES}
${CSPARSE_INCLUDE_DIR}
${Sophus_INCLUDE_DIRECTORIES}
)
add_executable(pose_estimation_3d3d src/pose_estimation_3d3d.cpp)
target_link_libraries(pose_estimation_3d3d
${OpenCV_LIBRARIES}
g2o_core g2o_stuff g2o_types_sba
g2o_csparse_extension
g2o_types_slam3d
${CSPARSE_LIBRARY}
${Sophus_LIBRARIES} fmt
)
/home/bupo/shenlan/zuoye/cap7/pose_estimation_3d3d/cmake-build-release/pose_estimation_3d3d ./src/1.png ./src/2.png ./src/1_depth.png ./src/2_depth.png
[ INFO:0] Initialize OpenCL runtime...
-- Max dist : 95.000000
-- Min dist : 7.000000
一共找到了81组匹配点
3d-3d pairs: 57
************************************ICP 3d-3d by SVD****************************************
W =
10.4543 -0.546814 6.16332
-1.64047 3.245 1.42499
3.16335 -4.96179 -1.15376
U= 0.961894 0.268057 -0.0539041
-0.109947 0.559704 0.821367
0.250343 -0.784141 0.567848
V= 0.8824 -0.0914481 -0.461527
-0.17002 0.852669 -0.494013
0.438707 0.514386 0.736847
ICP via SVD results:
R = [0.8491402669281291, 0.09165187733023789, 0.5201545351749511;
-0.5272840804832376, 0.09016969161333116, 0.8448910729693515;
0.03053367894919912, -0.9917002370152983, 0.1248932918680087]
t = [-0.2341765323736505;
-2.435024883544938;
1.678413644815799]
R^T = [0.8491402669281291, -0.5272840804832376, 0.03053367894919912;
0.09165187733023789, 0.09016969161333116, -0.9917002370152983;
0.5201545351749511, 0.8448910729693515, 0.1248932918680087]
t^T = [-1.136349276840491;
1.905511371012304;
1.969516166693823]
************************************ICP 3d-3d by SVD****************************************
************************************ICP 3d-3d by g2o****************************************
optimization costs time: 0.000511787 seconds.
after optimization:
T=
0.849513 -0.526723 0.0298586 -0.142711
0.0919136 0.0920349 -0.991505 1.65126
0.5195 0.84504 0.126598 1.84464
0 0 0 1
R=
[0.8495126322540705, -0.5267226536295997, 0.02985856316290605;
0.09191361179951504, 0.09203490651701673, -0.9915046464582867;
0.5194999283991564, 0.8450401304883673, 0.1265977972062496]
t = [-0.1427112070493184, 1.651257756926735, 1.844644951727316]
验证 p2 = R*P1 +t
p1 = [-0.0374123, -0.830816, 2.7448]
p2 = [-0.00596015, -0.399253, 1.4784]
(R*p1+t) = [0.3450717715204448;
-1.15012703155679;
1.470622311290227]
p1 = [-0.243698, -0.117719, 1.5848]
p2 = [-0.280475, -0.0914872, 1.5526]
(R*p1+t) = [-0.2404107766432324;
0.0466877051455481;
1.819198314809955]
p1 = [-0.627753, 0.160186, 1.3396]
p2 = [-1.03421, 0.231768, 2.0712]
(R*p1+t) = [-0.7203702645515666;
0.2800818852018891;
1.823481775685739]
p1 = [-0.627221, 0.101454, 1.3116]
p2 = [-0.984101, 0.138962, 1.941]
(R*p1+t) = [-0.6898191354798927;
0.3024874237171442;
1.770581847254317]
p1 = [0.402045, -0.341821, 2.2068]
p2 = [0.376626, -0.261343, 2.2068]
(R*p1+t) = [0.4447679999908566;
-0.5313007260737734;
2.044030900480374]
************************************ICP 3d-3d by g2o****************************************
进程已结束,退出代码0
由于之前的铺垫,本次工程遇到的报错较少,【解决问题】【SLAM十四讲第7讲】【关于自己创建工程,遇到的CSparse警告的解决方案】
摘自6.2
//cv::Mat &R, cv::Mat &t
// 把位姿转换为Mat类型
Eigen::Matrix3d R_ = v->estimate().rotationMatrix();
Eigen::Vector3d t_ = v->estimate().translation();
R = (cv::Mat_<double>(3,3) << R_(0, 0), R_(0, 1), R_(0, 2),
R_(1, 0), R_(1, 1), R_(1, 2),
R_(2, 0), R_(2, 1), R_(2, 2));
t = (cv::Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
摘自5.2
const cv::Mat & K,
Eigen::Matrix3d K_eigen;
K_eigen <<
K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);
optimizer.setVerbose(false);
,其中true就是输出调式信息,false就是不输出,这个调试信息是库输出的,不需要自己编写,也就是每次迭代的时间等信息