## Upgrading CMake: the original 3.5.1 is too old, 3.10 or newer is required
## I downloaded the latest release from https://cmake.org/download/
## Download cmake-3.22.1-linux-x86_64.tar.gz and extract it: tar -xzvf cmake-3.22.1-linux-x86_64.tar.gz
## Move it wherever you like; I put it here:
megvii@mayunfei:/usr/local/cmake$ ls
cmake-3.22.1-linux-x86_64 cmake-3.22.1-linux-x86_64.tar.gz
## Add its bin directory to PATH and it is ready to use; to always pick up this version, put the export line in your .bashrc
export PATH=/usr/local/cmake/cmake-3.22.1-linux-x86_64/bin:$PATH
## Check that it worked
megvii@mayunfei:~$ cmake --version
cmake version 3.22.1
## Pangolin produced a pile of build errors; it turned out the newest version was the culprit, so drop back from v0.6 to v0.5
### Install third-party dependencies
sudo apt-get install libglew-dev
sudo apt-get install cmake
sudo apt-get install libboost-dev libboost-thread-dev libboost-filesystem-dev
### Clone and check out the older revision
git clone https://github.com/stevenlovegrove/Pangolin.git
cd Pangolin
git reset --hard 7987c9b
mkdir build && cd build
cmake ..
make -j4
sudo make install
# Thirdparty/DBoW2/CMakeLists.txt
find_package(OpenCV 4.0 QUIET)
## Replace with
set(OpenCV_DIR /opt/ros/kinetic/share/OpenCV-3.3.1-dev)
find_package(OpenCV REQUIRED)
# CMakeLists.txt
find_package(OpenCV 4.0)
## Replace with
set(OpenCV_DIR /opt/ros/kinetic/share/OpenCV-3.3.1-dev)
find_package(OpenCV REQUIRED)
# CMakeLists.txt
find_package(Eigen3 3.1.0 REQUIRED)
find_package(Pangolin REQUIRED)
find_package(realsense2)
include_directories(
${PROJECT_SOURCE_DIR}
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/include/CameraModels
${EIGEN3_INCLUDE_DIR}
${Pangolin_INCLUDE_DIRS}
)
## Change to:
include_directories("/usr/include/eigen3")
#find_package(Eigen3 3.1.0 REQUIRED)
find_package(Pangolin REQUIRED)
find_package(realsense2)
include_directories(
${PROJECT_SOURCE_DIR}
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/include/CameraModels
#${EIGEN3_INCLUDE_DIR}
${Pangolin_INCLUDE_DIRS}
)
# Error 1
/home/megvii/visual_slam/src/ORB_SLAM3/src/LocalMapping.cc:629:49: error: no match for ‘operator/’ (operand types are ‘cv::Matx<float, 3, 1>’ and ‘float’)
x3D = x3D_h.get_minor<3,1>(0,0) / x3D_h(3);
## Replace with:
// x3D = x3D_h.get_minor<3,1>(0,0) / x3D_h(3);
x3D = cv::Matx31f(x3D_h.get_minor<3,1>(0,0)(0) / x3D_h(3)
, x3D_h.get_minor<3,1>(0,0)(1) / x3D_h(3)
, x3D_h.get_minor<3,1>(0,0)(2) / x3D_h(3));
# Error 2
/home/megvii/visual_slam/src/ORB_SLAM3/src/CameraModels/KannalaBrandt8.cpp:534:41: error: no match for ‘operator/’ (operand types are ‘cv::Matx<float, 3, 1>’ and ‘float’)
x3D = x3D_h.get_minor<3,1>(0,0) / x3D_h(3);
## Replace with:
// x3D = x3D_h.get_minor<3,1>(0,0) / x3D_h(3);
x3D = cv::Matx31f(x3D_h.get_minor<3,1>(0,0)(0) / x3D_h(3)
, x3D_h.get_minor<3,1>(0,0)(1) / x3D_h(3)
, x3D_h.get_minor<3,1>(0,0)(2) / x3D_h(3));
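As a quick sanity check, here is a minimal standalone sketch (it assumes only OpenCV's core module, not ORB_SLAM3, and uses made-up sample values) showing that the replacement divides every component of the 3x1 minor by the homogeneous coordinate, which is what the original operator/ expression intended:
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Matx41f x3D_h(2.f, 4.f, 6.f, 2.f);        // homogeneous point (x, y, z, w)
    cv::Matx31f m = x3D_h.get_minor<3,1>(0,0);    // the 3x1 minor (x, y, z)
    cv::Matx31f x3D(m(0) / x3D_h(3),              // divide each component by w
                    m(1) / x3D_h(3),
                    m(2) / x3D_h(3));
    std::cout << x3D(0) << " " << x3D(1) << " " << x3D(2) << std::endl;  // prints 1 2 3
    return 0;
}
It prints 1 2 3, the dehomogenized point, so the per-component form is equivalent to the one-line division that fails to compile against this OpenCV version.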
Dataset download:
/***/MH01/mav0
After setting the dataset path, the example scripts can be run directly:
euroc_eval_examples.sh: pathDatasetEuroc='/***'
euroc_examples.sh: pathDatasetEuroc='/***'
Running the examples this way is much simpler.
Monocular example:
#!/bin/bash
pathDatasetEuroc='/media/megvii/datasheets' # Example: change this to your dataset path
#------------- Single sequence -------------
# Monocular Examples
echo "Launching MH01 with Monocular sensor"
./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular/EuRoC_TimeStamps/MH01.txt dataset-MH01_mono
## Arguments: ORB vocabulary path + monocular yaml + dataset directory + timestamps .txt + output name dataset-MH01_mono
#------------- Multiple sequences (multi-session) -------------
# MultiSession Monocular Examples
echo "Launching Machine Hall with Monocular sensor"
./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular/EuRoC_TimeStamps/MH01.txt "$pathDatasetEuroc"/MH02 ./Monocular/EuRoC_TimeStamps/MH02.txt "$pathDatasetEuroc"/MH03 ./Monocular/EuRoC_TimeStamps/MH03.txt "$pathDatasetEuroc"/MH04 ./Monocular/EuRoC_TimeStamps/MH04.txt "$pathDatasetEuroc"/MH05 ./Monocular/EuRoC_TimeStamps/MH05.txt dataset-MH01_to_MH05_mono
## Arguments: ORB vocabulary path + monocular yaml + dataset directory 1 + timestamps 1 .txt + dataset directory 2 + timestamps 2 .txt + ... + output name dataset-MH01_to_MH05_mono
Function walkthrough
Main function
///< main function
int main(int argc, char **argv)
{
if(argc < 5) // at least 5 arguments are required
{
cerr << endl << "Usage: ./mono_euroc path_to_vocabulary path_to_settings path_to_sequence_folder_1 path_to_times_file_1 (path_to_image_folder_2 path_to_times_file_2 ... path_to_image_folder_N path_to_times_file_N) (trajectory_file_name)" << endl;
return 1;
}
const int num_seq = (argc-3)/2; // number of sequences: 3 fixed arguments plus n pairs of (image folder, timestamps file)
cout << "num_seq = " << num_seq << endl;
bool bFileName= (((argc-3) % 2) == 1); // an odd remainder means one extra argument, the trajectory file name, was supplied
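// Example: the single-sequence command above,
//   ./Monocular/mono_euroc ../Vocabulary/ORBvoc.txt ./Monocular/EuRoC.yaml "$pathDatasetEuroc"/MH01 ./Monocular/EuRoC_TimeStamps/MH01.txt dataset-MH01_mono
// gives argc = 6, so num_seq = (6-3)/2 = 1 and (6-3)%2 == 1, i.e. the last
// argument dataset-MH01_mono is used as the trajectory file name below.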
string file_name;
if (bFileName)
{
file_name = string(argv[argc-1]);
cout << "file name: " << file_name << endl;
}
// Load all sequences
int seq;
vector< vector<string> > vstrImageFilenames;
vector< vector<double> > vTimestampsCam;
vector<int> nImages;
vstrImageFilenames.resize(num_seq);
vTimestampsCam.resize(num_seq);
nImages.resize(num_seq);
int tot_images = 0;
for (seq = 0; seq<num_seq; seq++)
{
cout << "Loading images for sequence " << seq << "...";
// arguments 3, 5, 7, ... : dataset directory, with "/mav0/cam0/data" appended
// arguments 4, 6, 8, ... : timestamps file path
// fills two vectors: the full path of every image and the value of every timestamp
LoadImages(string(argv[(2*seq)+3]) + "/mav0/cam0/data", string(argv[(2*seq)+4]), vstrImageFilenames[seq], vTimestampsCam[seq]);
cout << "LOADED!" << endl;
nImages[seq] = vstrImageFilenames[seq].size();
tot_images += nImages[seq];
}
// Vector for tracking time statistics
vector<float> vTimesTrack;
vTimesTrack.resize(tot_images);// total number of images
cout << endl << "-------" << endl;
cout.precision(17);
// Create SLAM system. It initializes all system threads and gets ready to process frames.
/// create the ORB-SLAM3 system
ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, true);
for (seq = 0; seq<num_seq; seq++) // for each sequence
{
// Main loop
cv::Mat im;
int proccIm = 0;
for(int ni=0; ni<nImages[seq]; ni++, proccIm++) // iterate over the images of this sequence
{
// Read image from file, unmodified (IMREAD_UNCHANGED)
im = cv::imread(vstrImageFilenames[seq][ni],cv::IMREAD_UNCHANGED);
double tframe = vTimestampsCam[seq][ni];
if(im.empty()) // bail out if the image failed to load
{
cerr << endl << "Failed to load image at: "
<< vstrImageFilenames[seq][ni] << endl;
return 1;
}
#ifdef COMPILEDWITHC11
std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
#else
std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now();
#endif
// Pass the image to the SLAM system
SLAM.TrackMonocular(im,tframe); // track this frame
#ifdef COMPILEDWITHC11
std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
#else
std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now();
#endif
#ifdef REGISTER_TIMES
double t_track = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t2 - t1).count();
SLAM.InsertTrackTime(t_track);
#endif
double ttrack= std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
vTimesTrack[ni]=ttrack;
// Wait to load the next frame
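// T is the interval until the next frame's timestamp (for the last frame the
// previous interval is reused); if tracking finished faster than that, sleep
// for the remainder so the sequence plays back at its recorded frame rate.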
double T=0;
if(ni<nImages[seq]-1)
T = vTimestampsCam[seq][ni+1]-tframe;
else if(ni>0)
T = tframe-vTimestampsCam[seq][ni-1];
if(ttrack<T)
usleep((T-ttrack)*1e6);
}
if(seq < num_seq - 1)
{
cout << "Changing the dataset" << endl;
SLAM.ChangeDataset();// tell SLAM to switch to the next dataset
}
}
// Stop all threads
SLAM.Shutdown();
// Save camera trajectory
if (bFileName) // save the keyframe and frame trajectories under the given name
{
const string kf_file = "kf_" + string(argv[argc-1]) + ".txt";
const string f_file = "f_" + string(argv[argc-1]) + ".txt";
SLAM.SaveTrajectoryEuRoC(f_file);
SLAM.SaveKeyFrameTrajectoryEuRoC(kf_file);
}
else
{
SLAM.SaveTrajectoryEuRoC("CameraTrajectory.txt");
SLAM.SaveKeyFrameTrajectoryEuRoC("KeyFrameTrajectory.txt");
}
return 0;
}
Loading the image paths
// open the timestamps file and read one timestamp per line
/**
* @brief LoadImages builds the full path of every image to load
* @param strImagePath directory containing the images
* @param strPathTimes timestamps file, one timestamp per line
* @param vstrImages output vector of image paths
* @param vTimeStamps output vector of image timestamps
*/
void LoadImages(const string &strImagePath, const string &strPathTimes,
vector<string> &vstrImages, vector<double> &vTimeStamps)
{
ifstream fTimes;
fTimes.open(strPathTimes.c_str());
vTimeStamps.reserve(5000);
vstrImages.reserve(5000);
while(!fTimes.eof())
{
string s;
getline(fTimes,s);
if(!s.empty())
{
stringstream ss;
ss << s;
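// in EuRoC each image file is named by its timestamp in nanoseconds, so the same
// string gives both the file name and, divided by 1e9 below, the time in seconds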
vstrImages.push_back(strImagePath + "/" + ss.str() + ".png");
double t;
ss >> t;
vTimeStamps.push_back(t/1e9);
}
}
}
ros_mono_inertial.cc
Function walkthrough
Main function
int main(int argc, char **argv)
{
ros::init(argc, argv, "Mono_Inertial");
ros::NodeHandle n("~");
ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
bool bEqual = false;
if(argc < 3 || argc > 4)
{
cerr << endl << "Usage: rosrun ORB_SLAM3 Mono_Inertial path_to_vocabulary path_to_settings [do_equalize]" << endl;
ros::shutdown();
return 1;
}
if(argc==4)
{
std::string sbEqual(argv[3]);
if(sbEqual == "true")
bEqual = true;
}
// Create SLAM system. It initializes all system threads and gets ready to process frames.
ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::IMU_MONOCULAR,true);
ImuGrabber imugb;
ImageGrabber igb(&SLAM,&imugb,bEqual); // TODO
// Maximum delay, 5 seconds
ros::Subscriber sub_imu = n.subscribe("/imu", 1000, &ImuGrabber::GrabImu, &imugb);
ros::Subscriber sub_img0 = n.subscribe("/camera/image_raw", 100, &ImageGrabber::GrabImage,&igb);
std::thread sync_thread(&ImageGrabber::SyncWithImu,&igb);
ros::spin();
return 0;
}
IMU + image grabbing
class ImuGrabber
{
public:
ImuGrabber(){};
void GrabImu(const sensor_msgs::ImuConstPtr &imu_msg);
queue<sensor_msgs::ImuConstPtr> imuBuf;
std::mutex mBufMutex;
};
// simply push the IMU message into imuBuf
void ImuGrabber::GrabImu(const sensor_msgs::ImuConstPtr &imu_msg)
{
mBufMutex.lock();
imuBuf.push(imu_msg);
mBufMutex.unlock();
return;
}
class ImageGrabber
{
public:
ImageGrabber(ORB_SLAM3::System* pSLAM, ImuGrabber *pImuGb, const bool bClahe): mpSLAM(pSLAM), mpImuGb(pImuGb), mbClahe(bClahe){}
void GrabImage(const sensor_msgs::ImageConstPtr& msg);
cv::Mat GetImage(const sensor_msgs::ImageConstPtr &img_msg);
void SyncWithImu();
queue<sensor_msgs::ImageConstPtr> img0Buf;
std::mutex mBufMutex;
ORB_SLAM3::System* mpSLAM;
ImuGrabber *mpImuGb;
const bool mbClahe;
cv::Ptr<cv::CLAHE> mClahe = cv::createCLAHE(3.0, cv::Size(8, 8));
};
// the callback just pushes the message into the queue, keeping only the latest image
void ImageGrabber::GrabImage(const sensor_msgs::ImageConstPtr &img_msg)
{
mBufMutex.lock();
if (!img0Buf.empty())
img0Buf.pop();
img0Buf.push(img_msg);
mBufMutex.unlock();
}
IMU + image synchronization
void ImageGrabber::SyncWithImu()
{
while(1)
{
cv::Mat im;
double tIm = 0;
if (!img0Buf.empty()&&!mpImuGb->imuBuf.empty())
{
tIm = img0Buf.front()->header.stamp.toSec();
// make sure the IMU buffer already covers the image timestamp; otherwise wait for more IMU data
if(tIm>mpImuGb->imuBuf.back()->header.stamp.toSec())
continue;
{ // pop the image from the buffer
this->mBufMutex.lock();
im = GetImage(img0Buf.front());
img0Buf.pop();
this->mBufMutex.unlock();
}
// collect all IMU measurements up to the image time: {timestamp, linear acceleration, angular velocity}
vector<ORB_SLAM3::IMU::Point> vImuMeas;
mpImuGb->mBufMutex.lock();
if(!mpImuGb->imuBuf.empty())
{
// Load imu measurements from buffer
vImuMeas.clear();
while(!mpImuGb->imuBuf.empty() && mpImuGb->imuBuf.front()->header.stamp.toSec()<=tIm)
{
double t = mpImuGb->imuBuf.front()->header.stamp.toSec();
cv::Point3f acc(mpImuGb->imuBuf.front()->linear_acceleration.x, mpImuGb->imuBuf.front()->linear_acceleration.y, mpImuGb->imuBuf.front()->linear_acceleration.z);
cv::Point3f gyr(mpImuGb->imuBuf.front()->angular_velocity.x, mpImuGb->imuBuf.front()->angular_velocity.y, mpImuGb->imuBuf.front()->angular_velocity.z);
vImuMeas.push_back(ORB_SLAM3::IMU::Point(acc,gyr,t));
mpImuGb->imuBuf.pop();
}
}
mpImuGb->mBufMutex.unlock();
if(mbClahe)
mClahe->apply(im,im);
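// feed the image together with all IMU measurements collected before it to the tracker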
mpSLAM->TrackMonocular(im,tIm,vImuMeas);
}
std::chrono::milliseconds tSleep(1);
std::this_thread::sleep_for(tSleep);
}
}
// cv::CLAHE is another histogram equalization algorithm. CLAHE differs from AHE in that it limits the contrast amplification within each region and uses interpolation to speed up the computation. It effectively enhances local image contrast, bringing out more edge information that helps segmentation, and it also mitigates the noise amplification problem of AHE. In addition, CLAHE is commonly used for image dehazing.
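For reference, a minimal standalone sketch using the same CLAHE configuration as the mClahe member above (clip limit 3.0, 8x8 tiles); the input and output file names are only an illustration:
#include <opencv2/opencv.hpp>

int main()
{
    // hypothetical grayscale input frame
    cv::Mat im = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
    if (im.empty())
        return 1;

    // same parameters as mClahe in ImageGrabber
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(8, 8));
    cv::Mat enhanced;
    clahe->apply(im, enhanced);   // contrast-limited adaptive histogram equalization

    cv::imwrite("frame_clahe.png", enhanced);
    return 0;
}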