double RealTimeCorrelativeScanMatcher2D::Match(
const transform::Rigid2d& initial_pose_estimate,
const sensor::PointCloud& point_cloud, const Grid2D& grid,
transform::Rigid2d* pose_estimate)
输入 :
预计位姿(航位推算得到的位姿)
点云数据
概率栅格地图
估计位姿(引用输出)
返回:
最佳得分
步骤:
// 初始位姿的角度
//将激光转到初始位姿的角度——点云开始为:baselink坐标系
//确定查询窗口 —— 角度+空间平面
//得到旋转的点云——查询角度范围 ×2 /角度分辨率——点云:对各个角度旋转
//将点云转到当前位姿下——平移
//生成平移的候选值, 只记录 x,y的偏移
//求取得分——遍历候选值, 再遍历激光点,遍历激光点时将xy的偏移量传入
//取得分最大的候选值的——偏移量
//输入的初始位姿 + 偏移量
void CeresScanMatcher2D::Match(const Eigen::Vector2d& target_translation,
const transform::Rigid2d& initial_pose_estimate,
const sensor::PointCloud& point_cloud,
const Grid2D& grid,
transform::Rigid2d* const pose_estimate,
ceres::Solver::Summary* const summary)
输入 :
目标平移量 target_translation(先验位姿的平移部分)
scanMatch匹配后位姿
点云数据
概率栅格地图
优化后位姿(输出引用)
ceres 求解摘要 summary(输出)
步骤:
构建观测残差——栅格不连续,三次卷积插值 使其连续可导
构建平移变化量残差
构建旋转变化量残差
ceres 求解,得pose+cost
返回: 无(void 函数);优化后的位姿与求解代价通过 pose_estimate 和 summary 输出
https://www.jianshu.com/p/9a8089cf7d3f
https://blog.csdn.net/xiaoma_bk/article/details/85046905
参考:https://blog.csdn.net/datase/article/details/78163063
Beam Model我将它叫做测量光束模型。个人理解,它是一种完全的物理模型,只针对激光发出的测量光束建模。将一次测量误差分解为四个误差。
似然场模型,和测量光束模型相比,考虑了地图的因素。不再是对激光的扫描线物理建模,而是考虑测量到的物体的因素。
似然场模型本身是一个传感器观测模型,之所以可以实现扫描匹配,是通过划分栅格,步进的方式求取最大的Score,将此作为最佳的位姿。
(以下没有具体分析)
% Fragment (quoted from the referenced blog post): per-beam update loop of a
% likelihood-field particle filter, plus ray drawing for visualization.
% Relies on variables defined outside this excerpt: zt (range/bearing
% measurements), x (robot pose for drawing), X/W (particles/weights), j
% (particle index from an enclosing loop), grid_dim, std_hit, Z_weights,
% Z_max, nearest_wall (distance map), xsensor, the end_points/missed_points
% accumulators, and the handle_sensor_ray plot handles.
for k=1:size(zt,1)
% Half-cell offset for the drawn endpoint; sign chosen by the beam angle.
if zt(k,2)>0
d = -grid_dim/2;
else
d = grid_dim/2;
end
% Global beam angle = beam bearing + robot heading, wrapped to [-pi, pi].
phi = pi_to_pi(zt(k,2) + x(3));
if zt(k,1) ~= Z_max
% Hit: draw the ray out to the measured endpoint ...
ppx = [x(1),x(1) + zt(k,1)*cos(phi) + d];
ppy = [x(2),x(2) + zt(k,1)*sin(phi) + d];
end_points = [end_points;ppx(2),ppy(2)];
% ... and reweight particle j by the likelihood-field measurement model.
wm = likelihood_field_range_finder_model(X(j,:)',xsensor,...
zt(k,:)',nearest_wall, grid_dim, std_hit,Z_weights,Z_max);
W(j) = W(j) * wm;
else
% Max-range reading: draw a simulated ray only; no weight update.
dist = Z_max + std_hit*randn(1);
ppx = [x(1),x(1) + dist*cos(phi) + d];
ppy = [x(2),x(2) + dist*sin(phi) + d];
missed_points = [missed_points;ppx(2),ppy(2)];
end
% Refresh this beam's line object in the figure.
set(handle_sensor_ray(k),'XData', ppx, 'YData', ppy)
end
function q = likelihood_field_range_finder_model(X,x_sensor,zt,N,dim,std_hit,Zw,z_max)
% Likelihood-field measurement model for a range finder.
% Returns the probability q of one beam measurement zt = [range; bearing]
% given robot pose X = [x; y; theta], the sensor mounting offset x_sensor,
% and the precomputed nearest-obstacle distance map N (cell size dim).
% Zw = [z_hit; z_rand; z_max_weight] are the mixture weights; std_hit is
% the hit-noise standard deviation.  X, zt, x_sensor are column vectors.
[nx, ny] = size(N);

% Max-range readings carry no endpoint information: probability 1.
q = 1;
if zt(1) == z_max
    return
end

% Beam direction in the world frame: robot heading + beam bearing.
heading = X(3);
phi = pi_to_pi(heading + zt(2));

% Rotate the sensor's mounting offset into the world frame.
R = [cos(heading), -sin(heading); sin(heading), cos(heading)];

% Mixture parameters.
sigmaR = std_hit;
zhit   = Zw(1);
zrand  = Zw(2);
zmax   = Zw(3);

% World coordinates of the measured beam endpoint.
endpoint = X(1:2) + R*x_sensor + zt(1)*[cos(phi); sin(phi)];

% Grid cell containing the endpoint (1-based indices).
ix = floor(endpoint(1)/dim) + 1;
iy = floor(endpoint(2)/dim) + 1;

% Endpoint outside the map: unknown area, uniform likelihood over [0, z_max].
if ix < 1 || ix > nx || iy < 1 || iy > ny
    q = 1.0/z_max;
    return
end

% Gaussian hit likelihood on the distance to the nearest obstacle,
% mixed with a uniform random-measurement term.
d_nearest = N(ix, iy);
q = zhit*gauss_1D(0, sigmaR, d_nearest) + zrand/zmax;
end
参考:https://blog.csdn.net/djfjkj52/article/details/106490787
蒙特卡洛方法
角度直方图
属于ICP系列,经典ICP方法,点到线距离ICP
% Fast scan matching, note this may get stuck in local minimas
function [pose, bestHits] = FastMatch(gridmap, scan, pose, searchResolution)
% Grid map information
metricMap = gridmap.metricMap;
ipixel = 1 / gridmap.pixelSize;
minX = gridmap.topLeftCorner(1);
minY = gridmap.topLeftCorner(2);
nCols = size(metricMap, 2);
nRows = size(metricMap, 1);
% Go down the hill
maxIter = 50;
maxDepth = 3;
iter = 0;
depth = 0;
pixelScan = scan * ipixel;
bestPose = pose;
bestScore = Inf;
t = searchResolution(1);
r = searchResolution(3);
while iter < maxIter
noChange = true;
% Rotation
for theta = pose(3) + [-r, 0, r]
ct = cos(theta);
st = sin(theta);
S = pixelScan * [ct, st; -st, ct];
% Translation
for tx = pose(1) + [-t, 0, t]
Sx = round(S(:,1)+(tx-minX)*ipixel) + 1;
for ty = pose(2) + [-t, 0, t]
Sy = round(S(:,2)+(ty-minY)*ipixel) + 1;
isIn = Sx>1 & Sy>1 & Sx maxDepth
break;
end
end
pose = bestPose;
iter = iter + 1;
end
正态分布变换
结合概率的方法