In image processing, operations that remap pixel positions, such as undistortion and projective (perspective) warping, can be made much more efficient when the transform is fixed (does not change at runtime): compute the map once up front, then at runtime only perform the lookup-table (LUT) remap, instead of recomputing the map on every call. This is well suited to batch-processing scenarios such as video streams.
The prerequisite is that the transform matrix and the camera intrinsic matrix stay constant.
The MapX and MapY produced by initUndistortRectifyMap() can be passed straight to remap(); the maps only need to be generated once:
cv::Mat newK, MapX, MapY;
// Estimate a new camera matrix for the undistorted view, then build the maps once.
fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, srcSize, Matx33d::eye(), newK, balance, srcSize, fovScale);
fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, srcSize, CV_16SC2, MapX, MapY);
// Per frame, only the LUT lookup and interpolation remain.
cv::remap(inputImg, undistorted, MapX, MapY, INTER_LINEAR, BORDER_CONSTANT);
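For a video stream, the payoff is that map generation happens once, outside the capture loop. A minimal self-contained sketch; the calibration numbers and the camera index are placeholders, not values from this article:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Placeholder calibration values; replace with your own.
    Matx33d K(400, 0, 320, 0, 400, 240, 0, 0, 1);
    Vec4d D(0.1, -0.05, 0.01, -0.002);
    Size srcSize(640, 480);
    double balance = 0.0, fovScale = 1.0;

    // Maps are generated once, before the loop.
    Mat newK, MapX, MapY;
    fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, srcSize, Matx33d::eye(), newK, balance, srcSize, fovScale);
    fisheye::initUndistortRectifyMap(K, D, Matx33d::eye(), newK, srcSize, CV_16SC2, MapX, MapY);

    VideoCapture cap(0); // placeholder camera index
    Mat frame, undistorted;
    while (cap.read(frame)) // per frame: LUT lookup only, no map computation
    {
        remap(frame, undistorted, MapX, MapY, INTER_LINEAR, BORDER_CONSTANT);
        imshow("undistorted", undistorted);
        if (waitKey(1) == 27) break;
    }
    return 0;
}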
The map for a projective transform can be computed by hand from the homography matrix:
// Calculate projection maps from the H matrix of each camera.
for (int i = 0; i < camNum; i++)
{
    cv::Mat inverseTransMatrix;
    cv::invert(vecH[i], inverseTransMatrix);

    // Generate the warp maps: for every destination pixel, compute its
    // source position through the inverse homography.
    cv::Mat map_x, map_y, srcTM;
    srcTM = inverseTransMatrix.clone(); // If WARP_INVERSE, set srcTM to the transformation matrix itself
    map_x.create(imgSize, CV_32FC1);
    map_y.create(imgSize, CV_32FC1);

    double M11 = srcTM.at<double>(0,0), M12 = srcTM.at<double>(0,1), M13 = srcTM.at<double>(0,2);
    double M21 = srcTM.at<double>(1,0), M22 = srcTM.at<double>(1,1), M23 = srcTM.at<double>(1,2);
    double M31 = srcTM.at<double>(2,0), M32 = srcTM.at<double>(2,1), M33 = srcTM.at<double>(2,2);

    for (int y = 0; y < imgSize.height; y++) {
        double fy = (double)y;
        for (int x = 0; x < imgSize.width; x++) {
            double fx = (double)x;
            // Perspective division: w is the homogeneous scale factor.
            double w = (M31 * fx) + (M32 * fy) + M33;
            w = w != 0.0 ? 1.0 / w : 0.0;
            float new_x = (float)(((M11 * fx) + (M12 * fy) + M13) * w);
            float new_y = (float)(((M21 * fx) + (M22 * fy) + M23) * w);
            map_x.at<float>(y,x) = new_x;
            map_y.at<float>(y,x) = new_y;
        }
    }

    // Convert to a fixed-point representation, which remap() processes faster
    // (in practice the measured speedup was not significant). Note that
    // convertMaps() takes the destination map type as its fifth argument.
    cv::Mat transformation_x, transformation_y;
    cv::convertMaps(map_x, map_y, transformation_x, transformation_y, CV_16SC2, false);
    vecMapHX[i] = transformation_x.clone();
    vecMapHY[i] = transformation_y.clone();
}
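At runtime, each camera frame is then warped with a single remap() call. A minimal sketch, assuming srcImgs and dstImgs are std::vector<cv::Mat> of size camNum (names introduced here for illustration):
// Per-frame application: one LUT remap per camera. The CV_16SC2 map carries
// both coordinates; the CV_16UC1 map holds the interpolation-table indices.
for (int i = 0; i < camNum; i++)
{
    cv::remap(srcImgs[i], dstImgs[i], vecMapHX[i], vecMapHY[i],
              cv::INTER_LINEAR, cv::BORDER_CONSTANT);
}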
In the special case where the inverses of both maps are known analytically, another approach is to compute the combined map by hand. This manual approach is more accurate than the double-remap approach below, because it involves no interpolation of the coordinate maps. Most applications fit this special case: the first map corresponds to image undistortion (whose inverse operation, image distortion, follows a well-known analytic model), and the second map corresponds to a perspective transform (whose inverse can be expressed analytically).
for (int i = 0; i < camNum; i++)
{
    int dst_width = imgSize.width, dst_height = imgSize.height;
    Mat newK;
    fisheye::estimateNewCameraMatrixForUndistortRectify(vecK.at(i), vecD.at(i), srcSize, Matx33d::eye(), newK, balance, srcSize, fovScale);
    cv::Mat Hinv = vecH[i].inv(), Kinv = newK.inv();

    cv::Mat map_undist_warped_x32f(dst_height, dst_width, CV_32F);
    cv::Mat map_undist_warped_y32f(dst_height, dst_width, CV_32F);
    for (int y = 0; y < dst_height; ++y) {
        // Back-project each destination pixel through the inverse homography
        // and the inverse of the new camera matrix to normalized coordinates.
        std::vector<cv::Point3d> pts_undist_norm(dst_width);
        for (int x = 0; x < dst_width; ++x) {
            cv::Mat_<double> pt(3,1);
            pt << x, y, 1;
            pt = Kinv * Hinv * pt;
            pt(2) = pt(2) != 0.0 ? 1.0 / pt(2) : 0.0;
            pts_undist_norm[x].x = pt(0) * pt(2);
            pts_undist_norm[x].y = pt(1) * pt(2);
            pts_undist_norm[x].z = 1;
        }
        // Apply the analytic fisheye distortion model to land in the source image.
        std::vector<cv::Point2d> pts_dist;
        fisheye::projectPoints(pts_undist_norm, pts_dist, Mat::zeros(3,1,CV_32F), Mat::zeros(3,1,CV_32F), vecK[i], vecD[i]);
        for (int x = 0; x < dst_width; ++x) {
            map_undist_warped_x32f.at<float>(y,x) = (float)pts_dist[x].x;
            map_undist_warped_y32f.at<float>(y,x) = (float)pts_dist[x].y;
        }
    }

    // CPU version: optionally convert to fixed-point maps.
    // cv::Mat map_undist_warped_x16s, map_undist_warped_y16s;
    // cv::convertMaps(map_undist_warped_x32f, map_undist_warped_y32f, map_undist_warped_x16s, map_undist_warped_y16s, CV_16SC2);
    // vecDoubleMapX[i] = map_undist_warped_x16s.clone();
    // vecDoubleMapY[i] = map_undist_warped_y16s.clone();
    vecDoubleMapX[i] = map_undist_warped_x32f.clone();
    vecDoubleMapY[i] = map_undist_warped_y32f.clone();

    // GPU version: to use the NPP library for GPU-accelerated remap,
    // the maps must be copied into device memory.
    Npp32f* d_xMap = NULL;
    Npp32f* d_yMap = NULL;
    const size_t mapMemSize = imgSize.height * imgSize.width * sizeof(float);
    cudaMalloc((void**)&d_xMap, mapMemSize);
    cudaMalloc((void**)&d_yMap, mapMemSize);
    cudaMemcpy(d_xMap, vecDoubleMapX[i].data, mapMemSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_yMap, vecDoubleMapY[i].data, mapMemSize, cudaMemcpyHostToDevice);
    d_xMaps.push_back(d_xMap);
    d_yMaps.push_back(d_yMap);
}
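Per frame, the remap itself can then run entirely on the GPU. A minimal sketch of one possible call, using nppiRemap_8u_C3R (the 3-channel 8-bit variant; NPP provides others for other formats). It assumes a continuous 8UC3 input and omits the CUDA/NPP error checks real code should have:
#include <opencv2/opencv.hpp>
#include <cuda_runtime.h>
#include <npp.h>

// Remap one 8UC3 frame on the GPU with the device maps uploaded above.
// Sketch only: assumes src.isContinuous(); no error checking.
cv::Mat remapOnGpu(const cv::Mat& src, const Npp32f* d_xMap, const Npp32f* d_yMap)
{
    const int step = src.cols * 3 * (int)sizeof(Npp8u); // tightly packed rows
    const size_t imgBytes = (size_t)step * src.rows;
    Npp8u *d_src = NULL, *d_dst = NULL;
    cudaMalloc((void**)&d_src, imgBytes);
    cudaMalloc((void**)&d_dst, imgBytes);
    cudaMemcpy(d_src, src.data, imgBytes, cudaMemcpyHostToDevice);

    NppiSize size = { src.cols, src.rows };
    NppiRect srcROI = { 0, 0, src.cols, src.rows };
    const int mapStep = src.cols * (int)sizeof(Npp32f);
    nppiRemap_8u_C3R(d_src, size, step, srcROI,
                     d_xMap, mapStep, d_yMap, mapStep,
                     d_dst, step, size, NPPI_INTER_LINEAR);

    cv::Mat dst(src.size(), CV_8UC3);
    cudaMemcpy(dst.data, d_dst, imgBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_dst);
    return dst;
}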
When the combined map is hard to derive by hand, the second map can simply be applied to the first map via remap():
cv::remap(map1_x, combined_mapx, map2_x, map2_y, cv::INTER_LINEAR);
cv::remap(map1_y, combined_mapy, map2_x, map2_y, cv::INTER_LINEAR);
// The combined maps are combined_mapx and combined_mapy; remap can use them directly.
cv::remap(input_img, res_img, combined_mapx, combined_mapy, cv::INTER_LINEAR);
The caveat is that the result produced by the combined map differs slightly from running the two remaps separately, because combining the maps interpolates the coordinate maps themselves. The analysis is here:
https://stackoverflow.com/questions/29944709/how-to-combine-two-remap-operations-into-one
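To quantify the discrepancy on your own data, the two pipelines can be compared directly. A minimal sketch, reusing input_img and the CV_32FC1 maps map1_x/map1_y and map2_x/map2_y from above:
// Two sequential remaps (reference result): first map1, then map2.
cv::Mat tmp, twoPass;
cv::remap(input_img, tmp, map1_x, map1_y, cv::INTER_LINEAR);
cv::remap(tmp, twoPass, map2_x, map2_y, cv::INTER_LINEAR);

// One remap through the combined map.
cv::Mat combined_mapx, combined_mapy, onePass;
cv::remap(map1_x, combined_mapx, map2_x, map2_y, cv::INTER_LINEAR);
cv::remap(map1_y, combined_mapy, map2_x, map2_y, cv::INTER_LINEAR);
cv::remap(input_img, onePass, combined_mapx, combined_mapy, cv::INTER_LINEAR);

// Per-pixel difference between the two results.
cv::Mat diff;
cv::absdiff(twoPass, onePass, diff);
std::cout << "max abs diff: " << cv::norm(twoPass, onePass, cv::NORM_INF) << std::endl;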