cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, flags);
Parameters:
objectPoints, imagePoints, cameraMatrix, and distCoeffs are inputs; rvec and tvec are outputs.
objectPoints holds the 3D points expressed in the world coordinate system.
imagePoints holds the corresponding 2D pixel coordinates in the image.
The optional useExtrinsicGuess flag makes the solver start from the values already stored in rvec and tvec, and flags selects the algorithm (the default is cv::SOLVEPNP_ITERATIVE); the function returns true on success.
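Since the walkthrough below uses coplanar points (all z = 0), a planar-specific solver can also be selected through flags; a hypothetical call, using the variables defined in the example further down:

bool ok = cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, cv::SOLVEPNP_IPPE);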
For example, suppose a calibrated monocular camera (intrinsics and distortion coefficients known) photographs a rectangular object:
objectPoints: measure the distances between the object's top-left, top-right, bottom-left, and bottom-right corners with a ruler, choose any one corner as the origin of a world coordinate system, set z = 0 for every point, and store the resulting 3D coordinates in a vector.
imagePoints: find the same four corners in the image and store their pixel coordinates in a vector, in the same order.
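As a concrete illustration, here is a minimal sketch of filling both vectors for a hypothetical 100 mm x 50 mm rectangle (all dimensions and pixel coordinates below are placeholders, not measured values):

// World frame: origin at the top-left corner, x to the right, y down, z = 0.
// Assumed size: 100 mm wide, 50 mm tall; replace with your own measurements.
std::vector<cv::Point3f> objectPoints;
objectPoints.push_back(cv::Point3f(0.0f,   0.0f,  0.0f)); // top-left (origin)
objectPoints.push_back(cv::Point3f(100.0f, 0.0f,  0.0f)); // top-right
objectPoints.push_back(cv::Point3f(0.0f,   50.0f, 0.0f)); // bottom-left
objectPoints.push_back(cv::Point3f(100.0f, 50.0f, 0.0f)); // bottom-right

// Pixel coordinates of the same corners, in the same order (placeholder values).
std::vector<cv::Point2f> imagePoints;
imagePoints.push_back(cv::Point2f(320.0f, 240.0f)); // top-left
imagePoints.push_back(cv::Point2f(480.0f, 242.0f)); // top-right
imagePoints.push_back(cv::Point2f(318.0f, 320.0f)); // bottom-left
imagePoints.push_back(cv::Point2f(478.0f, 322.0f)); // bottom-right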
Usage example:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main() {
    std::vector<cv::Point3f> objectPoints; // 3D points in the world coordinate system
    std::vector<cv::Point2f> imagePoints;  // 2D points in the image
    // ... fill objectPoints and imagePoints here ...

    // Camera intrinsic matrix (replace the placeholder values with your calibration results)
    double fx = 800.0, fy = 800.0, cx = 320.0, cy = 240.0;
    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    // Distortion coefficient matrix (replace with your calibration results)
    double k1 = 0.0, k2 = 0.0, p1 = 0.0, p2 = 0.0, k3 = 0.0;
    cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << k1, k2, p1, p2, k3);

    cv::Mat rvec; // output rotation vector
    cv::Mat tvec; // output translation vector
    bool success = cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    if (success) {
        // Convert the rotation vector into a rotation matrix and print the results
        cv::Mat rotationMatrix;
        cv::Rodrigues(rvec, rotationMatrix);
        std::cout << "Rotation Vector:" << std::endl << rvec << std::endl;
        std::cout << "Translation Vector:" << std::endl << tvec << std::endl;
        std::cout << "Rotation Matrix:" << std::endl << rotationMatrix << std::endl;
    }
    return 0;
}
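To sanity-check the recovered pose, you can reproject objectPoints through the estimated rvec and tvec with cv::projectPoints and compare against the detected imagePoints. A minimal sketch, meant to run inside the if (success) block above (std::sqrt needs <cmath>):

// Reproject the 3D points with the estimated pose and measure the mean pixel error
std::vector<cv::Point2f> reprojected;
cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, reprojected);
double err = 0.0;
for (size_t i = 0; i < imagePoints.size(); ++i) {
    cv::Point2f d = imagePoints[i] - reprojected[i];
    err += std::sqrt(d.x * d.x + d.y * d.y); // Euclidean distance per point
}
err /= imagePoints.size();
std::cout << "mean reprojection error (px): " << err << std::endl;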
When using solvePnP, note that the entries of objectPoints and imagePoints must correspond one-to-one. With only four points, for example, store both sets in the same fixed order, such as top-left, top-right, bottom-left, bottom-right; if the orders differ, the computed pose will be wrong.
In my own use of solvePnP, the target was four circular fiducial markers whose pixel coordinates were detected with OpenCV's SimpleBlobDetector. The detected circle centers come back in no particular order, so they cannot be matched against objectPoints directly. That led to the small routine below (shown after a detection sketch), which classifies the four 2D points as top-left, top-right, bottom-left, and bottom-right.
(If you have no such need, you can skip this part.)
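For context, a minimal SimpleBlobDetector sketch; it assumes dark circular markers in a grayscale cv::Mat named image, and the parameter values are illustrative rather than tuned:

// Configure the detector for small, round blobs (thresholds are illustrative)
cv::SimpleBlobDetector::Params params;
params.filterByArea = true;
params.minArea = 100.0f;
params.maxArea = 5000.0f;
params.filterByCircularity = true;
params.minCircularity = 0.8f;

cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
std::vector<cv::KeyPoint> keypoints;
detector->detect(image, keypoints); // 'image' is the assumed grayscale input frame

// The keypoint centers become the (still unordered) image points
std::vector<cv::Point2f> imagePoints;
for (const auto& kp : keypoints)
    imagePoints.push_back(kp.pt);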
The classification routine:

#include <opencv2/opencv.hpp>
#include <cfloat>
#include <iostream>
#include <vector>

int main() {
    std::vector<cv::Point2f> imagePoints; // the four detected points, unordered
    // ... assume the four point coordinates are already stored in imagePoints ...

    // In image coordinates (y grows downward): the top-left corner minimizes
    // x + y, the bottom-right maximizes x + y, the top-right maximizes x - y,
    // and the bottom-left minimizes x - y.
    cv::Point2f topLeft, topRight, bottomRight, bottomLeft;
    float minSum = FLT_MAX, maxSum = -FLT_MAX;
    float minDiff = FLT_MAX, maxDiff = -FLT_MAX;
    for (const auto& point : imagePoints) {
        float sum = point.x + point.y;
        float diff = point.x - point.y;
        if (sum < minSum)   { minSum = sum;   topLeft = point; }
        if (sum > maxSum)   { maxSum = sum;   bottomRight = point; }
        if (diff > maxDiff) { maxDiff = diff; topRight = point; }
        if (diff < minDiff) { minDiff = diff; bottomLeft = point; }
    }

    // Print the four classified corners
    std::cout << "top-left: (" << topLeft.x << ", " << topLeft.y << ")" << std::endl;
    std::cout << "top-right: (" << topRight.x << ", " << topRight.y << ")" << std::endl;
    std::cout << "bottom-right: (" << bottomRight.x << ", " << bottomRight.y << ")" << std::endl;
    std::cout << "bottom-left: (" << bottomLeft.x << ", " << bottomLeft.y << ")" << std::endl;

    // Store the points in a new vector, in the same order as objectPoints
    // (here: top-left, top-right, bottom-left, bottom-right)
    std::vector<cv::Point2f> imagePoints2;
    imagePoints2.push_back(topLeft);
    imagePoints2.push_back(topRight);
    imagePoints2.push_back(bottomLeft);
    imagePoints2.push_back(bottomRight);
    return 0;
}
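One caveat on the sum/difference test: it assumes the four markers form a roughly upright quadrilateral in the image. If the pattern may rotate toward 45 degrees or beyond, the corner roles become ambiguous, and a more robust scheme, such as sorting the points by angle around their centroid, is worth considering.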
Once solvePnP has produced the rotation vector rvec and translation vector tvec, you can compute the actual distance from the camera to the origin of the world coordinate system (place the origin at the object's center if the camera-to-center distance is what you want):
#include <opencv2/opencv.hpp>
#include <cmath>
#include <iostream>
#include <vector>

int main() {
    std::vector<cv::Point3f> objectPoints; // 3D points in the world coordinate system
    std::vector<cv::Point2f> imagePoints;  // 2D points in the image
    // ... fill objectPoints and imagePoints here ...

    // Camera intrinsic matrix (replace the placeholder values with your calibration results)
    double fx = 800.0, fy = 800.0, cx = 320.0, cy = 240.0;
    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    // Distortion coefficient matrix (replace with your calibration results)
    double k1 = 0.0, k2 = 0.0, p1 = 0.0, p2 = 0.0, k3 = 0.0;
    cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << k1, k2, p1, p2, k3);

    cv::Mat rvec; // output rotation vector
    cv::Mat tvec; // output translation vector
    bool success = cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    if (success) {
        // The camera-to-origin distance is the Euclidean norm of tvec, in the same
        // units as objectPoints; dividing by 10 converts e.g. millimeters to centimeters
        double distance = std::sqrt(tvec.at<double>(0, 0) * tvec.at<double>(0, 0) +
                                    tvec.at<double>(1, 0) * tvec.at<double>(1, 0) +
                                    tvec.at<double>(2, 0) * tvec.at<double>(2, 0)) / 10.0;
        std::cout << "distance = " << distance << std::endl;
    }
    return 0;
}
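Note that this distance is measured to the world origin, not necessarily to the object's center. If the camera's position in world coordinates is also needed, it follows from inverting the pose; a short sketch continuing the example above:

// Camera position in world coordinates: C = -R^T * t
cv::Mat R;
cv::Rodrigues(rvec, R);
cv::Mat camPos = -R.t() * tvec;
std::cout << "camera position (world frame): " << camPos.t() << std::endl;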