PnP (Perspective-n-Point) solves for a pose from 3D-to-2D point correspondences. Whether you are calibrating a camera against a LiDAR or one camera against another, PnP applies: the transformation matrix is solved from pairs of the same points expressed in different coordinate systems. On the camera side, the corner points of a chessboard are usually used to extract those points. The popular approach is Zhang Zhengyou's calibration method; for the detailed theory see my post at https://www.cnblogs.com/tangjunjun/p/16240878.html. This post focuses on the code: solving the extrinsics with PnP, and solving the camera intrinsics and distortion coefficients through calibration.
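As a quick illustration (not from the linked post), the relation that PnP inverts is the pinhole projection: a world point P maps to a pixel p through the intrinsic matrix K and the extrinsics [R | t]. A minimal NumPy sketch with made-up K, R and t:

import numpy as np

# Hypothetical K, R, t, only to illustrate p ~ K [R | t] P, the relation PnP inverts
K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
R = np.eye(3)                             # assumed rotation
t = np.array([[0.0], [0.0], [500.0]])     # assumed translation, same units as the 3D point

P_world = np.array([[60.0], [120.0], [0.0]])  # e.g. a chessboard corner with Z = 0 on the board
P_cam = R @ P_world + t                       # world frame -> camera frame
p_homog = K @ P_cam                           # camera frame -> homogeneous pixel coordinates
u, v = p_homog[:2, 0] / p_homog[2, 0]         # divide by depth to get the pixel (u, v)
print(u, v)                                   # 760.0 600.0

Given several such (P, p) pairs, PnP runs this mapping in reverse and recovers R and t.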
Link to the code and calibration images: https://pan.baidu.com/s/1ujX19IUV0EPSIMyIcBnClA?pwd=r63z (the file 相机标定与外参求解.zip)
Extraction code: r63z
Camera calibration solves for the intrinsic matrix and the distortion coefficients. This step requires chessboard images captured beforehand; the code is as follows:
# Camera calibration: solve mainly for the intrinsic matrix and distortion coefficients
import os
import glob
import random
import cv2
import numpy as np


def calibration_camera(img_root, rand_count=20):
    board_h = 8
    board_w = 11
    objp = np.zeros((board_h * board_w, 3), np.float32)
    # The world coordinate system is placed on the calibration board, so every point has Z = 0
    # and only x and y need to be filled in
    objp[:, :2] = np.mgrid[0:board_w, 0:board_h].T.reshape(-1, 2)
    objp = 60 * objp  # scale by the side length of one printed square (the original note says 2.6 cm; make this factor match your board and units)
    obj_points = []  # 3D points in the world frame
    img_points = []  # 2D points in the image plane
    images = glob.glob(os.path.join(img_root, '*.jpg'))  # paths of the chessboard images
    random.shuffle(images)
    if rand_count > len(images):
        rand_count = len(images)
    for fname in images[:rand_count]:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        size = gray.shape[::-1]
        ret, corners = cv2.findChessboardCorners(gray, (board_w, board_h), None)
        if ret:
            obj_points.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1),
                                        (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 30, 0.001))
            if corners2 is not None:
                img_points.append(corners2)
            else:
                img_points.append(corners)
            cv2.drawChessboardCorners(img, (board_w, board_h), corners, ret)  # draws the detected corners on img in place
            cv2.imshow("img", img)
            cv2.waitKey(10)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, size, None, None)
    # print("intrinsic matrix =", mtx)
    # print("distortion coefficients =", dist)
    # print("rotation vectors =", rvecs)
    # print("translation vectors =", tvecs)
    # Reprojection error
    total_error = 0
    for i in range(len(obj_points)):
        imgpoints2, _ = cv2.projectPoints(obj_points[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(img_points[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        total_error += error
    # print("total error: ", total_error / len(obj_points))
    return mtx, dist, total_error
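A minimal usage sketch, assuming the chessboard images are stored in a folder such as './calib_imgs' (the path is illustrative, not the one from the archive above):

mtx, dist, total_error = calibration_camera('./calib_imgs', rand_count=20)
print("intrinsic matrix:\n", mtx)
print("distortion coefficients:\n", dist)
print("reprojection error summed over the images:", total_error)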
Using the distortion coefficients, the image can be undistorted to obtain a corrected image; the code is as follows:
# Undistort an image using the intrinsics and distortion coefficients
def revise_img(img_root, mtx, dist):
    mtx = np.array(mtx)
    dist = np.array(dist).reshape(1, 5)
    img = cv2.imread(img_root)
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    # undistort
    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
    # crop the image to the valid region of interest
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]
    cv2.imwrite('./revise_img.jpg', dst)
    return dst
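A usage sketch chaining the two functions above; the image path is illustrative:

mtx, dist, _ = calibration_camera('./calib_imgs')
undistorted = revise_img('./calib_imgs/0001.jpg', mtx, dist)  # the result is also written to ./revise_img.jpg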
The extrinsics are then solved with the PnP method; the code is as follows:
# Solve for the rotation and translation (the extrinsics) with PnP + RANSAC
def calibration_RT(points_3D, points_2D, cameraMatrix, distCoeffs):
    points_3D = np.array(points_3D, dtype=np.float64)
    points_2D = np.array(points_2D, dtype=np.float64)
    cameraMatrix = np.array(cameraMatrix).astype(np.float32)
    distCoeffs = np.array(distCoeffs).astype(np.float32)
    _, rvecs, tvecs, inliers = cv2.solvePnPRansac(points_3D.reshape(-1, 1, 3),
                                                  points_2D.reshape(-1, 1, 2),
                                                  cameraMatrix,
                                                  distCoeffs)
    R, _ = cv2.Rodrigues(rvecs)  # convert the rotation vector to a 3x3 rotation matrix
    print('R:\n', R)
    print('rvecs:\n', rvecs)
    print('tvecs:\n', tvecs)
    return R, tvecs
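A usage sketch with hypothetical correspondences: the 3D points are board corners in the world frame (spaced 60 units apart, matching objp above) and the 2D points are made-up pixel detections, so the printed values are only illustrative. It also assembles the 4x4 world-to-camera transform from R and tvecs:

points_3D = [[0, 0, 0], [60, 0, 0], [120, 0, 0],
             [0, 60, 0], [60, 60, 0], [120, 60, 0]]           # hypothetical world-frame corners
points_2D = [[652.1, 383.4], [715.8, 380.9], [779.6, 378.5],
             [655.0, 447.2], [719.3, 444.6], [783.2, 442.0]]  # hypothetical pixel detections

R, tvecs = calibration_RT(points_3D, points_2D, mtx, dist)

T = np.eye(4)             # 4x4 extrinsic matrix mapping world coordinates into the camera frame
T[:3, :3] = R
T[:3, 3] = np.array(tvecs).ravel()
print(T)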
In the experiment, one square of the grid in the figure below is taken as one unit, and a subset of the points is used to solve the parameters. The code then iterates over every grid point and projects it from world coordinates to pixel coordinates, producing the visualization shown below; a sketch of that projection step follows:
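The exact plotting code is not reproduced here; this is a minimal sketch of the idea, assuming mtx and dist come from calibration_camera, R and tvecs from calibration_RT, an 11 x 8 corner board with squares 60 units apart as in objp, and an illustrative image path:

# Project every grid point of the board from world coordinates to pixel coordinates
rvec, _ = cv2.Rodrigues(R)                        # rotation matrix back to a rotation vector
grid = np.array([[x * 60.0, y * 60.0, 0.0] for y in range(8) for x in range(11)],
                dtype=np.float32)
pixels, _ = cv2.projectPoints(grid, rvec, tvecs, mtx, dist)

img = cv2.imread('./calib_imgs/0001.jpg')         # illustrative path
for (u, v) in pixels.reshape(-1, 2):
    cv2.circle(img, (int(round(u)), int(round(v))), 4, (0, 0, 255), -1)  # mark each projected point
cv2.imwrite('./world_to_pixel.jpg', img)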
For more details, see my cnblogs (博客园) page.