Stereo Calibration and Rectification

    • Stereo calibration and stereo matching
    • Monocular calibration
    • Homography matrix
      • Homography code example 1
      • Homography code example 2
      • Stereo homography code example 1
      • Stereo homography code example 2

Zhang Zhengyou's calibration method:

(1) First solve for the homography matrix; the homography is a mixture of the camera's intrinsic matrix and its extrinsic matrix.

(2) Separate this mixed matrix into the intrinsic matrix and the extrinsic matrix, solving for the intrinsic matrix first.

(3) Then solve for the extrinsic matrix.
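
A quick sketch of the math behind these three steps (the standard derivation of Zhang's method, added here for reference rather than taken from the original post): place the checkerboard in the plane Z = 0, so a corner (X, Y, 0) projects as

    s·[u, v, 1]^T = K·[r1  r2  t]·[X, Y, 1]^T,   hence   H = λ·K·[r1  r2  t] = [h1  h2  h3],

i.e. each view of the board yields one homography H that mixes the intrinsics K with the extrinsics (r1, r2, t). Because r1 and r2 are orthonormal,

    h1^T·K^-T·K^-1·h2 = 0   and   h1^T·K^-T·K^-1·h1 = h2^T·K^-T·K^-1·h2,

so every view contributes two constraints on K. With at least three views K can be solved for, and the extrinsics of each view then follow from r1 = λ·K^-1·h1, r2 = λ·K^-1·h2, r3 = r1 × r2, t = λ·K^-1·h3.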

Stereo Calibration and Stereo Matching

Steps

  1. Capture checkerboard images from the left and right views
  2. Calibrate each camera individually to obtain its intrinsic matrix, distortion coefficients, rotation vectors and translation vectors
  3. Run cv2.stereoCalibrate() on both views to obtain the rotation matrix and translation vector between the left and right cameras
  4. Compute the stereo-rectification maps and perform epipolar rectification of the left and right views (see the note and full script below)
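One convention worth stating before the script (taken from the OpenCV documentation rather than the original post): cv2.stereoCalibrate() returns R and T such that a point X_l expressed in the left-camera coordinate frame maps into the right-camera frame as X_r = R·X_l + T, and cv2.stereoRectify() then uses this (R, T) together with the intrinsics to compute the rectifying rotations R1, R2, the new projection matrices P1, P2 and the disparity-to-depth matrix Q. The full script: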
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image

def getImageList(img_dir):
    # Collect the image paths in a folder so OpenCV can read them
    # Argument: path to the image folder
    # Returns: list in which each element is the path of one image
    imgPath = []
    if not os.path.exists(img_dir):
        print('error dir')
    else:
        for parent, dirNames, fileNames in os.walk(img_dir):
            for fileName in fileNames:
                imgPath.append(os.path.join(parent, fileName))
    imgPath.sort()  # sort so that the left and right lists pair up by index
    return imgPath

def getObjectPoints(m, n, k):
    # Build the real-world (object) coordinates of the checkerboard corners
    # Arguments: inner corners per row, inner corners per column, square size
    # Returns: array of shape (m*n, 3) with the real corner coordinates
    objP = np.zeros(shape=(m * n, 3), dtype=np.float32)
    for i in range(m * n):
        objP[i][0] = i % m
        objP[i][1] = i // m
    return objP * k

# Draw horizontal lines on the rectified images for visual inspection
def drawLine(img, num=16):
    h, w, *_ = img.shape
    for i in range(0, h, h // num):
        cv2.line(img, (0, i), (w, i), (0, 255, 0), 3, 8)
    return img


def make_dir(dir_path):
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)


# Calibration parameters (monocular and stereo)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Real-world checkerboard coordinates: 11*8 inner corners, 30 mm squares
board_size = (11, 8, 30)
objPoint = getObjectPoints(board_size[0], board_size[1], board_size[2])

objPoints = []
imgPointsL = []
imgPointsR = []
# Image paths -- change these to your own
imgPathL = './shuangmu_biaoding/11_10-01/left_1/'
imgPathR = './shuangmu_biaoding/11_10-01/right_1/'
filePathL = getImageList(imgPathL)
filePathR = getImageList(imgPathR)

findCorners_left_save = './shuangmu_biaoding/11_10-01/findCorners_left/'  # visualization of the detected corners, left view
make_dir(findCorners_left_save)
findCorners_right_save = './shuangmu_biaoding/11_10-01/findCorners_right/'    # visualization of the detected corners, right view
make_dir(findCorners_right_save)
file_save = './shuangmu_biaoding/11_10-01/files_save/'     # folder for the saved parameter files
make_dir(file_save)

for i in range(len(filePathL)):
    # Read each image pair and convert it to grayscale
    imgL = cv2.imread(filePathL[i])
    imgR = cv2.imread(filePathR[i])
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
    # Detect the checkerboard corners
    retL, cornersL = cv2.findChessboardCorners(grayL, board_size[0:2], None)
    retR, cornersR = cv2.findChessboardCorners(grayR, board_size[0:2], None)
    if retL and retR:
        # OpenCV expects the object points as one vector per image
        objPoints.append(objPoint)
        # Refine the corner locations to sub-pixel accuracy
        cornersL2 = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
        cornersR2 = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
        imgPointsL.append(cornersL2)
        imgPointsR.append(cornersR2)
        # # findHomography computes the transformation matrix between the two corner sets
        # # cv2.RANSAC selects the best homography H (returned as M) using RANSAC
        # # Returns: M, the transformation matrix, and mask, the inlier mask
        # M, mask = cv2.findHomography(cornersL2, cornersR2, cv2.RANSAC, 5.0)
        # # ravel flattens the mask, which is then converted to a list
        # matchesMask = mask.ravel().tolist()
        cv2.drawChessboardCorners(imgL, board_size[0:2], cornersL, retL)
        cv2.drawChessboardCorners(imgR, board_size[0:2], cornersR, retR)
        img_L_name = os.path.basename(filePathL[i])
        img_R_name = os.path.basename(filePathR[i])
        cv2.imwrite(findCorners_left_save + img_L_name, imgL)    # save the corner-detection visualization, left view
        cv2.imwrite(findCorners_right_save + img_R_name, imgR)   # save the corner-detection visualization, right view


# Calibrate the left and right cameras individually (if the formatting breaks when copying, auto-format it in PyCharm)
# Results: RMS reprojection error, intrinsic matrix, distortion coefficients, rotation vectors, translation vectors
retL, cameraMatrixL, distMatrixL, RL, TL = cv2.calibrateCamera(objPoints, imgPointsL, grayL.shape[::-1], None, None)
retR, cameraMatrixR, distMatrixR, RR, TR = cv2.calibrateCamera(objPoints, imgPointsR, grayR.shape[::-1], None, None)
# Stereo calibration: estimate the rotation R and translation T between the two cameras
# (flags and criteria are passed as keyword arguments so they do not land in the optional R/T slots)
retS, mLS, dLS, mRS, dRS, R, T, E, F = cv2.stereoCalibrate(objPoints, imgPointsL, imgPointsR,
                                                           cameraMatrixL, distMatrixL,
                                                           cameraMatrixR, distMatrixR,
                                                           grayL.shape[::-1],
                                                           flags=cv2.CALIB_USE_INTRINSIC_GUESS,
                                                           criteria=criteria_stereo)

np.save(file_save+ 'cameraMatrixL', cameraMatrixL)
np.save(file_save+ 'distMatrixL', distMatrixL)
np.save(file_save+ 'RL', RL)
np.save(file_save+ 'TL', TL)

np.save(file_save+ 'cameraMatrixR', cameraMatrixR)
np.save(file_save+ 'distMatrixR', distMatrixR)
np.save(file_save+ 'RR', RR)
np.save(file_save+ 'TR', TR)

np.save(file_save+ 'mLS', mLS)
np.save(file_save+ 'dLS', dLS)
np.save(file_save+ 'mRS', mRS)
np.save(file_save+ 'dRS', dRS)
np.save(file_save+ 'R', R)
np.save(file_save+ 'T', T)

# mLS = np.array([[3.57779836e+03, 0.00000000e+00, 2.19388986e+03],
#                 [0.00000000e+00, 3.57627959e+03, 1.21979501e+03],
#                 [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
# dLS = np.array([[-6.69762889e-03,  -1.27925228e-01,  -2.71378037e-04 , -1.71752147e-03, 2.99125637e-01]])
# mRS = np.array([[3.58048460e+03,  0.00000000e+00 , 2.13889596e+03],
#                 [0.00000000e+00,  3.58498975e+03,  1.25564055e+03],
#                 [0.00000000e+00,  0.00000000e+00,  1.00000000e+00]])
# dRS = np.array([[-0.04610254 , -0.07880463,  0.00058363, -0.00497765,  0.2171918 ]])
# R = np.array([[ 0.94783553,  0.00787214, -0.31866256], [-0.01536381, 0.99966136, -0.02100307], [ 0.31838931,  0.02480332,  0.9476355 ]])
# T = np.array([[-307.01659062], [-9.08185422], [-62.94997066]])
mLS = np.load(file_save+ 'mLS.npy')
dLS = np.load(file_save+ 'dLS.npy')
mRS = np.load(file_save+ 'mRS.npy')
dRS = np.load(file_save+ 'dRS.npy')
R = np.load(file_save+ 'R.npy')
T = np.load(file_save+ 'T.npy')

img_shape = (4352, 2448)
# img_shape = grayL.shape[::-1]
# Use stereoRectify() to compute the rectification transforms
rectify_scale = 1  # alpha: 0 crops the images to valid pixels only, 1 keeps all source pixels
RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(mLS, dLS, mRS, dRS,
                                                  img_shape, R, T,
                                                  alpha=rectify_scale,
                                                  newImageSize=(0, 0))

# initUndistortRectifyMap computes the undistortion plus rectification maps that align the epipolar lines.
Left_Stereo_Map = cv2.initUndistortRectifyMap(mLS, dLS, RL, PL,
                                              img_shape, cv2.CV_16SC2)

Right_Stereo_Map = cv2.initUndistortRectifyMap(mRS, dRS, RR, PR,
                                               img_shape, cv2.CV_16SC2)

rectified_img_save = './shuangmu_biaoding/11_10-01/rectified_img_save/'   # rectified left/right views tiled side by side
rectified_line_save = './shuangmu_biaoding/11_10-01/rectified_line_save/'
rectified_left_save = './shuangmu_biaoding/11_10-01/rectified_left_save/' # rectified left views
rectified_right_save = './shuangmu_biaoding/11_10-01/rectified_right_save/'   # rectified right views
make_dir(rectified_img_save)
make_dir(rectified_line_save)
make_dir(rectified_left_save)
make_dir(rectified_right_save)

# Visualize the rectification result
for i in range(len(filePathL)):
    # Read each image pair
    frameL = cv2.imread(filePathL[i])
    frameR = cv2.imread(filePathR[i])
    img_L_name = os.path.basename(filePathL[i])
    img_R_name = os.path.basename(filePathR[i])

    # remap applies the rectification maps (border arguments passed as keywords)
    Left_rectified = cv2.remap(frameL, Left_Stereo_Map[0], Left_Stereo_Map[1], cv2.INTER_LANCZOS4,
                               borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    im_L = Image.fromarray(Left_rectified)  # numpy array -> PIL Image (not used below)

    Right_rectified = cv2.remap(frameR, Right_Stereo_Map[0], Right_Stereo_Map[1], cv2.INTER_LANCZOS4,
                                borderMode=cv2.BORDER_CONSTANT, borderValue=0)
    im_R = Image.fromarray(Right_rectified)  # numpy array -> PIL Image (not used below)
    res = np.hstack((Left_rectified, Right_rectified))  # stack the left and right views horizontally
    cv2.imwrite(rectified_img_save + img_L_name, res)
    cv2.imwrite(rectified_left_save + img_L_name, Left_rectified)
    cv2.imwrite(rectified_right_save + img_R_name, Right_rectified)

    # Draw horizontal lines to inspect the epipolar alignment
    res1 = np.concatenate((Left_rectified, Right_rectified), axis=1).copy()
    img_line = drawLine(res1, 24)
    cv2.imwrite(rectified_line_save + img_L_name, img_line)
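
A small numeric check that I find useful at this point (my addition, not part of the original script; the folder and file name below are just examples matching the paths used above): after a good rectification, corresponding chessboard corners in the left and right views should lie on almost the same image row.

import cv2
import numpy as np

# Hypothetical paths/file name -- adjust them to the rectified folders written by the script above
left = cv2.imread('./shuangmu_biaoding/11_10-01/rectified_left_save/16.JPG', cv2.IMREAD_GRAYSCALE)
right = cv2.imread('./shuangmu_biaoding/11_10-01/rectified_right_save/16.JPG', cv2.IMREAD_GRAYSCALE)
okL, cL = cv2.findChessboardCorners(left, (11, 8), None)
okR, cR = cv2.findChessboardCorners(right, (11, 8), None)
if okL and okR:
    # assumes the corners are returned in the same order in both views
    dy = np.abs(cL[:, 0, 1] - cR[:, 0, 1])   # row (y) differences of corresponding corners
    print('mean |dy| = %.2f px, max |dy| = %.2f px' % (dy.mean(), dy.max()))  # close to 0 for a good rectification
else:
    print('chessboard not found in one of the rectified images')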

Monocular Calibration

Code:

import os
import cv2
import numpy as np
import glob

# Find the checkerboard corners
# Termination criteria for the sub-pixel refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Checkerboard pattern size
w = 11  # number of inner corners per row (inner corners are the ones shared by neighbouring squares)
h = 8

# Checkerboard points in the world coordinate system, e.g. (0,0,0), (1,0,0), (2,0,0) ..., (10,7,0); Z is fixed at 0
objp = np.zeros((w * h, 3), np.float32)
objp[:, :2] = np.mgrid[0:w, 0:h].T.reshape(-1, 2)
# Store the world/image coordinate pairs of the checkerboard corners
objpoints = []  # 3D points in the world coordinate system
imgpoints = []  # 2D points in the image plane

imgs_path = './shuangmu_biaoding/11_3/11_3_right_img/'
# images = glob.glob('shuangmu_biaoding/left/*.JPG')  # calibration images
images = os.listdir(imgs_path)

findCorners_imgs_left = './shuangmu_biaoding/11_3/findCorners_11_3_right/'
if not os.path.exists(findCorners_imgs_left):
    os.makedirs(findCorners_imgs_left)

for fname in images:
    img = cv2.imread(imgs_path+ fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # cv2.imshow('findCorners', img)
    # Find the checkerboard corners
    # Arguments: checkerboard image (8-bit grayscale or color), pattern size, output corner array
    ret, corners = cv2.findChessboardCorners(gray, (w, h), None)
    # If enough corners were found, store them
    if ret:
        # Refine the corner locations
        # Arguments: image, initial corner coordinates, search window of size 2*winSize+1, dead zone, termination criteria
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        objpoints.append(objp)
        imgpoints.append(corners2)
        # Draw the corners on the image
        cv2.drawChessboardCorners(img, (w, h), corners2, ret)
        # cv2.imshow('findCorners', img)
        # cv2.waitKey(1000)
        cv2.imwrite(findCorners_imgs_left + fname, img)
cv2.destroyAllWindows()
# Calibration (an undistortion example follows below, commented out)
# Inputs: object points in world coordinates, image points in pixels, image size, plus an optional initial 3*3 intrinsic matrix and distortion coefficients
# Outputs: RMS reprojection error, intrinsic matrix, distortion coefficients, rotation vectors, translation vectors
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# mtx:   intrinsic matrix
# dist:  distortion coefficients
# rvecs: rotation vectors (extrinsics, one per image)
# tvecs: translation vectors (extrinsics, one per image)
print("ret:", ret)
print("mtx:\n", mtx)  # intrinsic matrix
print("dist:\n", dist)  # distortion coefficients = (k_1, k_2, p_1, p_2, k_3)
print("rvecs:\n", rvecs)  # rotation vectors (extrinsics)
print("tvecs:\n", tvecs)  # translation vectors (extrinsics)


# # Undistortion
# img2 = cv2.imread('shuangmu_biaoding/right/IMG_1596.JPG')
# h, w = img2.shape[:2]
# # With the intrinsics and distortion coefficients in hand, before undistorting
# # we can call cv2.getOptimalNewCameraMatrix() to refine the camera matrix using
# # the free scaling parameter alpha. With alpha=0 it returns a camera matrix for a
# # cropped image, with the unwanted pixels removed after undistortion; with alpha=1
# # it keeps all source pixels (adding black borders) and also returns an ROI that
# # can be used to crop them away.
# newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w, h))  # free scaling parameter
#
# dst = cv2.undistort(img2, mtx, dist, None, newcameramtx)
# # Crop the image using the ROI from above
# x, y, w, h = roi
# dst = dst[y:y + h, x:x + w]
# cv2.imwrite('shuangmu_biaoding/calibresult_right.jpg', dst)

# Reprojection error
# The reprojection error tells how good the calibration is; the closer to zero, the better.
# Using the intrinsic matrix, distortion coefficients, rotation and translation vectors computed above,
# cv2.projectPoints() projects the 3D object points onto the image; the error is the distance between the
# reprojected points and the corners detected in the image, averaged over all calibration images.
total_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
    total_error += error
print("total error: ", total_error / len(objpoints))

error_value = total_error / len(objpoints)
output_list = [ret, mtx, dist, rvecs, tvecs, error_value]
with open('shuangmu_biaoding/11_3/output_right.txt', 'a') as f:
    f.write('ret' + '\t' + 'mtx' + '\t' + 'dist' + '\t' + 'rvecs' + '\t' + 'tvecs' + '\t' + 'error' + '\n' * 2)
    for item in output_list:
        f.write(str(item) + '\n' * 2)
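
For reference, the value printed above is, as I read the loop: error = (1/N) · Σ_i ( ||x_i_detected − x_i_reprojected||_2 / n_i ), where N is the number of calibration images and n_i the number of corners in image i. This is related to, but not exactly the same as, the RMS reprojection error that cv2.calibrateCamera() already returns as ret.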

Homography Matrix
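
Some added context on what the examples below estimate: a homography is a 3×3 matrix H, defined only up to scale (8 degrees of freedom), that maps pixel coordinates between two images of the same plane, or between two images taken from the same camera centre: s·[u', v', 1]^T = H·[u, v, 1]^T. At least four point correspondences are needed to estimate it; cv2.findHomography() does this from matched points, optionally with RANSAC to reject outliers.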

Homography code example 1:

import cv2
import numpy as np

MIN_MATCH_COUNT = 10
img1 = cv2.imread('left_right_frame/129_left.png')          # queryImage
img2 = cv2.imread('left_right_frame/129_right.png')          # trainImage


# Find matching points with SIFT
def SIFT():
    # Initiate the SIFT detector (on OpenCV >= 4.4 this is cv2.SIFT_create() instead of xfeatures2d)
    sift = cv2.xfeatures2d.SIFT_create()
    # Find the keypoints and descriptors with SIFT
    # (note: kp1/des1 are computed on img2 and kp2/des2 on img1, so H below maps img2 into img1)
    kp1, des1 = sift.detectAndCompute(img2, None)
    kp2, des2 = sift.detectAndCompute(img1, None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    # cv2.drawMatchesKnn expects a list of lists of matches
    good_2 = [[m] for m in good]
    matching = cv2.drawMatchesKnn(img2, kp1, img1, kp2, good_2[:20], None, flags=2)

    if len(good) > MIN_MATCH_COUNT:
        # Collect the coordinates of the matched keypoints
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # H maps points of img2 into the coordinate frame of img1
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        wrap = cv2.warpPerspective(img2, H, (img2.shape[1] + img2.shape[1], img2.shape[0] + img2.shape[0]))
        wrap[0:img2.shape[0], 0:img2.shape[1]] = img1

        rows, cols = np.where(wrap[:, :, 0] != 0)
        min_row, max_row = min(rows), max(rows) + 1
        min_col, max_col = min(cols), max(cols) + 1
        result = wrap[min_row:max_row, min_col:max_col, :]    # crop away the unused black border

        return matching, result
    raise RuntimeError('not enough good matches: %d/%d' % (len(good), MIN_MATCH_COUNT))

if __name__ == '__main__':
    matching, result = SIFT()
    # cv2.imshow('img3.jpg', matching)
    cv2.imwrite('left_right_frame/matching.jpg', matching)
    # cv2.imshow('shuangmu_biaoding/result.jpg', result)
    cv2.imwrite('left_right_frame/result.jpg', result)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # cv2.waitKey(1)

Homography code example 2:

# ORB feature detection with brute-force matching
import cv2
import numpy as np
import matplotlib.pyplot as plt

# RGB images can also be used as input, but then the target is not matched
train = cv2.imread('shuangmu_biaoding/11_3/11_3_left_img/9.JPG', 0)
query = cv2.imread('shuangmu_biaoding/11_3/11_3_right_img/9.JPG', 0)

exx = 1
exy = 1

# Without resizing the images, no match is found
train = cv2.resize(train, dsize=None, fx=exx, fy=exy)
query = cv2.resize(query, dsize=None, fx=exx, fy=exy)

# Brute-force matching
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(train, None)
kp2, des2 = orb.detectAndCompute(query, None)

# For ORB use NORM_HAMMING as the descriptor distance; crossCheck=True enables cross-checking of matches
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match the feature descriptors
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)

# Reshape the arrays without changing the data; the data are the coordinates of the keypoints
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)  # keypoint coordinates in the train image
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)  # keypoint coordinates in the query image
# findHomography computes the transformation matrix
# cv2.RANSAC selects the best homography H (returned as M) using RANSAC
# Returns: M, the transformation matrix, and mask, the inlier mask
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
# ravel flattens the mask, which is then converted to a list
matchesMask = mask.ravel().tolist()
# Get the size of the train image
h, w = train.shape

# pts holds the four corners of the train image
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
# Transform the four corners with the homography
# to obtain the corresponding coordinates in the query image
dst = cv2.perspectiveTransform(pts, M)
# Draw the transformed quadrilateral on the query image
query = cv2.polylines(query, [np.int32(dst)], True, (255, 255, 255), 3, cv2.LINE_AA)

img3 = cv2.drawMatches(train, kp1, query, kp2, matches[:20], None, flags=2)
cv2.imwrite('Homography_test/000.png', img3)
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()

Stereo homography code example 1

import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image

def getImageList(img_dir):
    # Collect the image paths in a folder so OpenCV can read them
    # Argument: path to the image folder
    # Returns: list in which each element is the path of one image
    imgPath = []
    if not os.path.exists(img_dir):
        print('error dir')
    else:
        for parent, dirNames, fileNames in os.walk(img_dir):
            for fileName in fileNames:
                imgPath.append(os.path.join(parent, fileName))
    imgPath.sort()  # sort so that the left and right lists pair up by index
    return imgPath

def getObjectPoints(m, n, k):
    # Build the real-world (object) coordinates of the checkerboard corners
    # Arguments: inner corners per row, inner corners per column, square size
    # Returns: array of shape (m*n, 3) with the real corner coordinates
    objP = np.zeros(shape=(m * n, 3), dtype=np.float32)
    for i in range(m * n):
        objP[i][0] = i % m
        objP[i][1] = i // m
    return objP * k



#
# # Calibration parameters (monocular and stereo)
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# # Real-world checkerboard coordinates: 11*8 inner corners, 30 mm squares
# board_size = (11, 8, 30)
# objPoint = getObjectPoints(board_size[0], board_size[1], board_size[2])
#
# objPoints = []
# imgPointsL = []
# imgPointsR = []
# # Image paths -- change these to your own
# imgPathL = './shuangmu_biaoding/11_10-01/left_1/'
# imgPathR = './shuangmu_biaoding/11_10-01/right_1/'
# filePathL = getImageList(imgPathL)
# filePathR = getImageList(imgPathR)
#
# findCorners_left_save = './shuangmu_biaoding/11_10-01/findCorners_left/'  # visualization of the detected corners, left view
# if not os.path.exists(findCorners_left_save):
#     os.makedirs(findCorners_left_save)
# findCorners_right_save = './shuangmu_biaoding/11_10-01/findCorners_right/'    # visualization of the detected corners, right view
# if not os.path.exists(findCorners_right_save):
#     os.makedirs(findCorners_right_save)
file_save = './Homography_test/11_10-01/files_save/'     # folder holding the saved per-image homography files
# if not os.path.exists(file_save):
#     os.makedirs(file_save)
#
# for i in range(len(filePathL)):
#     # if i == 3:
#     #     break
#     # Read each image pair and convert it to grayscale
#     imgL = cv2.imread(filePathL[i])
#     imgR = cv2.imread(filePathR[i])
#     grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
#     grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
#     # Detect the checkerboard corners
#     retL, cornersL = cv2.findChessboardCorners(grayL, board_size[0:2], None)
#     retR, cornersR = cv2.findChessboardCorners(grayR, board_size[0:2], None)
#     if retL and retR:
#         # OpenCV expects the object points as one vector per image
#         objPoints.append(objPoint)
#         # Refine the corner locations to sub-pixel accuracy
#         cornersL2 = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
#         cornersR2 = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
#         imgPointsL.append(cornersL2)
#         imgPointsR.append(cornersR2)
#         # findHomography computes the transformation matrix
#         # cv2.RANSAC selects the best homography H (returned as M) using RANSAC
#         # Returns: M, the transformation matrix, and mask, the inlier mask
#         M, mask = cv2.findHomography(cornersL2, cornersR2, cv2.RANSAC, 5.0)
#         # ravel flattens the mask, which is then converted to a list
#         print(M)
#         print('------------------------------------------')
#         img_L_name = os.path.basename(filePathL[i])
#         name = img_L_name.split('.')[0]
#         np.save(file_save + name, M)

        # matchesMask = mask.ravel().tolist()
#         cv2.drawChessboardCorners(imgL, board_size[0:2], cornersL, retL)
#         cv2.drawChessboardCorners(imgR, board_size[0:2], cornersR, retR)
#         img_L_name = os.path.basename(filePathL[i])
#         img_R_name = os.path.basename(filePathR[i])
#         cv2.imwrite(findCorners_left_save + img_L_name, imgL)    # save the corner-detection visualization, left view
#         cv2.imwrite(findCorners_right_save + img_R_name, imgR)   # save the corner-detection visualization, right view
#
# imgPointsL = np.array(imgPointsL).reshape(-1, 1, 2)
# imgPointsR = np.array(imgPointsR).reshape(-1, 1, 2)
#
# M, mask = cv2.findHomography(imgPointsL, imgPointsR, cv2.RANSAC, 5.0)
# # M, mask = cv2.findHomography(imgPointsL, imgPointsR, cv2.RANSAC, 5.0, maxIters=10000)
# print('*' * 20)
# print(M)
# np.save(file_save + 'totle_M', M)
# with open(file_save + 'totle_M.txt', 'w') as f:
#     f.write(str(M))
# # Calibrate the left and right cameras individually (if the formatting breaks when copying, auto-format it in PyCharm)
# # Results: RMS error, intrinsic matrix, distortion coefficients, rotation vectors, translation vectors
# retL, cameraMatrixL, distMatrixL, RL, TL = cv2.calibrateCamera(objPoints, imgPointsL, grayL.shape[::-1], None, None)
# retR, cameraMatrixR, distMatrixR, RR, TR = cv2.calibrateCamera(objPoints, imgPointsR, grayR.shape[::-1], None, None)
# M = np.array([[1.82211923e+00, -1.47284674e-01, -4.57158368e+03],
#                 [ 3.09248793e-01 , 1.67631352e+00 ,-9.68337402e+02],
#                 [ 2.08157564e-04, -1.47653122e-05 , 1.00000000e+00]])

dst_save = './Homography_test/11_10-01/dst_save/'
if not os.path.exists(dst_save):
    os.makedirs(dst_save)
files = os.listdir(file_save)
for file in files:
    M = np.load(file_save+file)
    # pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    pts_left = np.float32([[1120, 410], [1120, 528], [1160, 528], [1160, 410]]).reshape(-1, 1, 2)
    pts_right = np.float32([[145, 405], [145, 540], [200, 540], [200, 405]]).reshape(-1, 1, 2)
    # Transform the four corners of the box with the homography
    # to obtain the corresponding coordinates in the right image
    dst = cv2.perspectiveTransform(pts_left, M)
    # (the transformed corners could then be drawn on the right image as a quadrilateral)
    print(dst)
    print('-----------------------------')
    file_name = file.split('.')[0]
    np.save(dst_save + file_name, dst)

Stereo homography code example 2

import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image

def getImageList(img_dir):
    # Collect the image paths in a folder so OpenCV can read them
    # Argument: path to the image folder
    # Returns: list in which each element is the path of one image
    imgPath = []
    if not os.path.exists(img_dir):
        print('error dir')
    else:
        for parent, dirNames, fileNames in os.walk(img_dir):
            for fileName in fileNames:
                imgPath.append(os.path.join(parent, fileName))
    imgPath.sort()  # sort so that the left and right lists pair up by index
    return imgPath

def getObjectPoints(m, n, k):
    # Build the real-world (object) coordinates of the checkerboard corners
    # Arguments: inner corners per row, inner corners per column, square size
    # Returns: array of shape (m*n, 3) with the real corner coordinates
    objP = np.zeros(shape=(m * n, 3), dtype=np.float32)
    for i in range(m * n):
        objP[i][0] = i % m
        objP[i][1] = i // m
    return objP * k


def make_dir(dir_path):
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

# Calibration parameters (monocular and stereo)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Real-world checkerboard coordinates: 11*8 inner corners, 30 mm squares
board_size = (11, 8, 30)
objPoint = getObjectPoints(board_size[0], board_size[1], board_size[2])

objPoints = []
imgPointsL = []
imgPointsR = []
# Image paths -- change these to your own
imgPathL = './shuangmu_biaoding/11_10-01/left_1/'
imgPathR = './shuangmu_biaoding/11_10-01/right_1/'
filePathL = getImageList(imgPathL)
filePathR = getImageList(imgPathR)

findCorners_left_save = './shuangmu_biaoding/11_10-01/findCorners_left/'  # visualization of the detected corners, left view
make_dir(findCorners_left_save)

findCorners_right_save = './shuangmu_biaoding/11_10-01/findCorners_right/'    # visualization of the detected corners, right view
make_dir(findCorners_right_save)

file_save = './Homography_test/11_10-01/files_save/'     # folder for the saved parameter files
make_dir(file_save)

H_save_img = './Homography_test/11_10-01/img_save/'
make_dir(H_save_img)

result_img_save = './Homography_test/11_10-01/result_img_save/'
make_dir(result_img_save)

for i in range(len(filePathL)):
    img_L_name = os.path.basename(filePathL[i])
    if img_L_name != '16.JPG':   # only process the pair named 16.JPG
        continue
    # Read each image pair and convert it to grayscale
    imgL = cv2.imread(filePathL[i])
    imgR = cv2.imread(filePathR[i])
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
    # Detect the checkerboard corners
    retL, cornersL = cv2.findChessboardCorners(grayL, board_size[0:2], None)
    retR, cornersR = cv2.findChessboardCorners(grayR, board_size[0:2], None)
    if retL and retR:
        # OpenCV expects the object points as one vector per image
        objPoints.append(objPoint)
        # Refine the corner locations to sub-pixel accuracy
        cornersL2 = cv2.cornerSubPix(grayL, cornersL, (11, 11), (-1, -1), criteria)
        cornersR2 = cv2.cornerSubPix(grayR, cornersR, (11, 11), (-1, -1), criteria)
        imgPointsL.append(cornersL2)
        imgPointsR.append(cornersR2)
        # findHomography computes the transformation matrix
        # cv2.RANSAC selects the best homography H (returned as M) using RANSAC
        # Returns: M, the transformation matrix, and mask, the inlier mask
        M, mask = cv2.findHomography(cornersL2, cornersR2, cv2.RANSAC, 5.0)
        # ravel flattens the mask, which is then converted to a list
        # print(M)
        # print('------------------------------------------')
        # img_L_name = os.path.basename(filePathL[i])
        # name = img_L_name.split('.')[0]
        # np.save(file_save + name, M)
        #
        # matchesMask = mask.ravel().tolist()
        # cv2.drawChessboardCorners(imgL, board_size[0:2], cornersL, retL)
        # cv2.drawChessboardCorners(imgR, board_size[0:2], cornersR, retR)
        # img_L_name = os.path.basename(filePathL[i])
        # img_R_name = os.path.basename(filePathR[i])
        # cv2.imwrite(findCorners_left_save + img_L_name, imgL)    # save the corner-detection visualization, left view
        # cv2.imwrite(findCorners_right_save + img_R_name, imgR)   # save the corner-detection visualization, right view
        point_color, thickness = (0, 255, 0), 2
        res = np.hstack((imgL, imgR))
        img_L_name = os.path.basename(filePathL[i])
        pts_left = cornersL2.tolist()
        pts_right = cornersR2.tolist()
        for pt_left, pt_right in zip(pts_left, pts_right):
            pt_left = tuple([int(item) for item in pt_left[0]])
            pt_right = pt_right[0]
            pt_right[0] += 4352   # shift the right-view x coordinate by the image width (4352 px)
            pt_right = tuple([int(item) for item in pt_right])

            # cv2.line(res, pt_left, pt_right, (0, 255, 0), thickness=6)
            # cv2.line(res, pt_left, pt_right, point_color, thickness)
        # cv2.imwrite(H_save_img + img_L_name, res)

        # pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        pts_left = np.float32([[3750, 955], [3750, 1090], [3833, 1090], [3833, 955]]).reshape(-1, 1, 2)
        # pts_left = np.float32([[1120, 410], [1120, 528], [1160, 528], [1160, 410]]).reshape(-1, 1, 2)
        # pts_right = np.float32([[145, 405], [145, 540], [200, 540], [200, 405]]).reshape(-1, 1, 2)
        # Transform the four corners of the box with the homography
        # to obtain the corresponding coordinates in the right image
        dst = cv2.perspectiveTransform(pts_left, M)
        print('------------------------------')
        print(dst)
        src = imgL.copy()     # draw the source box on the left image
        query = imgR.copy()   # draw the mapped box on the right image
        query = cv2.polylines(query, [np.int32(dst)], True, (255, 0, 255), 3, cv2.LINE_AA)
        src = cv2.polylines(src, [np.int32(pts_left)], True, (255, 0, 255), 3, cv2.LINE_AA)
        cv2.imwrite(result_img_save + 'left_' + img_L_name, src)
        cv2.imwrite(result_img_save + 'right_' + img_L_name, query)
