The general idea: first detect keypoints with SIFT, then use KNN to match the most similar descriptors, keep only the high-confidence matches, estimate the optimal transformation (homography) matrix from them, warp one of the images with it, and finally overlay the other image on top.
The code is given directly below; look up the individual functions as needed. The test images are 1.jpg and 2.jpg at the end of the post; if anything is unfamiliar, stepping through with a debugger is recommended.
My current OpenCV version:
>>> cv2.__version__
'4.5.5'
Note: you may need to install opencv-contrib-python (mainly on older OpenCV builds, where SIFT is only available in the contrib module).
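A quick way to confirm that your build exposes SIFT (a minimal sketch; on OpenCV >= 4.4 SIFT lives in the main module, while older builds need the contrib package):
import cv2
print(cv2.__version__)                       # e.g. '4.5.5'
try:
    sift = cv2.SIFT_create()                 # main module (OpenCV >= 4.4)
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()     # older builds: contrib module
print(sift)                                  # a SIFT object if everything is in place
The full script: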
import numpy as np
import cv2
# On older OpenCV builds, xfeatures2d may be missing or fail to run;
# in that case you may need to install opencv-contrib-python:
# pip install opencv-contrib-python
SHOW = True
# Detect keypoints with the library implementation of SIFT
def sift_keypoints_detect(image_):
    image = image_.copy()  # work on a copy so the caller's image is untouched
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # OpenCV >= 4.4: SIFT lives in the main module;
    # older builds need cv2.xfeatures2d.SIFT_create() from opencv-contrib-python
    sift = cv2.SIFT_create()
    keypoints, features = sift.detectAndCompute(image, None)
    # - keypoints | list of detected KeyPoint objects
    # - features  | descriptor array, features.shape == (len(keypoints), 128)
    keypoints_image = cv2.drawKeypoints(
        image, keypoints, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # cv2.DRAW_MATCHES_FLAGS_DEFAULT draws plain colored circles
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS draws circles with size and orientation
    return keypoints_image, keypoints, features
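# # A quick sanity check of what the function returns (optional; assumes 1.jpg
# # from the end of the post is in the working directory):
# _, kps, feats = sift_keypoints_detect(cv2.imread("1.jpg"))
# print(len(kps))                              # number of detected keypoints
# print(feats.shape)                           # (len(kps), 128)
# print(kps[0].pt, kps[0].size, kps[0].angle)  # position, scale, orientation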
# Match the descriptors of the two images
def sift_feature_match(features_R, features_L, threshold=0.2):
    # threshold : ratio threshold for rejecting bad matches;
    #             the lower it is, the stricter the filter (fewer but more reliable points)
    # Create a brute-force matcher
    bfm = cv2.BFMatcher()
    # For each query descriptor, return its k best matches
    matches = bfm.knnMatch(features_R, features_L, k=3)
    # Result: len(features_R) entries, each a tuple of k DMatch objects
    matches = list(matches)
    # --------------------------------------------------
    # Sort so that the smallest best/third-best distance ratios come first
    matches.sort(key=lambda x: x[0].distance / x[2].distance)
    res = []
    for a, _, b in matches:
        if a.distance / b.distance < threshold:
            res.append(a)
    return res
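# The filter above compares each best match against its *third*-best neighbor.
# For reference, the more common variant is Lowe's ratio test with k=2 and a
# looser threshold (around 0.75); a minimal alternative sketch, not used below:
def lowe_ratio_match(features_R, features_L, ratio=0.75):
    bfm = cv2.BFMatcher()
    matches = bfm.knnMatch(features_R, features_L, k=2)
    good = []
    for pair in matches:
        if len(pair) < 2:
            continue
        best, second = pair
        # keep a match only if its best neighbor is clearly better than the second-best
        if best.distance < ratio * second.distance:
            good.append(best)
    return good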
# Connect the matched keypoints across the two images, draw and return the result.
# Note: cv2.drawMatches expects the query image/keypoints first
# (the matches' queryIdx indexes into keypoint_r here).
def draw_match(img_r, keypoint_r, img_l, keypoint_l, match):
    goodmatch_im = cv2.drawMatches(img_r,
                                   keypoint_r,
                                   img_l,
                                   keypoint_l,
                                   match,
                                   outImg=None,
                                   flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    return goodmatch_im
def panorama_splicing(img_R, keypoint_R, feature_R,
                      img_L, keypoint_L, feature_L, matches):
    # Estimating a homography requires at least 4 point correspondences
    assert len(matches) > 4, "need more than 4 matches to estimate a homography"
    # Each DMatch exposes four useful attributes:
    #   distance - descriptor distance (smaller is better)
    #   imgIdx   - index of the train image (relevant when matching against several images)
    #   queryIdx - index of the descriptor in the query set (features_R here)
    #   trainIdx - index of the descriptor in the train set (features_L here)
    # Each KeyPoint exposes (among others):
    #   pt    - (x, y) coordinates
    #   size  - diameter of the keypoint neighborhood
    #   angle - orientation
    # Pull out the coordinates of the matched keypoints
    points_R = np.array(
        [keypoint_R[m.queryIdx].pt for m in matches]
    )[:, None]  # insert an axis -> shape (N, 1, 2), as findHomography expects
    points_L = np.array(
        [keypoint_L[m.trainIdx].pt for m in matches]
    )[:, None]
    # Estimate the optimal homography between the two sets of 2D points;
    # the method can be least squares or, as here, RANSAC
    Homography, _ = cv2.findHomography(  # maps points_R onto points_L
        points_R, points_L, cv2.RANSAC)
    # Perspective warp: handles viewpoint changes that the affine-only
    # cv2.warpAffine cannot; straight lines stay straight, but parallelism
    # is generally not preserved
    panorama = cv2.warpPerspective(img_R,
                                   Homography,
                                   # the third argument is dsize (width, height)
                                   (img_R.shape[1] + img_L.shape[1],
                                    img_R.shape[0])
                                   )
    cv2.namedWindow("panorama_R")
    panorama_show = cv2.resize(panorama, None, fx=0.5, fy=0.5)
    cv2.imshow("panorama_R", panorama_show)
    # Paste the (unwarped) left image onto the left part of the canvas
    panorama[0:img_L.shape[0], 0:img_L.shape[1]] = img_L
    return panorama
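# The canvas above is twice as wide as one input, so the result usually keeps a
# black strip on the right. An optional helper (not used by the script below)
# that trims trailing all-black columns; a minimal sketch that assumes the
# stitched content itself contains no fully black columns:
def crop_black_columns(pano):
    nonzero_cols = np.where(pano.max(axis=(0, 2)) > 0)[0]  # columns with any non-black pixel
    return pano[:, :nonzero_cols[-1] + 1]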
if __name__ == "__main__":
    # left = cv2.imread("building_02.jpg")   # left.shape  | (1440, 1080, 3)
    # right = cv2.imread("building_03.jpg")  # right.shape | (1440, 1080, 3)
    left = cv2.imread("1.jpg")    # left.shape  | (1440, 1080, 3)
    right = cv2.imread("2.jpg")   # right.shape | (1440, 1080, 3)
    if SHOW:
        left = cv2.resize(left, None, fx=0.5, fy=0.5)
        right = cv2.resize(right, None, fx=0.5, fy=0.5)
    # Extract SIFT keypoints and descriptors
    keypoints_image_R, keypoints_R, features_R = sift_keypoints_detect(right)
    keypoints_image_L, keypoints_L, features_L = sift_keypoints_detect(left)
    # # Visualize the SIFT keypoints (optional)
    # cv2.namedWindow("keypoints_image_R")
    # cv2.imshow("keypoints_image_R", np.concatenate([keypoints_image_R, right], axis=1))
    # cv2.namedWindow("keypoints_image_L")
    # cv2.imshow("keypoints_image_L", np.concatenate([keypoints_image_L, left ], axis=1))
    # Match the keypoint descriptors with KNN
    matches = sift_feature_match(features_R, features_L)
    # Draw the matched keypoints connected across the two images
    # (query image/keypoints first, since the matches' queryIdx indexes keypoints_R)
    matched_im = draw_match(right, keypoints_R,
                            left, keypoints_L,
                            matches)
cv2.namedWindow("matched_im")
matched_im_show = cv2.resize(matched_im, None, fx=0.5, fy=0.5)
cv2.imshow("matched_im", matched_im_show)
# 全景拼接函数
panorama = panorama_splicing(right, keypoints_R, features_R,
left, keypoints_L, features_L, matches)
cv2.namedWindow("panorama")
panorama = cv2.resize(panorama, None, fx=0.5, fy=0.5)
cv2.imshow("panorama", panorama)
cv2.imwrite("out.png", panorama)
cv2.waitKey(0)
cv2.destroyAllWindows()
Looking at the matched keypoints, you can see that the connecting lines are not all parallel; the bad matches are filtered out by this step:
    res = []
    for a, _, b in matches:
        if a.distance / b.distance < threshold:
            res.append(a)
    return res
The test images: