先看效果(单映矩阵对一张影像不同平面好像不太一样):
原图 ——> 配准后叠加
原始同名点
剔除误匹配后同名点
源码-python:
numpy == 1.21.4
opencv-python == 4.5.4.58
opencv-contrib-python == 4.5.4.60
# function: image registration
# author: yangzhen
# time: 2020.7.29
import numpy as np
import cv2
import random
def GetSamePoints(img1, img2, patchheight=2000, patchwidth=2000):
    """Find corresponding (tie) points between two images using SIFT.

    Large images are processed tile by tile (at most patchheight x
    patchwidth pixels per tile) so feature extraction stays tractable.
    Assumes the two images are roughly aligned, so the same tile window
    of both images overlaps — TODO confirm against the caller's data.

    @param img1: first image (as loaded by cv2.imread)
    @param img2: second image, same size as img1
    @param patchheight: tile height in pixels
    @param patchwidth: tile width in pixels
    @return (p1, p2): float32 arrays of shape (N, 1, 2) with matched
            point coordinates in img1 / img2, in full-image coordinates
    """
    sift = cv2.SIFT_create(600)
    rows, cols = img1.shape[0:2]
    # Ceil division: the original floor division silently dropped the
    # remainder strip at the right/bottom edge (e.g. rows=2500,
    # patchheight=2000 -> last 500 rows were never matched).
    rownum = max(1, (rows + patchheight - 1) // patchheight)
    colnum = max(1, (cols + patchwidth - 1) // patchwidth)
    p1 = np.empty([0, 1, 2], dtype=np.float32)
    p2 = np.empty([0, 1, 2], dtype=np.float32)
    # One matcher for all tiles (it is stateless across match() calls).
    bf = cv2.BFMatcher()
    for i in range(rownum):
        for j in range(colnum):
            # Tile windows; numpy slicing clamps at the image border.
            mimg1 = img1[i*patchheight:(i+1)*patchheight,
                         j*patchwidth:(j+1)*patchwidth]
            mimg2 = img2[i*patchheight:(i+1)*patchheight,
                         j*patchwidth:(j+1)*patchwidth]
            kp1, des1 = sift.detectAndCompute(mimg1, None)
            kp2, des2 = sift.detectAndCompute(mimg2, None)
            # Skip tiles with too few features: detectAndCompute returns
            # des=None on featureless tiles, and findHomography needs at
            # least 4 point pairs.
            if des1 is None or des2 is None or len(kp1) < 4 or len(kp2) < 4:
                continue
            matches = bf.knnMatch(des1, des2, k=2)
            # Lowe's ratio test — the original took pair[0] blindly,
            # letting ambiguous matches through to RANSAC.
            good = [pair[0] for pair in matches
                    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]
            if len(good) < 4:
                continue
            tp1 = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            tp2 = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            # RANSAC outlier rejection (1px-subpixel reprojection tol 0.1).
            M, mask = cv2.findHomography(tp1, tp2, cv2.RANSAC, 0.1)
            if mask is None:
                # RANSAC can fail to find a model at all.
                continue
            inlier = mask.ravel().astype(bool)
            # Restore tile-local coordinates to full-image coordinates
            # and accumulate (vectorized; replaces the manual copy loop).
            offset = np.float32([[j * patchwidth, i * patchheight]])
            p1 = np.vstack((p1, tp1[inlier] + offset))
            p2 = np.vstack((p2, tp2[inlier] + offset))
    return p1, p2
def DrawSamePoint(img1, img2, p1, p2):
    """Draw matched point pairs on a side-by-side composite image.

    img1 is placed on the left and img2 on the right; each match gets a
    circle on both images and a connecting line in a random colour.

    @param img1: left image (3-channel uint8)
    @param img2: right image (3-channel uint8)
    @param p1: (N, 1, 2) float32 points in img1 (from GetSamePoints)
    @param p2: (N, 1, 2) float32 points in img2
    @return composite visualisation image (uint8, 3 channels)
    """
    hA, wA = img1.shape[0:2]
    hB, wB = img2.shape[0:2]
    # Canvas must be 3-channel uint8 for the cv2 drawing routines.
    drawImg = np.zeros((max(hA, hB), wA + wB, 3), 'uint8')
    # Bug fix: the original pasted img1 into an hB x wB slot and img2 at
    # column wB — both wrong when the two images differ in size. Use each
    # image's own dimensions and offset the right image by img1's width.
    drawImg[0:hA, 0:wA] = img1
    drawImg[0:hB, wA:wA + wB] = img2
    for i in range(len(p1)):
        # cv2 needs integer pixel coordinates (points are float32).
        pt1 = (int(p1[i][0][0]), int(p1[i][0][1]))
        # Right-image points shift by wA (not wB) to land on img2.
        pt2 = (int(p2[i][0][0]) + wA, int(p2[i][0][1]))
        # Random colour per match so pairs are visually distinguishable.
        color = (random.randint(0, 255),
                 random.randint(0, 255),
                 random.randint(0, 255))
        cv2.circle(drawImg, pt1, 24, color, 4)
        cv2.circle(drawImg, pt2, 24, color, 4)
        cv2.line(drawImg, pt1, pt2, color, 4)
    return drawImg
if __name__ == "__main__":
    # Load the image pair to be registered.
    img1 = cv2.imread('data/img1.jpg')
    img2 = cv2.imread('data/img2.jpg')
    # cv2.imread returns None (no exception) when a file is missing or
    # unreadable — fail loudly instead of crashing later in SIFT.
    if img1 is None or img2 is None:
        raise FileNotFoundError("could not read data/img1.jpg or data/img2.jpg")
    # Find corresponding points, tiling at 4000x4000.
    p1, p2 = GetSamePoints(img1, img2, 4000, 4000)
    # Visualise the matches side by side.
    drawImg = DrawSamePoint(img1, img2, p1, p2)
    # Estimate the img2 -> img1 homography and warp img2 into img1's frame.
    T, _ = cv2.findHomography(p2, p1, cv2.RANSAC, 0.1)
    nimg2 = cv2.warpPerspective(img2, T, (img1.shape[1], img1.shape[0]))
    # Save the match visualisation and the registered image.
    cv2.imwrite('data/samepoints.jpg', drawImg)
    cv2.imwrite('data/result.jpg', nimg2)
源码-c++:
void SIFTRegistration(const Mat img1, const Mat img2,
vector &p1, vector &p2,
const int PatchHeight = 2000,
const int PatchWidth = 2000)
{
//分块提取关键点
int rows = img1.rows;
int cols = img1.cols;
//块的数目
const int RowNum = rows / PatchHeight;
const int ColNum = cols / PatchWidth;
//对每个块探测关键点与描述并进行匹配与误匹配剔除
for (int i(0); i < RowNum; ++i)
{
for (int j(0); j < ColNum; ++j)
{
//获取对应块影像
Rect rect(j * PatchWidth, i * PatchHeight,
PatchWidth, PatchHeight);
Mat mimg1 = Mat(img1, rect);
Mat mimg2 = Mat(img2, rect);
//SIFT关键点探测
Ptr f2d = xfeatures2d::SIFT::create(100, 1);
//关键点
vector keypoints1;
vector keypoints2;
//关键点描述
Mat descriptions1;
Mat descriptions2;
//探测
f2d->detectAndCompute(mimg1, Mat(), keypoints1, descriptions1);
f2d->detectAndCompute(mimg2, Mat(), keypoints2, descriptions2);
//硬匹配
BFMatcher matcher;
vector matches;
matcher.match(descriptions1, descriptions2, matches);
//RANSAC算法剔除误匹配
vector GoodMatches = matches;
vector RAN_KP1, RAN_KP2;
for (int j(0); j < GoodMatches.size(); ++j)
{
RAN_KP1.push_back(keypoints1[matches[j].queryIdx]);
RAN_KP2.push_back(keypoints2[matches[j].trainIdx]);
}
//坐标数据类型变换
vector p01, p02;
for (int j(0); j < GoodMatches.size(); ++j)
{
p01.push_back(RAN_KP1[j].pt);
p02.push_back(RAN_KP2[j].pt);
}
//RANSAC过程
vector RansacStatus;
Mat Fundamental = findFundamentalMat(p01, p02, RansacStatus, FM_RANSAC);
int iter = 0;
for (int j(0); j < matches.size(); ++j)
{
if (RansacStatus[j] == 0)
GoodMatches.erase(GoodMatches.begin() + iter);
else
iter++;
}
//保存同名点并恢复原图坐标
for (int k(0); k < GoodMatches.size(); ++k)
{
Point2f tp1, tp2;
tp1.x = keypoints1[ GoodMatches[k].queryIdx ].pt.x + j * PatchWidth;
tp1.y = keypoints1[ GoodMatches[k].queryIdx ].pt.y + i * PatchHeight;
tp2.x = keypoints2[ GoodMatches[k].trainIdx ].pt.x + j * PatchWidth;
tp2.y = keypoints2[ GoodMatches[k].trainIdx ].pt.y + i * PatchHeight;
p1.push_back(tp1);
p2.push_back(tp2);
}
}
}
}