Method 1:
Reposted from: https://blog.csdn.net/recclay/article/details/109058061
Panorama.py
# import the necessary packages
import numpy as np
import imutils
import cv2

class Stitcher:
    def __init__(self):
        # determine whether we are running OpenCV v3.X or better
        self.isv3 = imutils.is_cv3(or_better=True)

    def stitch(self, images, ratio=0.75, reprojThresh=4.0,
               showMatches=False):
        # unpack the images, then detect keypoints and extract
        # local invariant descriptors (SIFT) from them
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # match features between the two images
        M = self.matchKeypoints(kpsA, kpsB,
                                featuresA, featuresB, ratio, reprojThresh)

        # if M is None, there are not enough matched keypoints
        # to create a panorama
        if M is None:
            return None

        # otherwise, apply a perspective transform to stitch the images
        (matches, H, status) = M
        result = cv2.warpPerspective(imageA, H,
                                     (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        # check whether the keypoint matches should be visualized
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                                   status)
            # return a tuple of the stitched image and the visualization
            return (result, vis)

        # return the stitched image
        return result

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check whether we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(gray, None)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to a NumPy array
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and the status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # only process the match if the keypoint was
            # successfully matched
            if s == 1:
                # draw the match
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        # return the visualization
        return vis
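A quick way to build intuition for the homography H returned by matchKeypoints: it maps pixel coordinates of imageA into imageB's frame, which is exactly the transform cv2.warpPerspective applies in stitch. Below is a minimal sanity-check sketch, assuming H and imageA are produced by the code above; it is not part of the original script.

import numpy as np
import cv2

# corners of imageA as a (4, 1, 2) float32 array, the shape
# that cv2.perspectiveTransform expects
(h, w) = imageA.shape[:2]
corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)

# where imageA's corners land in imageB's coordinate frame under H
warped = cv2.perspectiveTransform(corners, H)
print(warped.reshape(-1, 2))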
main.py
# import the necessary packages
from Panorama import Stitcher
import imutils
import cv2

# imageA = cv2.imread('./foto1B.jpg')
# imageB = cv2.imread('./foto1A.jpg')
frame1 = './1614052639261.jpg'
frame2 = './1614052639261.jpg'
# Bouguet stereo rectification principle
# remap the images according to the rectification maps
# undistort the images using the calibration data
imageA = cv2.imread(frame1)
# cv2.imwrite('img1.jpg', img1)
imageB = cv2.imread(frame2)
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
# (note: frame1 and frame2 above point to the same file; in practice
# they should be two overlapping views of the scene)
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
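One caveat with main.py as written: Stitcher.stitch returns None when there are not enough matched keypoints, so the blind tuple unpack above raises a TypeError in that case. A guarded version of the call might look like this (a sketch using the same names as above):

retval = stitcher.stitch([imageA, imageB], showMatches=True)
if retval is None:
    print("Not enough matched keypoints to build a panorama")
else:
    (result, vis) = retval
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)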
Method 2:
import time
import cv2
import numpy as np

# https://www.cnblogs.com/my-love-is-python/p/10422152.html
# https://blog.csdn.net/qiao_lili/article/details/89736237

def cv_show(name, image):
    cv2.imshow(name, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def detectAndCompute(image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    (kps, features) = sift.detectAndCompute(image, None)
    # the KeyPoint objects must be converted before they can be
    # indexed as plain coordinates
    kps = np.float32([kp.pt for kp in kps])
    return (kps, features)

def matchKeyPoints(kpsA, kpsB, featuresA, featuresB, ratio=0.75, reprojThresh=4.0):
    # ratio is the recommended threshold for the nearest-neighbor (Lowe's ratio) test
    # reprojThresh is the recommended threshold for RANSAC
    matcher = cv2.BFMatcher()
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []
    for m in rawMatches:
        if len(m) == 2 and m[0].distance < ratio * m[1].distance:
            matches.append((m[0].queryIdx, m[0].trainIdx))
    kpsA = np.float32([kpsA[m[0]] for m in matches])  # convert the lists with np.float32
    kpsB = np.float32([kpsB[m[1]] for m in matches])
    # findHomography needs at least 4 point pairs
    (M, status) = cv2.findHomography(kpsA, kpsB, cv2.RANSAC, reprojThresh)
    # not every point has a matching solution; the per-point outcome is stored in status
    return (M, matches, status)

def stitch(imgA, imgB, M):
    result = cv2.warpPerspective(imgA, M, (imgA.shape[1] + imgB.shape[1], imgA.shape[0]))
    # bug fix: use the function parameters here, not the globals imageA/imageB
    result[0:imgB.shape[0], 0:imgB.shape[1]] = imgB
    # cv2.imwrite('result', result)
    cv_show('result', result)

def drawMatches(imgA, imgB, kpsA, kpsB, matches, status):
    (hA, wA) = imgA.shape[0:2]
    (hB, wB) = imgB.shape[0:2]
    # note the 3 channels and the uint8 dtype here
    drawImg = np.zeros((max(hA, hB), wA + wB, 3), 'uint8')
    drawImg[0:hB, 0:wB] = imgB
    drawImg[0:hA, wB:] = imgA
    for ((queryIdx, trainIdx), s) in zip(matches, status):
        if s == 1:
            # note the float32 --> int conversion
            pt1 = (int(kpsB[trainIdx][0]), int(kpsB[trainIdx][1]))
            # bug fix: the right-hand point comes from kpsA[queryIdx], not kpsA[trainIdx]
            pt2 = (int(kpsA[queryIdx][0]) + wB, int(kpsA[queryIdx][1]))
            cv2.line(drawImg, pt1, pt2, (0, 0, 255))
    cv_show("drawImg", drawImg)
t1 = time.time()
# cv2.error: OpenCV(3.4.2) c:\projects\opencv-python\opencv\modules\imgproc\src\color.hpp:253: error: (-215:Assertion failed) VScn::contains(scn) && VDcn::contains(dcn) && VDepth::contains(depth) in function 'cv::CvtHelper,struct cv::Set<1,-1,-1>,struct cv::Set<0,2,5>,2>::CvtHelper'
# This was solved: the error came from using the wrong folder separator
# in the image path; changing "\" to "/" fixed it.
# Read the images
# IndexError: index 467 is out of bounds for axis 0 with size 466
# ValueError: could not broadcast input array from shape (398,542,3) into shape (409,542,3)
# (note: both paths below point to the same file; they should be two
# overlapping images)
imageA = cv2.imread('./1614240140.4660490.jpg')
imageB = cv2.imread('./1614240140.4660490.jpg')

# compute the SIFT keypoints and feature descriptors
(kpsA, featuresA) = detectAndCompute(imageA)
(kpsB, featuresB) = detectAndCompute(imageB)

# obtain a homography matrix via nearest-neighbor matching and RANSAC
(M, matches, status) = matchKeyPoints(kpsA, kpsB, featuresA, featuresB)

# draw the matching result
drawMatches(imageA, imageB, kpsA, kpsB, matches, status)

# stitch
stitch(imageA, imageB, M)
print('elapsed time', time.time() - t1)  # elapsed time 5.796463251113892

# It seems the SIFT algorithm was patented (or something along those lines);
# downgrading OpenCV to 3.4.2 solves the problem. Uninstall the existing
# packages, then:
# pip install opencv-python==3.4.2.16
# pip install opencv-contrib-python==3.4.2.16
# Tested and working!
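A note on the workaround above: the SIFT patent expired in March 2020, and since OpenCV 4.4.0 SIFT ships in the main module, so downgrading is no longer necessary on recent versions. A small compatibility sketch for creating the detector on either setup:

import cv2

try:
    # OpenCV >= 4.4.0: SIFT lives in the main module
    sift = cv2.SIFT_create()
except AttributeError:
    # older builds: fall back to the contrib module used above
    # (requires opencv-contrib-python, e.g. 3.4.2.16)
    sift = cv2.xfeatures2d.SIFT_create()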