The SfM code I wrote earlier was organized as a class, but when calling my own helper methods I did not take advantage of the fact that self.xxx attributes are accessible from every method of the class. As a result the program called the same function over and over, with a lot of redundant computation, and it ran very slowly.
After optimization:
Use the fact that self.xxx attributes are shared by all methods, and drop many of the function return statements.
In one final driver method, call all of the defined step functions in order and pass intermediate results through these instance attributes as needed; only then does the program run at a normal speed.
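For illustration only, here is a minimal sketch of that pattern (the Pipeline class and its method names are made up and are not part of the SfM code below): each step stores its result as an attribute on self, later steps read those cached attributes instead of re-running the earlier step, and a single driver method calls everything once in order.

class Pipeline(object):
    def __init__(self, data):
        self.data = data
    def step_match(self):
        # pretend this is the expensive step; it runs exactly once
        self.matches = [x * 2 for x in self.data]
    def step_pose(self):
        # reads the cached self.matches instead of calling step_match() again
        self.pose = sum(self.matches)
    def run(self):
        # single driver method with a fixed call order; results are shared via self.*
        self.step_match()
        self.step_pose()
        return self.pose

print(Pipeline([1, 2, 3]).run())  # prints 12

The actual script below follows the same structure: match_and_findF(), find_essential_matrix(), etc. each write to self, and plot3d() / vtk_plot() act as the driver.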
# -*- coding: utf-8 -*-
###notepad++ and python (cmd /k E:\Python3.6\python.exe "$(FULL_CURRENT_PATH)"& PAUSE & EXIT)
'''sfm'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os
import vtk_visualizer as vv
import sys
from PyQt5.QtWidgets import *
class sfm(object):
    def __init__(self, f1_path, f2_path, feature_algorithm, k_matrix):
        self.f1_path = os.path.abspath(f1_path)
        self.f2_path = os.path.abspath(f2_path)
        self.im1 = cv2.imread(self.f1_path)
        self.im2 = cv2.imread(self.f2_path)
        self.features_algorithm = feature_algorithm
        self.k_matrix = k_matrix
    def features_extract(self):
        '''Detect keypoints and compute descriptors in both images.'''
        if self.features_algorithm.lower() == "sift":
            detector = cv2.xfeatures2d.SIFT_create()
        elif self.features_algorithm.lower() == "surf":
            detector = cv2.xfeatures2d.SURF_create()
        else:
            raise ValueError("feature_algorithm must be 'sift' or 'surf'")
        kp1, des1 = detector.detectAndCompute(self.im1, None)
        kp2, des2 = detector.detectAndCompute(self.im2, None)
        return kp1, kp2, des1, des2
    def match_and_findF(self):
        '''FLANN-match the descriptors and estimate the fundamental matrix F with RANSAC.'''
        FLANN_INDEX_KDTREE = 0
        index_p = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_p = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_p, search_p)
        kp1, kp2, des1, des2 = self.features_extract()
        matches = flann.knnMatch(des1, des2, k=2)
        good = []
        pts1 = []
        pts2 = []
        for m, n in matches:
            if m.distance < 0.6 * n.distance:   # Lowe's ratio test
                good.append(m)
                pts1.append(kp1[m.queryIdx].pt)
                pts2.append(kp2[m.trainIdx].pt)
        pts1 = np.float32(pts1)
        pts2 = np.float32(pts2)
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.RANSAC, 0.01)
        p1_inliner = pts1[mask.ravel() == 1]
        p2_inliner = pts2[mask.ravel() == 1]
        self.F = F
        self.p1_inliner, self.p2_inliner = p1_inliner, p2_inliner
        # return F, p1_inliner, p2_inliner
    def find_essential_matrix(self):
        # E = K^T * F * K
        self.E = self.k_matrix.T @ self.F @ self.k_matrix
    def recover_Camera_ex(self):
        '''Decompose E ourselves; for the theory see Multiple View Geometry in Computer Vision.'''
        w = np.matrix([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
        u, s, vt = np.linalg.svd(self.E)
        # four candidate poses: R = U@W@Vt or U@W.T@Vt, t = +/- u[:, 2];
        # keep the first one that places a test point in front of both cameras
        r = u @ w @ vt
        t = u[:, 2]
        p2 = np.hstack((r, t.reshape(-1, 1)))
        p1 = np.eye(3, 4)
        pk1 = self.k_matrix @ p1
        pk2 = self.k_matrix @ p2
        point1 = self.p1_inliner
        point2 = self.p2_inliner
        p1_temp = point1[0]
        p2_temp = point2[0]
        if not self.is_front_of_both_cameras(pk1, pk2, p1_temp.T, p2_temp.T):
            r = u @ w @ vt
            t = -u[:, 2]
            p2 = np.hstack((r, t.reshape(-1, 1)))
            pk2 = self.k_matrix @ p2
            if not self.is_front_of_both_cameras(pk1, pk2, p1_temp.T, p2_temp.T):
                r = u @ w.T @ vt
                t = u[:, 2]
                p2 = np.hstack((r, t.reshape(-1, 1)))
                pk2 = self.k_matrix @ p2
                if not self.is_front_of_both_cameras(pk1, pk2, p1_temp.T, p2_temp.T):
                    r = u @ w.T @ vt
                    t = -u[:, 2]
                    p2 = np.hstack((r, t.reshape(-1, 1)))
                    pk2 = self.k_matrix @ p2
        self.p1 = p1
        self.p2 = p2
    def recoverCamera_ex(self):
        '''Recover the camera pose with OpenCV's built-in cv2.recoverPose.'''
        # p1_inliner / p2_inliner are the inlier matches found above
        points, r, t, mask = cv2.recoverPose(self.E, self.p1_inliner, self.p2_inliner)
        P2 = np.hstack((r, t.reshape(-1, 1)))
        P1 = np.eye(3, 4)
        self.P1, self.P2 = P1, P2
    def dimension_xyz(self):
        '''Triangulate the inlier matches into 3D points (homogeneous -> Euclidean).'''
        pk1 = self.k_matrix @ self.p1
        pk2 = self.k_matrix @ self.p2
        world_x = cv2.triangulatePoints(pk1, pk2, self.p1_inliner.T, self.p2_inliner.T)
        world_x = world_x / world_x[3]
        self.pts3D = world_x[:3, :].T
    def plot3d(self):
        '''Pipeline: match -> fundamental matrix F -> essential matrix E -> recover pose -> reconstruct 3D points.'''
        self.match_and_findF()        # order matters: this sets self.F, self.p1_inliner, ...
        self.find_essential_matrix()
        self.recover_Camera_ex()
        self.dimension_xyz()
        Ys = self.pts3D[:, 0]
        Zs = self.pts3D[:, 1]
        Xs = self.pts3D[:, 2]
        fig = plt.figure(figsize=(8, 6), dpi=120)
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(Xs, Ys, Zs, c='r', marker='o', s=10, linewidth=1, alpha=1)
        ax.set_xlabel('Y')
        ax.set_ylabel('Z')
        ax.set_zlabel('X')
        plt.title('3D point cloud: Use pan axes button below to inspect')
        plt.show()
    # finally
    # @jit
    def vtk_plot(self):
        '''Draw a mouse-rotatable point cloud with vtk_visualizer (VTK + Qt5).'''
        # Pipeline: match -> fundamental matrix F -> essential matrix E -> recover pose -> reconstruct 3D points
        self.match_and_findF()          # feature matching + F
        self.find_essential_matrix()    # essential matrix E
        self.recover_Camera_ex()        # camera poses from our own decomposition of E
        self.recoverCamera_ex()         # camera poses from cv2.recoverPose
        self.dimension_xyz()            # triangulated 3D world points
        self.get_yaw_pitch_roll()       # angular elements (rotation) of the second camera
        self.get_camera_positions()     # linear elements (positions) of the camera centres
        self.print_results()            # print the results
        vtkControl = vv.VTKVisualizerControl()
        vtkControl.AddPointCloudActor(self.pts3D)
        app = QApplication.instance()
        if app is None:
            app = QApplication(sys.argv)
        app.exec_()
    def get_camera_positions(self):
        '''Camera centres: C = -R^T @ t.'''
        cam1_R, cam2_R = self.p1[:, :3], self.p2[:, :3]
        cam1 = self.p1[:, -1]
        cam2 = self.p2[:, -1]
        self.cam1_pos, self.cam2_pos = -cam1_R.T @ cam1, -cam2_R.T @ cam2
    def get_yaw_pitch_roll(self):
        '''Euler angles of cam2 in the z-y-x rotation convention.'''
        cam2_R = self.p2[:, :3]
        thelta_x = np.arctan2(cam2_R[2, 1], cam2_R[2, 2])
        thelta_y = np.arctan2(-cam2_R[2, 0], np.sqrt(cam2_R[2, 1] ** 2 + cam2_R[2, 2] ** 2))
        thelta_z = np.arctan2(cam2_R[1, 0], cam2_R[0, 0])
        self.thelta_x, self.thelta_y, self.thelta_z = np.rad2deg(thelta_x), np.rad2deg(thelta_y), np.rad2deg(thelta_z)
    def is_front_of_both_cameras(self, P1, P2, first_p1, second_p2):
        '''Check that the triangulated point lies in front of both cameras.
        P1, P2: 3x4 (K-multiplied) projection matrices; first_p1, second_p2: 2xN image points.'''
        world_x = cv2.triangulatePoints(P1, P2, first_p1, second_p2)
        world_x = world_x / world_x[3]
        # depth in camera 1 is simply Z in the world (= camera 1) frame; for camera 2 the sign
        # of the third row of P2 @ X gives the depth sign, since the last row of K is (0, 0, 1)
        depth1 = world_x[2]
        depth2 = (np.asarray(P2) @ world_x)[2]
        return bool(np.all(depth1 > 0) and np.all(depth2 > 0))
    def print_results(self):
        print("*********** Two-view SfM *************\n")
        print("Number of matched (inlier) points: %d\n" % len(self.p1_inliner))
        print("******* Camera poses from cv2.recoverPose ***********\n")
        print("Left camera Pl:{}\n".format(self.P1))
        print("Right camera Pr:{}\n".format(self.P2))
        print("******* Camera poses from our own decomposition of E ***********\n")
        print("Left camera Pl:{}\n".format(self.p1))
        print("Right camera Pr:{}\n".format(self.p2))
        print("Left camera position Xs1={}, Ys1={}, Zs1={}".format(self.cam1_pos[0], self.cam1_pos[1], self.cam1_pos[2]))
        print("Right camera position Xs2={}, Ys2={}, Zs2={}".format(self.cam2_pos[0, 0], self.cam2_pos[1, 0], self.cam2_pos[2, 0]))
        # print(self.cam1_pos.shape)  # cam1_pos has shape (3,)
        # print(self.cam2_pos.shape)  # cam2_pos has shape (3, 1)
        print("******** Own decomposition of R -> roll:{}, pitch:{}, yaw:{} *************".format(self.thelta_x, self.thelta_y, self.thelta_z))
if __name__ == "__main__":
    f1 = r"C:\Users\Y\Desktop\12.jpg"
    f2 = r"C:\Users\Y\Desktop\23.jpg"
    k_matrix = np.array([[648, 0, 1944], [0, 648, 1296], [0, 0, 1]]).astype("float32")
    algorithm = "surf"
    app = sfm(f1, f2, algorithm, k_matrix)
    # app.plot3d()
    app.vtk_plot()
For the decomposition of the E matrix into R and t, see Multiple View Geometry in Computer Vision, Second Edition.
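As a quick reference (this is the standard result from that book, and it is what recover_Camera_ex above implements): with the SVD E = U diag(1,1,0) V^T and u_3 the last column of U, the four candidate camera matrices for the second view are

P_2 \in \left\{\, [\,UWV^{\top} \mid +u_3\,],\; [\,UWV^{\top} \mid -u_3\,],\; [\,UW^{\top}V^{\top} \mid +u_3\,],\; [\,UW^{\top}V^{\top} \mid -u_3\,] \,\right\},
\qquad
W = \begin{bmatrix} 0 & -1 & 0 \\ 1 & 0 & 0 \\ 0 & 0 & 1 \end{bmatrix}

Only one of the four places the reconstructed points in front of both cameras, which is exactly what the is_front_of_both_cameras check selects.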