import numpy as np
import cv2
from matplotlib import pyplot as plt
# img = cv2.imread('./resource/opencv/image/chessboard.png', cv2.IMREAD_COLOR)
img = cv2.imread('./resource/opencv/image/pattern.png', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
# The input image must be float32; the last parameter (k) is typically between 0.04 and 0.06
dst = cv2.cornerHarris(gray, 2, 3, 0.05)
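# cornerHarris(src, blockSize, ksize, k): blockSize is the corner-neighborhood size,
# ksize the Sobel aperture, and k the Harris detector free parameter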
dst = cv2.dilate(dst, None)
img[dst>0.01*dst.max()] = [0, 0, 255]
cv2.imshow('dst', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('./resource/opencv/image/subpixel.png', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
dst = cv2.dilate(dst, None)
ret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)
dst = np.uint8(dst)
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
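# i.e. stop refining after 100 iterations or once the corner moves by less than 0.001 px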
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1, -1), criteria)
res = np.hstack((centroids, corners))
res = np.intp(res)  # np.int0 was removed in NumPy 2.0; np.intp is the equivalent
img[res[:,1],res[:,0]]=[0,0,255]
img[res[:,3],res[:,2]]=[0,255,0]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('./resource/opencv/image/shitomasi_block.jpg', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
corners = np.intp(corners)  # np.int0 was removed in NumPy 2.0
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)
plt.imshow(img)
plt.show()
SIFT, the Scale-Invariant Feature Transform, is a local feature descriptor used in image processing. The description it produces is invariant to scale, and the algorithm detects keypoints in an image.
cv2.SIFT_create()
cv2.drawKeypoints(img, kp, out_img, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS): draw the keypoints
import numpy as np
import cv2
# Read the image
# img = cv2.imread('./resource/opencv/image/home.jpg')
img = cv2.imread('./resource/opencv/image/AverageMaleFace.jpg')
key_points = img.copy()
# Instantiate the SIFT algorithm
sift = cv2.SIFT_create()
# Detect the keypoints
kp = sift.detect(img, None)
print(np.array(kp).shape)
# Draw the keypoints
cv2.drawKeypoints(img, kp, key_points, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# Show the image
cv2.imshow("key points", key_points)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Save the image
# cv2.imwrite("key_points.jpg", key_points)
# Compute the descriptors
kp, des = sift.compute(img, kp)
# Debug output
print(des.shape)
print(des[0])
cv2.imshow('kp', key_points)
cv2.waitKey(0)
cv2.destroyAllWindows()
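The two steps can also be combined; a minimal sketch using the same sift instance (this one-step call is what the matching examples further below use):
# detect keypoints and compute descriptors in a single call
kp, des = sift.detectAndCompute(img, None)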
# SURF is patented: it lives in opencv-contrib and needs a build with the
# nonfree modules enabled. The old cv2.SURF(400) constructor no longer exists.
img = cv2.imread('fly.png', 0)
surf = cv2.xfeatures2d.SURF_create(400)
kp, des = surf.detectAndCompute(img, None)
print(len(kp))  # 699
print(surf.getHessianThreshold())
surf.setHessianThreshold(50000)
kp, des = surf.detectAndCompute(img, None)
print(len(kp))  # 47
# U-SURF: do not compute the keypoint orientations
print(surf.getUpright())  # False
surf.setUpright(True)
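A quick visual check of U-SURF, assuming the same grayscale image as above: redetect with upright set and draw the keypoints (they should all point the same way).
kp, des = surf.detectAndCompute(img, None)
img2 = cv2.drawKeypoints(img, kp, None, (255, 0, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('upright surf', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()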
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('./resource/opencv/image/fly.jpg', cv2.IMREAD_GRAYSCALE)
# fast = cv2.FastFeatureDetector_create(threshold=100, nonmaxSuppression=False, type=cv2.FAST_FEATURE_DETECTOR_TYPE_5_8)
# The threshold is an intensity difference, so values above 255 detect nothing in 8-bit images
fast = cv2.FastFeatureDetector_create(threshold=40)
kp = fast.detect(img, None)
img2 = cv2.drawKeypoints(img, kp, img.copy(), color=(0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('fast', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('./resource/opencv/image/fly.jpg', cv2.IMREAD_GRAYSCALE)
# Initiate STAR detector
# Both STAR and BRIEF live in opencv-contrib's xfeatures2d module; the OpenCV 2.x
# factories FeatureDetector_create/DescriptorExtractor_create no longer exist
star = cv2.xfeatures2d.StarDetector_create()
# Initiate BRIEF extractor
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
# find the keypoints with STAR
kp = star.detect(img,None)
# compute the descriptors with BRIEF
kp, des = brief.compute(img, kp)
print(brief.descriptorSize())  # descriptor length in bytes (default 32)
print(des.shape)
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('./resource/opencv/image/fly.jpg', cv2.IMREAD_GRAYSCALE)
# ORB_create(nfeatures=..., scaleFactor=..., nlevels=..., edgeThreshold=..., firstLevel=..., WTA_K=..., scoreType=..., patchSize=..., fastThreshold=...)
orb = cv2.ORB_create()
kp = orb.detect(img, None)
kp, des = orb.compute(img, kp)
img2 = cv2.drawKeypoints(img, kp, img.copy(), color=(255, 0, 0), flags=0)
plt.imshow(img2)
plt.show()
Feature matching in OpenCV
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('./resource/opencv/image/box.png', 0)
img2 = cv2.imread('./resource/opencv/image/box_in_scene.png', 0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
# bf.match(des1, des2) returns a list of DMatch objects. Each DMatch has
# the following attributes:
# - DMatch.distance: distance between the descriptors; smaller is better.
# - DMatch.trainIdx: index of the descriptor in the train (target) image.
# - DMatch.queryIdx: index of the descriptor in the query image.
# - DMatch.imgIdx: index of the train image.
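# For example, the matched point coordinates can be recovered from the indices:
#   pt1 = kp1[matches[0].queryIdx].pt
#   pt2 = kp2[matches[0].trainIdx].pt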
# Sort matches by distance
matches = sorted(matches, key = lambda x:x.distance)
# Draw the first 30 matches
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:30], None, flags=2)
cv2.imshow('img', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
Now we use BFMatcher.knnMatch() to get the k best matches. Here we set k = 2 so that we can apply the ratio test from D. Lowe's SIFT paper.
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('./resource/opencv/image/box.png', 0)
img2 = cv2.imread('./resource/opencv/image/box_in_scene.png', 0)
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
bf = cv2.BFMatcher_create()
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
# drawMatchesKnn(img1, keypoints1, img2, keypoints2, matches1to2, outImg, matchColor=..., singlePointColor=..., matchesMask=..., flags: int = ...)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[:100], None, flags=2)
plt.imshow(img3)
plt.show()
FLANN is short for Fast Library for Approximate Nearest Neighbors. It is a collection of optimized algorithms for nearest-neighbor search over large datasets and high-dimensional features. For large datasets it performs better than BFMatcher. Let's run the second example again with FLANN matching and see how it works.
import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('./resource/opencv/image/box.png', 0)
img2 = cv2.imread('./resource/opencv/image/box_in_scene.png', 0)
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
flann = cv2.FlannBasedMatcher_create()
matches = flann.knnMatch(des1, des2, k=2)
matchesMask = [[0, 0] for i in range(len(matches))]
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=0)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
plt.imshow(img3)
plt.show()
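FlannBasedMatcher_create() uses default KD-tree index parameters, which suit float descriptors such as SIFT. To control the index explicitly, or to match binary descriptors such as ORB, the matcher can instead be constructed with parameter dicts; a sketch following the official tutorial (the parameter values below are the tutorial's suggestions, not tuned here):
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # how many times the index trees are recursively traversed
flann = cv2.FlannBasedMatcher(index_params, search_params)
# For binary descriptors (e.g. ORB), use an LSH index instead:
# FLANN_INDEX_LSH = 6
# index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1)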