In OpenCV builds newer than 3.4.2, cv2.xfeatures2d.SIFT_create() was disabled for patent reasons (a patent-free SIFT only returned to the main module in 4.4), and opencv-python 3.4.2.17 ships no wheels for Python 3.8, so downgrade Python to 3.6 and pin OpenCV to 3.4.2.17:
pip uninstall opencv-python
pip install opencv-python==3.4.2.17
pip install opencv-contrib-python==3.4.2.17
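A quick sanity check after the reinstall (a minimal sketch; it only confirms the contrib build exposes SIFT):
import cv2
print(cv2.__version__)                # expect 3.4.2
sift = cv2.xfeatures2d.SIFT_create()  # raises AttributeError if the contrib build is missing
print(type(sift))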
import cv2
img_path = r'../image/paojie.jpg'
img = cv2.imread(img_path)
# print(img.shape)
# img = cv2.resize(img,(136 * 3,512 * 3))
cv2.imshow("original", img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR, so convert from BGR
# Run SIFT
sift = cv2.xfeatures2d.SIFT_create()
keypoints, descriptor = sift.detectAndCompute(gray, None)
cv2.drawKeypoints(image=img,
                  outImage=img,
                  keypoints=keypoints,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                  color=(51, 163, 236))
cv2.imshow("SIFT", img)
while True:
    if cv2.waitKey() & 0xff == ord('q'):
        break
cv2.destroyAllWindows()
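Note that from OpenCV 4.4 onward the expired patent means SIFT lives in the main module again, so on a modern install the only change this script needs is the constructor:
sift = cv2.SIFT_create()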
import cv2
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
img_path = r'../image/paojie.jpg'
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR
# Run SIFT
sift = cv2.xfeatures2d.SIFT_create()
keypoints, descriptor = sift.detectAndCompute(gray, None)
# print(descriptor.shape)
# m,n=descriptor.shape
# print(m,n)
descriptor = StandardScaler().fit_transform(descriptor)
pca = PCA(n_components=100)
pca.fit(descriptor)
print(pca.singular_values_)  # inspect the singular values
print(pca.components_)       # inspect the corresponding principal component vectors
# print(pca.components_.shape)
cv2.drawKeypoints(image=img,
                  outImage=img,
                  keypoints=keypoints,
                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                  color=(255, 0, 255))
cv2.imshow("SIFT", img)
while True:
    if cv2.waitKey() & 0xff == ord('q'):
        break
cv2.destroyAllWindows()
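The script above only fits the PCA and prints its factors; to actually obtain the dimensionality-reduced descriptors, transform them and check how much variance the 100 components retain (a minimal sketch continuing from the variables above):
reduced = pca.transform(descriptor)         # shape: (num_keypoints, 100)
print(reduced.shape)
print(pca.explained_variance_ratio_.sum())  # fraction of variance kept by 100 components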
import cv2
img = cv2.imread('../image/paojie.jpg', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('origin', img)
# SIFT
detector = cv2.xfeatures2d.SIFT_create()
keypoints = detector.detect(gray, None)
img = cv2.drawKeypoints(image=gray,
                        keypoints=keypoints,
                        outImage=None,
                        color=(255, 0, 255))
# img = cv2.drawKeypoints(gray,keypoints,flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('test', img)
while True:
    if cv2.waitKey() & 0xff == ord('q'):
        break
cv2.destroyAllWindows()
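Unlike the earlier listings, this script calls detect() alone, which returns keypoints without descriptors; if the descriptors are needed afterwards, they can be computed from the found keypoints with the same detector (a minimal sketch):
keypoints, descriptors = detector.compute(gray, keypoints)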
# coding: utf-8
from matplotlib import pyplot as plt
from imagedt.decorator import time_cost
import cv2
print('cv version: ', cv2.__version__)
def bgr_rgb(img):
    # Swap channel order so matplotlib (RGB) displays OpenCV's BGR images correctly
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b])
def orb_detect(image_a, image_b):
    # feature match
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image_a, None)
    kp2, des2 = orb.detectAndCompute(image_b, None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw the first 100 matches.
    img3 = cv2.drawMatches(image_a, kp1, image_b, kp2, matches[:100], None, flags=2)
    return bgr_rgb(img3)
@time_cost
def sift_detect(img1, img2, detector='surf'):
    if detector.startswith('si'):
        print("sift detector......")
        sift = cv2.xfeatures2d.SIFT_create()
    else:
        print("surf detector......")
        sift = cv2.xfeatures2d.SURF_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply ratio test
    good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]
    # cv2.drawMatchesKnn expects list of lists as matches.
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
    return bgr_rgb(img3)
if __name__ == "__main__":
    # load image
    image_a = cv2.imread('../image/dongman1.jpg')
    image_b = cv2.imread('../image/dongman2.jpg')
    # ORB
    # img = orb_detect(image_a, image_b)
    # SIFT or SURF
    img = sift_detect(image_a, image_b)
    plt.imshow(img)
    plt.show()
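imagedt is a third-party package and may not be installed; if it is unavailable, a minimal stand-in for its time_cost decorator could look like this (a sketch, not the package's actual implementation):
import time
import functools

def time_cost(func):
    # Print how long each decorated call takes
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        print('%s cost: %.3fs' % (func.__name__, time.time() - start))
        return result
    return wrapper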
import cv2
import numpy as np
from os import walk
from os.path import join
def create_descriptors(folder):
    files = []
    for (dirpath, dirnames, filenames) in walk(folder):
        files.extend(filenames)
    for f in files:
        if '.jpg' in f:
            save_descriptor(folder, f, cv2.xfeatures2d.SIFT_create())
def save_descriptor(folder, image_path, feature_detector):
    # Skip files that are already .npy descriptor files
    if image_path.endswith("npy"):
        return
    # Read the image and compute its features
    img = cv2.imread(join(folder, image_path), 0)
    keypoints, descriptors = feature_detector.detectAndCompute(img, None)
    # Build the output filename and save the descriptors to a .npy file
    descriptor_file = image_path.replace("jpg", "npy")
    np.save(join(folder, descriptor_file), descriptors)
if __name__ == '__main__':
    path = 'D://PycharmProjects//pythonProject//image'
    create_descriptors(path)
This saves each image's feature data to a .npy file. The next step is to match a chosen query image against these descriptor files and find the best-matching image.
from os.path import join
from matplotlib import pyplot as plt
from os import walk
import numpy as np
import cv2
query = cv2.imread('D://PycharmProjects//pythonProject//image/1.jpg', 0)
folder = 'D:\\PycharmProjects\\pythonProject\\image'
descriptors = []
# Collect the descriptor file names
for (dirpath, dirnames, filenames) in walk(folder):
    for f in filenames:
        if f.endswith("npy"):
            descriptors.append(f)
print(descriptors)
# Detect the query image's keypoints and descriptors with SIFT
sift = cv2.xfeatures2d.SIFT_create()
query_kp, query_ds = sift.detectAndCompute(query, None)
# Create the FLANN matcher
index_params = dict(algorithm=1, trees=5)  # algorithm=1 is FLANN_INDEX_KDTREE, which the trees parameter belongs to
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
potential_culprits = {}
for d in descriptors:
    # Match the query image against each descriptor file
    matches = flann.knnMatch(query_ds, np.load(join(folder, d)), k=2)
    # Filter out poor matches with the ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    # Print the number of good matches between each image and the query
    print("img is %s ! matching count is %d" % (d, len(good)))
    potential_culprits[d] = len(good)
max_matches = None
potential_suspect = None
for culprit, matches in potential_culprits.items():
    if max_matches is None or matches > max_matches:
        max_matches = matches
        potential_suspect = culprit
print("potential suspect is %s" % potential_suspect.replace("npy", "").upper())
import numpy as np
import cv2
class demo2():
    def __init__(self, Videopath='../video/李永乐老师.mp4'):
        self.capture = cv2.VideoCapture(Videopath)

    def Gaussian(self, drawContours=False, drawRectangle=True):
        cap = self.capture
        # Kernel used by the morphological operations
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        # Create the Gaussian mixture background subtractor
        fgbg = cv2.createBackgroundSubtractorMOG2()
        # Mark the pedestrians in the video in real time
        while True:
            ret, frame = cap.read()
            if not ret:  # stop when the video ends or a frame cannot be read
                break
            fgmask = fgbg.apply(frame)
            # Morphological opening to remove noise
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # Find the contours in the foreground mask (OpenCV 3.x returns three values)
            im, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if drawContours:  # mark the contours of the foreground extracted by background subtraction
                n = len(contours)
                for i in range(n):
                    temp = np.zeros(frame.shape, np.uint8)
                    temp = cv2.drawContours(temp, contours, i, (255, 255, 255), 2)
                    cv2.imshow('frame', frame)
                    cv2.imshow("contours", temp)
                    cv2.waitKey()
            if drawRectangle:
                for c in contours:
                    # Compute each contour's perimeter
                    perimeter = cv2.arcLength(c, True)
                    if perimeter > 188:
                        # Fit an upright (non-rotated) bounding rectangle
                        x, y, w, h = cv2.boundingRect(c)
                        # Draw the rectangle
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('frame', frame)
            cv2.imshow('fgmask', fgmask)
            k = cv2.waitKey(20)
            if k == 27:  # Esc quits
                break
        cap.release()
        cv2.destroyAllWindows()

demo2().Gaussian()
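createBackgroundSubtractorMOG2 also takes tuning parameters (shown here with their documented defaults), and with shadow detection enabled it labels shadow pixels gray (value 127) rather than white, so they can be thresholded away inside the loop above; a minimal sketch:
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=True)
fgmask = fgbg.apply(frame)
# Keep only definite foreground (255) and drop the gray shadow pixels (127)
_, fgmask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)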
import cv2
def demo3():
    # Initialize the video capture device
    # gVideoDevice = cv2.VideoCapture("./video.avi")
    gVideoDevice = cv2.VideoCapture(0)
    if not gVideoDevice.isOpened():
        print('open video failed')
        return
    else:
        print('open video succeeded')
    # Pick the frame to draw the ROI on
    print("Press Enter to select the current frame, any other key for the next frame")
    while True:
        gCapStatus, gFrame = gVideoDevice.read()
        cv2.imshow("pick frame", gFrame)
        k = cv2.waitKey()
        if k == 13:
            break
    # Select the region of interest
    cv2.destroyWindow("pick frame")
    gROI = cv2.selectROI("ROI frame", gFrame, False)
    if gROI == (0, 0, 0, 0):  # selectROI returns an all-zero box when nothing was selected
        print("Empty selection, exiting")
        quit()
    # Initialize the tracker
    gTracker = cv2.TrackerKCF_create()
    gTracker.init(gFrame, gROI)
    # Loop over frames and track
    while True:
        gCapStatus, gFrame = gVideoDevice.read()
        if gCapStatus:
            # Show the tracking result
            status, coord = gTracker.update(gFrame)
            message = {}  # initialize so the failure branch can also fill in 'msg'
            if status:
                p1 = (int(coord[0]), int(coord[1]))
                p2 = (int(coord[0] + coord[2]), int(coord[1] + coord[3]))
                message["coord"] = [(p1, p2)]
                cv2.rectangle(gFrame, p1, p2, (255, 0, 0), 2, 1)
                message['msg'] = "is tracking"
            else:
                message['msg'] = "KCF error, the tracker needs to be re-initialized"
            cv2.imshow('tracked image', gFrame)
            print(message)
            key = cv2.waitKey()  # press any key to step to the next frame, Esc to quit
            if key == 27:
                break
        else:
            print("Failed to capture a frame")
            quit()

demo3()
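The same contrib build (3.4.2.17) ships other trackers with the identical init/update interface, so they can be swapped in for TrackerKCF_create above; a sketch (exact tracker availability can vary slightly between builds):
gTracker = cv2.TrackerCSRT_create()     # usually more accurate than KCF, somewhat slower
# gTracker = cv2.TrackerMOSSE_create()  # very fast correlation filter, less accurate
# gTracker = cv2.TrackerMIL_create()    # older tracker, kept for comparison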