OpenCV is a computer vision library used for image processing.
API:
import numpy as np
import cv2 as cv
# Read an image
original = cv.imread('')
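# Note: cv.imread does not raise on a bad path; it silently returns None,
# so a simple guard (an optional extra, not in the original notes) catches that early:
assert original is not None, 'image could not be read'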
# Display the image
cv.imshow('title', original)
cv.waitKey()  # without waitKey the window closes immediately; press any key in the window to continue
# Crop the image (keep the central region)
h,w = original.shape[:2]
l,t = int(w/4),int(h/4)
r,b = int(w*3/4),int(h*3/4)
cropped = original[t:b,l:r]
# Resize the image
scaled = cv.resize(original, (int(w / 4), int(h / 4)),
                   interpolation=cv.INTER_LINEAR)
# Save the image (here the scaled one; any of the images above works)
cv.imwrite('path/name', scaled)
Idea: detect an object's edges by finding the pixels where the brightness gradient changes the most.
API:
import cv2 as cv
original = cv.imread('',cv.IMREAD_GRAYSCALE)
# Sobel edge detection
s_img = cv.Sobel(original, cv.CV_64F, 1, 0, ksize=5)
cv.imshow('sobel',s_img)
# Laplacian edge detection
l_img = cv.Laplacian(original,cv.CV_64F)
cv.imshow('Laplacian',l_img)
# Canny edge detection
c_img = cv.Canny(original,50,240)
cv.imshow('canny',c_img)
cv.waitKey()
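Note: the Sobel and Laplacian results above are CV_64F and may contain negative values, so imshow can render them oddly. A common fix (an optional extra step, not from the original notes) is to convert back to 8-bit before displaying:
s_abs = cv.convertScaleAbs(s_img)  # absolute value scaled into 0..255, uint8
cv.imshow('sobel-8bit', s_abs)
cv.waitKey()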
Histogram equalization raises the image's brightness/contrast, which makes edge detection easier.
import cv2 as cv
original = cv.imread('')
# Convert the color image to grayscale
gray = cv.cvtColor(original,cv.COLOR_BGR2GRAY)
# Histogram equalization of the grayscale image
equalized_gray = cv.equalizeHist(gray)
cv.imshow('Equalized Gray',equalized_gray)
# Boost brightness on the color image: equalize the luminance (Y) channel in YUV space
yuv = cv.cvtColor(original, cv.COLOR_BGR2YUV)
yuv[..., 0] = cv.equalizeHist(yuv[..., 0])
equalized_color = cv.cvtColor(yuv, cv.COLOR_YUV2BGR)
cv.imshow('Equalized Color', equalized_color)
cv.waitKey()
Corner: a point where straight edge lines intersect.
API:
import cv2 as cv
original = cv.imread('')
# Convert the original image to grayscale
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
# Harris corner detector
# 7: neighborhood (block) size, 5: Sobel aperture size used for the gradients,
# 0.04: the Harris detector's free parameter k
corners = cv.cornerHarris(gray, 7, 5, 0.04)
# Draw the corners: mark pixels whose Harris response exceeds 1% of the maximum in red (BGR)
mixture = original.copy()
mixture[corners > corners.max() * 0.01] = [0, 0, 255]
cv.imshow('Corner',mixture)
cv.waitKey()
Commonly used feature-point detection methods: STAR detection and SIFT detection.
Feature-point detection combines edge detection and corner detection.
API:
import cv2 as cv
original = cv.imread('')
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
# Create a STAR feature-point detector
star = cv.xfeatures2d.StarDetector_create()
# Detect all keypoints in the grayscale image
keypoints = star.detect(gray)
mixture = original.copy()
cv.drawKeypoints(
    original, keypoints, mixture,
    flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)
cv.imshow('Mixture',mixture)
cv.waitKey()
API:
import cv2 as cv
original = cv.imread('')
gray = cv.cvtColor(original, cv.COLOR_BGR2GRAY)
# Create a SIFT feature-point detector (cv.SIFT_create() in newer OpenCV builds)
sift = cv.xfeatures2d.SIFT_create()
# Detect all keypoints in the grayscale image
keypoints = sift.detect(gray)
mixture = original.copy()
cv.drawKeypoints(
    original, keypoints, mixture,
    flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
)
cv.imshow('Mixture',mixture)
cv.waitKey()
The descriptor matrix records the gradient information of every keypoint in the image. After extracting descriptor matrices, a hidden Markov model can be trained given enough samples, much like MFCC (Mel-frequency cepstral coefficients) features are used in speech recognition.
API:
import cv2 as cv
original = cv.imread('')
gray = cv.cvtColor(original,cv.COLOR_BGR2GRAY)
sift = cv.xfeatures2d.SIFT_create()
keypoints = sift.detect(gray)
# Compute the descriptor matrix desc for the detected keypoints
_,desc = sift.compute(gray,keypoints)
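Each row of desc is the descriptor of one keypoint (128 values for SIFT), so its shape is (number of keypoints, 128). A quick sanity check, assuming at least one keypoint was detected:
print(len(keypoints), desc.shape)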
API:
import numpy as np
import cv2 as cv
import hmmlearn.hmm as hl
def search_files(directory):
    # See CSDN ghcjasongo, utility 3 (a minimal sketch is also given after this example)
    pass
# Collect the training set
train_objects = search_files('')
train_x,train_y = [],[]
for label, filenames in train_objects.items():
    descs = np.array([])
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # Rescale so every sample yields a comparable number of descriptors
        h, w = gray.shape[:2]
        f = 200 / min(h, w)
        gray = cv.resize(gray, None, fx=f, fy=f)
        # Extract SIFT keypoints and their descriptors
        sift = cv.xfeatures2d.SIFT_create()
        keypoints = sift.detect(gray)
        _, desc = sift.compute(gray, keypoints)
        if len(descs) == 0:
            descs = desc
        else:
            descs = np.append(descs, desc, axis=0)
    train_x.append(descs)
    train_y.append(label)
# Train one hidden Markov model per label
models = {}
for descs, label in zip(train_x, train_y):
    model = hl.GaussianHMM(
        n_components=4, covariance_type='diag', n_iter=100
    )
    models[label] = model.fit(descs)
# Prepare the test set (test_x, test_y) in the same way as the training set
# Run the test data through the models
pred_y = []
for descs in test_x:  # one pass per test sample (3 in the original example)
    # Score every trained model against the current sample's descriptors
    best_score, best_label = None, None
    for label, model in models.items():
        score = model.score(descs)
        if (best_score is None) or (best_score < score):
            best_score = score
            best_label = label
    pred_y.append(best_label)
print(test_y)
print(pred_y)
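The search_files helper above is only a placeholder (see the referenced CSDN post). A minimal sketch of what it might do, assuming the training directory holds one sub-folder per label with .jpg images and the folder name is used as the label:
import os

def search_files(directory):
    # Map each label (sub-folder name) to the list of image paths under it
    objects = {}
    for curdir, subdirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.jpg'):
                label = os.path.basename(curdir)
                objects.setdefault(label, []).append(os.path.join(curdir, file))
    return objects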
API:
import cv2 as cv
# Open the video capture device (camera 0)
video_capture = cv.VideoCapture(0)
while True:
    # Read one frame (read() returns a (success, frame) pair)
    frame = video_capture.read()[1]
    cv.imshow('VideoCapture', frame)
    if cv.waitKey(33) == 27:  # exit on Esc
        break
video_capture.release()
cv.destroyAllWindows()
Haar cascade face localisation
API:
import cv2 as cv
# Haar cascade detectors for face, eye and nose localisation
face_d = cv.CascadeClassifier('haar/face.xml')
eye_d = cv.CascadeClassifier('haar/eye.xml')
nose_d = cv.CascadeClassifier('haar/nose.xml')
# Open the camera
vc = cv.VideoCapture(0)
while True:
    frame = vc.read()[1]
    # 1.3 is the image-pyramid scale factor, 5 is minNeighbors (not a face count)
    faces = face_d.detectMultiScale(frame, 1.3, 5)
    for l, t, w, h in faces:
        a, b = int(w / 2), int(h / 2)
        cv.ellipse(frame, (l + a, t + b), (a, b), 0, 0, 360, (255, 0, 255), 2)
        # cv.ellipse parameters, for reference:
        # cv.ellipse(
        #     face,            # image
        #     (l + a, t + b),  # center of the ellipse
        #     (a, b),          # axis half-lengths
        #     0,               # rotation angle of the ellipse
        #     0, 360,          # start angle, end angle
        #     (255, 0, 255),   # color (BGR)
        #     2                # line width
        # )
        face = frame[t:t + h, l:l + w]
        eyes = eye_d.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in eyes:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (255, 0, 255), 2)
        noses = nose_d.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in noses:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (255, 0, 255), 2)
    cv.imshow('VideoCapture', frame)
    if cv.waitKey(33) == 27:
        break
vc.release()
cv.destroyAllWindows()
LBPH (Local Binary Patterns Histograms) face recognition in OpenCV
import os
import numpy as np
import cv2 as cv
import sklearn.preprocessing as sp
# Create a Haar cascade face detector
fd = cv.CascadeClassifier('haar/face.xml')
# See utility 3 in the referenced post (same directory-walking idea as search_files above)
def search_faces(directory):
    pass
train_faces = search_faces('faces/training')
# Create a label encoder
codec = sp.LabelEncoder()
codec.fit(list(train_faces.keys()))
train_x, train_y = [], []
for label, filenames in train_faces.items():
    for filename in filenames:
        image = cv.imread(filename)
        # Convert the face image to grayscale
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = fd.detectMultiScale(gray, 1.1, 2,
                                    minSize=(100, 100))
        for l, t, w, h in faces:
            train_x.append(
                gray[t:t + h, l:l + w])
            train_y.append(
                codec.transform([label])[0])
train_y = np.array(train_y)
# Local Binary Patterns Histograms face-recognition classifier
model = cv.face.LBPHFaceRecognizer_create()
# Train the model
model.train(train_x, train_y)
# Prepare the test set
test_faces = search_faces('faces/testing')
test_x, test_y, test_z = [], [], []
for label, filenames in test_faces.items():
    for filename in filenames:
        image = cv.imread(filename)
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        faces = fd.detectMultiScale(gray, 1.1, 2,
                                    minSize=(100, 100))
        for l, t, w, h in faces:
            test_x.append(
                gray[t:t + h, l:l + w])
            test_y.append(
                codec.transform([label])[0])
            a, b = int(w / 2), int(h / 2)
            # Draw an ellipse around the detected face
            cv.ellipse(image, (l + a, t + b),
                       (a, b), 0, 0, 360,
                       (255, 0, 255), 2)
            test_z.append(image)
test_y = np.array(test_y)
# Predict
pred_test_y = []
for face in test_x:
    # predict() returns a (label, confidence) pair; keep the label code
    pred_code = model.predict(face)[0]
    pred_test_y.append(pred_code)
escape = False
while not escape:
    for code, pred_code, image in zip(
            test_y, pred_test_y, test_z):
        label, pred_label = \
            codec.inverse_transform([code, pred_code])
        text = '{} {} {}'.format(
            label,
            '==' if code == pred_code else '!=',
            pred_label)
        # Write the result text on the image
        cv.putText(image, text, (10, 60),
                   cv.FONT_HERSHEY_SIMPLEX, 2,
                   (255, 255, 255), 6)
        cv.imshow('Recognizing...', image)
        if cv.waitKey(1000) == 27:
            escape = True
            break