Table of Contents
- Blink detection only
- Rewritten version
- Switched to the webcam
- Added mouth opening (yawning)
import cv2 as cv
import dlib
import imutils
import numpy as np
from imutils import face_utils as fu
def eye_aspect_ratio(eye):
    # vertical euclidean distances between the upper and lower eyelid points
    a = np.linalg.norm(eye[1] - eye[5])
    b = np.linalg.norm(eye[2] - eye[4])
    # horizontal euclidean distance between the eye corners
    c = np.linalg.norm(eye[0] - eye[3])
    # compute and return the eye aspect ratio
    d = (a + b) / (2.0 * c)
    return d
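# A quick note on the point ordering (an illustration, matching the diagram in
# the detect_blinks.py listing further down): the six eye landmarks are laid
# out roughly as
#         [1] [2]
#     [0]         [3]
#         [5] [4]
# so the value returned above is EAR = (||p1-p5|| + ||p2-p4||) / (2 * ||p0-p3||),
# which drops towards 0 as the eye closes.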
# consecutive frames with the eyes below the threshold
COUNTER = 0
# total number of blinks
TOTAL = 0
# open the webcam
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
# create the face detector
detector = dlib.get_frontal_face_detector()
# create the 68-point landmark predictor
predictor = dlib.shape_predictor('./1.dat')
# start and end indices of the left and right eye within the 68 landmarks
(lStart, lEnd) = fu.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = fu.FACIAL_LANDMARKS_IDXS["right_eye"]
# loop as long as the webcam is open
while cap.isOpened():
    # grab a frame; stop if it cannot be read
    ret, frame = cap.read()
    if not ret:
        break
    # resize to a fixed output width
    frame = imutils.resize(frame, width=750)
    # convert to grayscale to speed up detection
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # detect all face rectangles in this frame
    faces = detector(gray, 0)
    # loop over the detected faces
    for face in faces:
        # predict the 68 landmark coordinates for this face
        shape = predictor(gray, face)
        # convert them to a NumPy coordinate array
        shape = fu.shape_to_np(shape)
        # slice out the left and right eye coordinates
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        # compute the aspect ratio of each eye
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the two ratios
        earAVG = (leftEAR + rightEAR) / 2.0
        # compute the convex hull of each eye
        leftEyeHull = cv.convexHull(leftEye)
        rightEyeHull = cv.convexHull(rightEye)
        # draw the hulls, i.e. outline the eyes
        cv.drawContours(frame, [leftEyeHull], -1, (255, 255, 255), 1)
        cv.drawContours(frame, [rightEyeHull], -1, (255, 255, 255), 1)
        '''
        If the eye aspect ratio stays below 0.3 for at least 3 consecutive
        frames, count it as one blink.
        '''
        if earAVG < 0.3:
            COUNTER += 1
        else:
            if COUNTER >= 3:
                TOTAL += 1
            COUNTER = 0
        # drawing the on-screen output
        # blink count in the top-left corner
        cv.putText(
            frame,
            "Blinks:{0}".format(TOTAL),
            (10, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # live earAVG next to it
        cv.putText(
            frame,
            "earAVG:{0}".format(earAVG),
            (200, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # quit hint in the bottom-right corner
        cv.putText(
            frame,
            "Press 'Esc' to Quit",
            (515, 550),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
    # show the frame
    cv.imshow('camera', frame)
    # press Esc to quit
    if cv.waitKey(1) & 0xff == 27:
        break
# close all windows
cv.destroyAllWindows()
# release the webcam
cap.release()
dlib configuration and usage
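Before the full script, here is a minimal sketch of what the dlib setup boils down to. It assumes './1.dat' is simply the renamed shape_predictor_68_face_landmarks.dat model mentioned in the USAGE lines below; face_utils.FACIAL_LANDMARKS_IDXS then maps each facial region name to a (start, end) slice into the 68 landmarks.
import dlib
from imutils import face_utils
# HOG-based frontal face detector
detector = dlib.get_frontal_face_detector()
# 68-point landmark model (assumed to be the renamed official .dat file)
predictor = dlib.shape_predictor('./1.dat')
# print the (start, end) landmark slice for each facial region,
# e.g. "left_eye", "right_eye" and "mouth"
for name, (start, end) in face_utils.FACIAL_LANDMARKS_IDXS.items():
    print(name, start, end)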
# USAGE
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat --video blink_detection_demo.mp4
# python detect_blinks.py --shape-predictor shape_predictor_68_face_landmarks.dat
import cv2
import dlib
import imutils
from imutils import face_utils
# import the necessary packages
from scipy.spatial import distance as dist
def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmark (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])
    '''
        [1] [2]
    [0]         [3]
        [5] [4]
    '''
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
    return ear
# construct the argument parser and parse the arguments (commented out by me)
# ap = argparse.ArgumentParser()
# ap.add_argument("-p", "--shape-predictor", required=True,
# help="path to facial landmark predictor")
# ap.add_argument("-v", "--video", type=str, default="",
# help="path to input video file")
# args = vars(ap.parse_args())
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3
# initialize the frame counters and the total number of blinks
COUNTER = 0
TOTAL = 0
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()  # create the detector
'''
print("detector:", help(detector))
This object represents a sliding window histogram-of-oriented-gradients based object detector.
'''
# predictor = dlib.shape_predictor(args["shape_predictor"])
predictor = dlib.shape_predictor('./1.dat')  # load the trained model
"""
print("predictor", help(predictor))
This object is a tool that takes in an image region containing some
object and outputs a set of point locations that define the pose of the object.
The classic example of this is human face pose prediction, where you take
an image of a human face as input and are expected to identify the locations of
important facial landmarks such as the corners of the mouth and eyes, tip of the nose, and so forth.
"""
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# face_utils.FACIAL_LANDMARKS_IDXS is an OrderedDict (remembers insertion order)
# start the video stream
print("[INFO] starting camera stream thread...")
# vs = FileVideoStream(args["video"]).start()
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# loop over frames from the video stream
while True:
    # grab the frame from the video stream, resize it, and
    # convert it to grayscale
    ret, frame = cap.read()  # read a frame
    if not ret:
        break
    frame = imutils.resize(frame, width=1000)  # set the output width (the original used 450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces on the grayscale frame, which is faster
    rects = detector(gray, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)  # predict the 68 landmark positions
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0
        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1
        # otherwise, the eye aspect ratio is not below the blink
        # threshold
        else:
            # if the eyes were closed for a sufficient number of frames,
            # then increment the total number of blinks
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                TOTAL += 1
            # reset the eye frame counter
            COUNTER = 0
        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `Esc` key was pressed, break from the loop
    if key == 27:
        break
# do a bit of cleanup
cv2.destroyAllWindows()
cap.release()
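The argument parsing that the original detect_blinks.py used was commented out above in favor of the hard-coded './1.dat' path. If you want the command-line interface from the USAGE lines back, a sketch of re-enabling it (it simply restores the commented-out code, plus the missing import argparse):
import argparse
# re-enable the original command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
ap.add_argument("-v", "--video", type=str, default="",
                help="path to input video file")
args = vars(ap.parse_args())
# then load the model from the argument instead of the hard-coded path
predictor = dlib.shape_predictor(args["shape_predictor"])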
import cv2 as cv
import dlib
import imutils
import numpy as np
from imutils import face_utils as fu
# compute the eye aspect ratio
def eye_aspect_ratio(eye):
    # vertical euclidean distances between the upper and lower eyelid points
    a = np.linalg.norm(eye[1] - eye[5])
    b = np.linalg.norm(eye[2] - eye[4])
    # horizontal euclidean distance between the eye corners
    c = np.linalg.norm(eye[0] - eye[3])
    # compute and return the eye aspect ratio
    d = (a + b) / (2.0 * c)
    return d
# compute the mouth aspect ratio
def mouth_aspect_ratio(mouth):
    # vertical euclidean distances between upper and lower lip points
    a = np.linalg.norm(mouth[3] - mouth[9])    # 1-based landmarks 52, 58
    b = np.linalg.norm(mouth[14] - mouth[18])  # 1-based landmarks 63, 67
    # horizontal euclidean distances across the mouth
    c = np.linalg.norm(mouth[0] - mouth[6])    # 1-based landmarks 49, 55
    d = np.linalg.norm(mouth[12] - mouth[16])  # 1-based landmarks 61, 65
    # compute and return the mouth aspect ratio
    e = (a + b) / (c + d)
    return e
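# How the indices above map to the 68-point model (my reading of the standard
# layout, not something stated elsewhere in this post): the "mouth" slice
# covers the 20 outer and inner mouth landmarks, 1-based numbers 49-68, so
# mouth[i] corresponds to landmark 49 + i. The ratio above therefore averages
# two vertical lip distances against two horizontal ones:
#     MAR = (||p52 - p58|| + ||p63 - p67||) / (||p49 - p55|| + ||p61 - p65||)
# and rises well above the 0.5 threshold when the mouth opens wide.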
# consecutive frames with the eyes closed
EYE_COUNTER = 0
# total number of blinks
EYE_TOTAL = 0
# consecutive frames with the mouth open
MOUTH_COUNTER = 0
# total number of yawns
MOUTH_TOTAL = 0
# open the webcam
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
# create the face detector
detector = dlib.get_frontal_face_detector()
# create the 68-point landmark predictor
predictor = dlib.shape_predictor('./1.dat')
# start and end indices of the left and right eye within the 68 landmarks
(lStart, lEnd) = fu.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = fu.FACIAL_LANDMARKS_IDXS["right_eye"]
# start and end indices of the mouth within the 68 landmarks
(mStart, mEnd) = fu.FACIAL_LANDMARKS_IDXS["mouth"]
# loop as long as the webcam is open
while cap.isOpened():
    # grab a frame; stop if it cannot be read
    ret, frame = cap.read()
    if not ret:
        break
    # resize to a fixed output width
    frame = imutils.resize(frame, width=750)
    # convert to grayscale to speed up detection
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # detect all face rectangles in this frame
    faces = detector(gray, 0)
    # loop over the detected faces
    for face in faces:
        # predict the 68 landmark coordinates for this face
        shape = predictor(gray, face)
        # convert them to a NumPy coordinate array
        shape = fu.shape_to_np(shape)
        # slice out the left and right eye coordinates
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        # compute the aspect ratio of each eye
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the two ratios
        earAVG = (leftEAR + rightEAR) / 2.0
        # compute the convex hull of each eye
        leftEyeHull = cv.convexHull(leftEye)
        rightEyeHull = cv.convexHull(rightEye)
        # draw the hulls, i.e. outline the eyes
        cv.drawContours(frame, [leftEyeHull], -1, (255, 255, 255), 1)
        cv.drawContours(frame, [rightEyeHull], -1, (255, 255, 255), 1)
        # slice out the mouth coordinates
        Mouth = shape[mStart:mEnd]
        # compute the mouth aspect ratio
        mar = mouth_aspect_ratio(Mouth)
        # compute the convex hull of the mouth
        MouthHull = cv.convexHull(Mouth)
        # draw the hull, i.e. outline the mouth
        cv.drawContours(frame, [MouthHull], -1, (255, 255, 255), 1)
        '''
        If the eye aspect ratio stays below 0.3 for at least 3 consecutive
        frames, count it as one blink.
        '''
        if earAVG < 0.3:
            EYE_COUNTER += 1
        else:
            if EYE_COUNTER >= 3:
                EYE_TOTAL += 1
            EYE_COUNTER = 0
        '''
        If the mouth aspect ratio stays above 0.5 for at least 3 consecutive
        frames, count it as one yawn.
        '''
        if mar > 0.5:
            MOUTH_COUNTER += 1
        else:
            if MOUTH_COUNTER >= 3:
                MOUTH_TOTAL += 1
            MOUTH_COUNTER = 0
        # drawing the on-screen output
        # blink count in the top-left corner
        cv.putText(
            frame,
            "Blinks:{0}".format(EYE_TOTAL),
            (10, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # live earAVG next to it
        cv.putText(
            frame,
            "earAVG:{0}".format(earAVG),
            (200, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # yawn count just below the blink count
        cv.putText(
            frame,
            "Yawning:{0}".format(MOUTH_TOTAL),
            (10, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # live mar next to it
        cv.putText(
            frame,
            "mar:{0}".format(mar),
            (200, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        # quit hint in the bottom-right corner
        cv.putText(
            frame,
            "Press 'Esc' to Quit",
            (515, 550),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
    # show the frame
    cv.imshow('camera', frame)
    # press Esc to quit
    if cv.waitKey(1) & 0xff == 27:
        break
# close all windows
cv.destroyAllWindows()
# release the webcam
cap.release()
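In this final version the thresholds (0.3 for the eyes, 0.5 for the mouth, 3 consecutive frames for both) are hard-coded inside the loop. Pulling them out as named constants, the way detect_blinks.py does with EYE_AR_THRESH and EYE_AR_CONSEC_FRAMES, makes the two checks easier to tune; a possible sketch (the MOUTH_* names are my own, values copied from the code above):
# suggested constants; values are the ones used in the loop above
EYE_AR_THRESH = 0.3          # eye aspect ratio below this counts as "eyes closed"
EYE_AR_CONSEC_FRAMES = 3     # closed for at least this many frames counts as a blink
MOUTH_AR_THRESH = 0.5        # mouth aspect ratio above this counts as "mouth open"
MOUTH_AR_CONSEC_FRAMES = 3   # open for at least this many frames counts as a yawn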