操作系统: Windows 11
IDE: PyCharm Community Edition 2022.1.1
Python: 3.9.7
Node.js: v16.14.2
import cv2

# Open the default built-in camera (index 0).
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:
        # Camera produced no frame; stop instead of crashing in cv2.flip.
        break
    # Mirror the frame horizontally so the preview behaves like a mirror.
    img = cv2.flip(img, 1)
    # BUG FIX: the original passed an undefined name `picture` here; the
    # captured (and flipped) frame is `img`.
    cv2.imshow('vtuber', img)
    # waitKey both refreshes the imshow window and polls the keyboard;
    # press 'q' to quit cleanly.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
其中,VideoCapture(0) 表示打开编号为 0 的摄像头(通常是内置摄像头),cv2.flip(img, 1) 表示把获取到的帧进行左右(水平)翻转,使画面像镜子一样。
先定位刚才获取到的视频流中的人脸。
import dlib

# HOG-based frontal face detector shipped with dlib.
detector = dlib.get_frontal_face_detector()


def face_positioning(img):
    """Detect all faces in `img` and return the largest bounding box.

    Returns a dlib rectangle for the face with the greatest area, or
    None when no face is detected.
    """
    detections = detector(img, 0)
    if not detections:
        return None

    def box_area(rect):
        # Width times height of the detection rectangle.
        return (rect.right() - rect.left()) * (rect.bottom() - rect.top())

    return max(detections, key=box_area)
再提取其中的关键点。
# 68-point facial landmark model; the .dat file must be downloaded
# separately (dlib's pretrained shape predictor).
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')


def extract_key_points(img, position):
    """Run the landmark predictor on the face at `position` in `img`.

    Returns a list of 68 numpy float32 arrays, each holding the (x, y)
    pixel coordinates of one landmark.
    NOTE(review): assumes `np` (numpy) is imported elsewhere in the file.
    """
    landmark_shape = predictor(img, position)
    return [
        np.array([landmark_shape.part(idx).x, landmark_shape.part(idx).y],
                 dtype=np.float32)
        for idx in range(68)
    ]
先计算面部的关键点。
def generate_points(key_points):
    """Reduce the 68 landmarks to three anchor points.

    Returns the tuple (brow_center, chin_center, nose_center), where each
    element is the arithmetic mean of a fixed group of landmark vectors.
    """
    def mean_of(indices):
        # Average of the selected landmark position vectors.
        total = key_points[indices[0]]
        for idx in indices[1:]:
            total = total + key_points[idx]
        return total / len(indices)

    # dlib 68-point indices: 18-25 cover both eyebrows, 6-10 the chin
    # tip of the jawline, 29-30 the nose bridge/tip.
    brows = [18, 19, 20, 21, 22, 23, 24, 25]
    chin = [6, 7, 8, 9, 10]
    nose = [29, 30]
    return mean_of(brows), mean_of(chin), mean_of(nose)
再利用这些关键点计算头部旋转角度。
def generate_features(contruction_points):
    """Compute head rotation features from the three anchor points.

    NOTE(review): the parameter name is a typo for "construction_points";
    it is kept unchanged so keyword callers are not broken.

    Returns np.array([horizontal_rotation, vertical_rotation]).
    """
    brow_center, chin_center, nose_center = contruction_points
    # Midline of the face (brow to chin) and the brow-to-nose vector.
    mid_edge = brow_center - chin_center
    bevel_edge = brow_center - nose_center
    mid_edge_length = np.linalg.norm(mid_edge)
    denom = mid_edge_length ** 2
    # 2-D cross product (z component): how far the nose deviates sideways
    # from the midline -> left/right head turn.
    horizontal_rotation = np.cross(mid_edge, bevel_edge) / denom
    # Projection of the bevel edge onto the midline -> up/down tilt.
    vertical_rotation = (mid_edge @ bevel_edge) / denom
    return np.array([horizontal_rotation, vertical_rotation])
之后汇总一下前面的函数,就可以完成从获取摄像头中的人脸到计算出人脸的头部特征的全部过程了。
def extract_img_features(img):
    """Full per-frame pipeline: detect face, extract landmarks, build the
    three anchor points, and compute the head-rotation feature vector.

    Draws debug overlays (landmark indices, anchor indices, and the
    rotation values) onto `img` and shows it in the 'self' window.
    Returns the rotation feature array, or None when no face is found.
    """
    face_position = face_positioning(img)
    # BUG FIX: compare against None explicitly.  `not face_position`
    # depended on the truthiness of a dlib rectangle object, which is not
    # a documented contract.
    if face_position is None:
        cv2.imshow('self', img)
        cv2.waitKey(1)
        return None
    key_points = extract_key_points(img, face_position)
    # Debug overlay: label every landmark with its index.
    for i, (p_x, p_y) in enumerate(key_points):
        cv2.putText(img, str(i), (int(p_x), int(p_y)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (255, 255, 255))
    construction_points = generate_points(key_points)
    # Debug overlay: label the three derived anchor points.
    for i, (p_x, p_y) in enumerate(construction_points):
        cv2.putText(img, str(i), (int(p_x), int(p_y)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.25, (255, 255, 255))
    rotation = generate_features(construction_points)
    # Print the rotation vector next to the last anchor (the nose point).
    cv2.putText(img, str(rotation),
                (int(construction_points[-1][0]),
                 int(construction_points[-1][1])),
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
    cv2.imshow('self', img)
    return rotation
根据前几步识别到的人脸特征,画出小人。
def draw_image(h_rotation, v_rotation):
    """Draw the cartoon avatar for the given head rotation.

    Returns (canvas, left_eye, right_eye, mouth); the last three are the
    (x, y) integer centres of the drawn circles.
    """
    # White-ish 512x512 float canvas; circles are drawn in black (0).
    canvas = np.ones([512, 512], dtype=np.float32)
    face_length = 200
    face_center = 256, 256
    # Features shift opposite to the horizontal rotation and downward
    # with the vertical one; the mouth moves half as far as the eyes.
    dx = h_rotation * face_length
    dy = v_rotation * face_length
    left_eye = int(220 - dx), int(249 + dy)
    right_eye = int(292 - dx), int(249 + dy)
    mouth = int(256 - dx / 2), int(310 + dy / 2)
    cv2.circle(canvas, face_center, 100, 0, 1)
    cv2.circle(canvas, left_eye, 15, 0, 1)
    cv2.circle(canvas, right_eye, 15, 0, 1)
    cv2.circle(canvas, mouth, 5, 0, 1)
    return canvas, left_eye, right_eye, mouth
将前面的步骤一一调用,然后将左眼、右眼、嘴巴的坐标封装成json,并保存到vue3服务器所在的目录,人脸位姿检测就完成了。
def face_set():
    """Capture webcam frames, track the face, render the avatar, and save
    recent eye/mouth coordinates as JSON for the Vue front end.

    Loops until 'q' is pressed (or the camera fails), then writes every
    10th of the last 100 samples to data.json and returns a Flask-style
    JSON response.
    NOTE(review): `json` and `jsonify` must be imported elsewhere in the
    file; neither import is visible in this chunk.
    """
    cap = cv2.VideoCapture(0)
    left_eyes = []
    right_eyes = []
    months = []  # sic: "month" is this file's spelling of "mouth"
    # Baseline features of a neutral, front-facing head — presumably
    # measured offline; TODO confirm against the capture setup.
    ORIGIN_FEATURE_GROUP = [-0.00899233, 0.39529446]
    FEATURE_GROUP = [0, 0]
    # Hoisted out of the loop: the original rebuilt this dict every frame.
    response_object = {"status": "success"}
    try:
        while True:
            ret, img = cap.read()
            if not ret:
                # Camera failure: stop instead of crashing in cv2.flip.
                break
            img = cv2.flip(img, 1)
            NEW_FEATURE_GROUP = extract_img_features(img)
            if NEW_FEATURE_GROUP is not None:
                # Express features as deltas from the neutral pose;
                # otherwise keep the last known pose.
                FEATURE_GROUP = NEW_FEATURE_GROUP - ORIGIN_FEATURE_GROUP
            HORI_ROTATION, VERT_ROTATION = FEATURE_GROUP
            picture, left_eye, right_eye, month = draw_image(
                HORI_ROTATION, VERT_ROTATION)
            left_eyes.append(left_eye)
            right_eyes.append(right_eye)
            months.append(month)
            cv2.imshow('vtuber', picture)
            # BUG FIX: the original called cv2.waitKey(1) twice per frame,
            # so a 'q' press could be consumed by the first call and never
            # reach the exit check.  One call both refreshes the window
            # and polls the keyboard.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("close")
                break
    finally:
        # Release the camera and close windows even on error.
        cap.release()
        cv2.destroyAllWindows()
    data = []
    data.append({
        "left_eye": left_eyes[-100::10],
        "right_eye": right_eyes[-100::10],
        "month": months[-100::10]
    })
    response_object['courses'] = data
    # Hard-coded path into the Node.js/Vue public directory.
    with open('C:/Users/用户名/Nodejs/exp3/public/data.json', 'w') as f:
        json.dump(data, f)
    return jsonify(response_object)
App.vue长这样:
BarChart.vue长这样:
左眼x坐标
左眼y坐标
右眼x坐标
右眼y坐标
嘴巴x坐标
嘴巴y坐标
最终结果如下图所示: