-------Teaching the System to Recognize Me-------
-------A Face Recognition System-------
---VERSION 1: Face recognition based on DLIB
---This article comes from my GitHub: https://github.com/zhenghaozhang123/dlib_face_recognize
---Drawbacks:
1. The threshold for deciding whether two faces belong to the same person is hard to determine.
2. The model only suits a small face database; once the database holds many people, this threshold becomes even harder to determine.
3. Moreover, whenever a new person is added to the database, the threshold has to be re-tuned, which is impractical for a real product.
---Advantages:
Simple and convenient; it works well for recognizing samples from a small database. For example, to unlock your computer with your face via the webcam, you only need to enroll your own face into a single-person face database.
This is Version 1: a face recognition system built with DLIB, using the Euclidean distance between feature vectors.
The .dat files you need to download (models required by the face recognition pipeline; a minimal loading sketch follows this list):
shape_predictor_5_face_landmarks.dat -----face landmark detection based on 5 feature points (download: https://download.csdn.net/download/u010039305/10413357?utm_source=bbsseo)
shape_predictor_68_face_landmarks.dat -----face landmark detection based on 68 feature points (download: https://download.csdn.net/download/baiyu_king/10427803)
dlib_face_recognition_resnet_model_v1 -----model that maps a face into a 128D feature vector (if you want the model to be more accurate, do the feature extraction yourself; the most common approach is a convolutional neural network, which will be covered later) (download: https://download.csdn.net/download/googler_offer/10190598)
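Once downloaded, the three models are loaded with the standard dlib calls. A minimal sketch, assuming the .dat files sit in the working directory (the paths are placeholders; point them at wherever you actually saved the files):
import dlib

# placeholder paths; point them at your own copies of the .dat files
predictor_5 = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")    # 5-landmark alignment
predictor_68 = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")  # 68-landmark alignment
facerec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")  # face -> 128D vector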
Now let's go through the following code:
get_from_camera.py
Captures your face through the computer's webcam and stores the crops in your personal face database, to be used for recognition later.
import dlib          # dlib, the face recognition library
import numpy as np   # numpy, for numerical processing
import cv2           # OpenCV, for image processing

# dlib detector and landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('D:/yoorstore/face_recognize1/shape_predictor_68_face_landmarks.dat')

# create the cv2 camera object
cap = cv2.VideoCapture(0)

# cap.set(propId, value)
# set a video property: propId is the property to set, value is its new value
cap.set(3, 480)

# screenshot counter
cnt_ss = 0
# face-crop counter
cnt_p = 0

# save path
path_save = "F:/code/python/P_dlib_face_reco/data/get_from_camera/"

# cap.isOpened() returns True/False, checking whether initialization succeeded
while cap.isOpened():
    # cap.read() returns two values:
    #   a boolean True/False indicating whether the frame was read successfully / the video has ended
    #   the image itself, as a 3-D array
    flag, im_rd = cap.read()

    # wait 1 ms per frame; a delay of 0 would display a static frame
    kk = cv2.waitKey(1)

    # convert to grayscale
    img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)

    # detected faces: rects
    rects = detector(img_gray, 0)
    # print(len(rects))

    # font for the text we are about to draw
    font = cv2.FONT_HERSHEY_SIMPLEX

    if len(rects) != 0:
        # at least one face was detected
        # bounding boxes
        for k, d in enumerate(rects):
            # rectangle position
            # (x, y), (width, height)
            pos_start = tuple([d.left(), d.top()])
            pos_end = tuple([d.right(), d.bottom()])

            # size of the rectangle
            height = d.bottom() - d.top()
            width = d.right() - d.left()

            # draw the bounding box and create an empty image the size of the face
            cv2.rectangle(im_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 255, 255), 2)
            im_blank = np.zeros((height, width, 3), np.uint8)

            # press 's' to save the face currently in the frame to disk
            if kk == ord('s'):
                cnt_p += 1
                for ii in range(height):
                    for jj in range(width):
                        im_blank[ii][jj] = im_rd[d.top() + ii][d.left() + jj]
                # write the face image to disk
                cv2.imwrite(path_save + "img_face_" + str(cnt_p) + ".jpg", im_blank)
                print("Saved to:", path_save + "img_face_" + str(cnt_p) + ".jpg")

        # show the number of faces
        cv2.putText(im_rd, "faces: " + str(len(rects)), (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
    else:
        # no face detected
        cv2.putText(im_rd, "no face", (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)

    # on-screen instructions
    im_rd = cv2.putText(im_rd, "s: save face", (20, 400), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
    im_rd = cv2.putText(im_rd, "q: quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)

    # press 'q' to quit
    if kk == ord('q'):
        break

    # show the window
    # cv2.namedWindow("camera", 0)  # uncomment if you want a resizable camera window
    cv2.imshow("camera", im_rd)

# release the camera
cap.release()
# destroy the windows we created
cv2.destroyAllWindows()
read_csv_128D_feature.py
1. Converts every face photo captured from the webcam into a 128D feature vector;
2. After computing the 128D vector of every photo, takes their mean. The purpose of averaging is to combine all photos into an "average face", which cancels out individual photos that exaggerate or shrink some part of your face. For example, a photo of you at age 10 and a photo of you at age 30 are hard to identify as the same person when compared separately; but averaging the two produces this person's average face (partly carrying your features at 10, partly your features at 30), which makes the identity much easier to recognize. A minimal sketch of the averaging step follows this list.
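As a quick illustration of the averaging step, here is a toy sketch with random stand-in descriptors (not part of the script below, which reads the real vectors from a csv); the per-dimension mean of a stack of 128D descriptors is a single numpy call:
import numpy as np

# stand-in data: pretend these are the 128D descriptors of 10 photos of the same person
descriptors = np.random.rand(10, 128)

# per-dimension mean -> one 128D "average face" vector
feature_mean = np.mean(descriptors, axis=0)
print(feature_mean.shape)  # (128,)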
import cv2
import os
import dlib
from skimage import io
import csv
import numpy as np
import pandas as pd

path_pics = "D:/yoorstore/face_recognize1/get_from_camera/"
path_csv = "D:/yoorstore/face_recognize1/csvs/"

# detector to find the faces
detector = dlib.get_frontal_face_detector()
# shape predictor to find the face landmarks
predictor = dlib.shape_predictor("D:/yoorstore/face_recognize1/shape_predictor_68_face_landmarks.dat")
# face recognition model, the object maps human faces into 128D vectors
facerec = dlib.face_recognition_model_v1("D:/yoorstore/face_recognize1/dlib_face_recognition_resnet_model_v1.dat")

# return the 128D features of a single image
def return_128d_features(path_img):
    img = io.imread(path_img)
    # rearrange the colour channels (despite the variable name, this is not a grayscale conversion)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dets = detector(img_gray, 1)
    print("Processing face image:", path_img, "\n")
    # a cropped face may fail detection when run through the detector again,
    # so make sure features are only computed from images in which a face is actually found
    if len(dets) != 0:
        shape = predictor(img_gray, dets[0])
        face_descriptor = facerec.compute_face_descriptor(img_gray, shape)
    else:
        face_descriptor = 0
        print("no face")
    # print(face_descriptor)
    return face_descriptor

# return_128d_features(path_pics + "img_face_13.jpg")

# extract the features of every photo in the folder and write them to a csv
# input:
#   path_pics: path of the image folder
#   path_csv: path of the csv file to generate
def write_into_csv(path_pics, path_csv):
    dir_pics = os.listdir(path_pics)
    with open(path_csv, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        for i in range(len(dir_pics)):
            # call return_128d_features() to get the 128D features
            print("Reading face image:", path_pics + dir_pics[i])
            features_128d = return_128d_features(path_pics + dir_pics[i])
            # print(features_128d)
            # skip images where no face was detected
            if features_128d == 0:
                continue
            writer.writerow(features_128d)

write_into_csv(path_pics, path_csv + "default_person.csv")

path_csv_rd = "D:/yoorstore/face_recognize1/csvs/default_person.csv"

# read the csv and compute the mean of the 128D features
feature_mean = []
def compute_the_mean(path_csv_rd):
    column_names = []
    # 128 feature columns
    for i in range(128):
        column_names.append("features_" + str(i + 1))
    # read the csv with pandas
    rd = pd.read_csv(path_csv_rd, names=column_names)
    # collect the mean of each of the 128 feature dimensions
    for i in range(128):
        tmp_arr = rd["features_" + str(i + 1)]
        tmp_arr = np.array(tmp_arr)
        # mean of a single feature dimension
        tmp_mean = np.mean(tmp_arr)
        feature_mean.append(tmp_mean)
    print(feature_mean)
    return feature_mean

# compute_the_mean(path_csv_rd)
recognize_sb.py
After finishing the two steps above, the webcam can be used to decide whether the person in front of it is you. Here dist is the threshold.
import dlib          # dlib, the face recognition library
import numpy as np   # numpy, for numerical processing
import cv2           # OpenCV, for image processing

# face recognition model, the object maps human faces into 128D vectors
facerec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

# compute the Euclidean distance between two vectors
def return_euclidean_distance(feature_1, feature_2):
    feature_1 = np.array(feature_1)
    feature_2 = np.array(feature_2)
    dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
    print(dist)
    if dist > 0.4:
        return "diff"
    else:
        return "same"

# this is my averaged 128D feature vector (the mean face computed by read_csv_128D_feature.py)
features_mean_default_person = [-0.030892765492592986, 0.13333227054068916, 0.054221574805284799, -0.050820438289328626,
-0.056331159841073189, 0.0039378538311116004, -0.044465327145237675,
-0.13096490031794497, 0.14215188983239627, -0.084465635842398593, 0.34389359700052363,
-0.062936659118062566, -0.24372901571424385, -0.13270603316394905, -0.0472818422866495,
0.15475224742763921, -0.24415240554433121, -0.11213862150907516, 0.032288033417180964,
0.023676671577911628, 0.098508275653186594, -0.010117797634417289,
0.0048202000815715448, -0.014808513420192819, -0.060100053486071135,
-0.34934839135722112, -0.095795629448012301, -0.050788544706608117,
0.032316677762489567, -0.099673464894294739, -0.080181991975558434,
0.096361607705291952, -0.1823408101734362, -0.045472671817007815,
-0.0066827326326778062, 0.047393877549391041, -0.038414973079373964,
-0.039067085930391363, 0.15961966781239761, 0.0092458106136243598, -0.16182226570029007,
0.026322136191945327, -0.0039144184832510193, 0.2492692768573761, 0.19180528427425184,
0.022950534855848866, -0.019220497949342979, -0.15331173021542399, 0.047744840089427795,
-0.17038608616904208, 0.026140184680882254, 0.19366614363695445, 0.066497623724372762,
0.07038829416820877, -0.0549700813073861, -0.11961311768544347, -0.032121153940495695,
0.083507449611237169, -0.14934051350543373, 0.011458799806668571, 0.10686114273573223,
-0.10744074888919529, -0.04377919611962218, -0.11030520381111848, 0.20804878441910996,
0.093076545941202266, -0.11621182490336268, -0.1991656830436305, 0.10751579348978244,
-0.11251544991606161, -0.12237925866716787, 0.058218707869711672, -0.15829276019021085,
-0.17670038891466042, -0.2718416170070046, 0.034569320955166689, 0.30443575821424784,
0.061833358712886512, -0.19622498672259481, 0.011373612000361868, -0.050225612756453063,
-0.036157087079788507, 0.12961127491373764, 0.13962576616751521, -0.0074232793168017737,
0.020964263007044792, -0.11185114399382942, 0.012502493042694894, 0.17834208513561048,
-0.072658227462517586, -0.041312719401168194, 0.25095899873658228,
-0.056628625839948654, 0.10285118379090961, 0.046701753217923012, 0.042323612264896691,
0.0036216247826814651, 0.066720707440062574, -0.16388990533979317, -0.0193739396421925,
0.027835704435251261, -0.086023958105789985, -0.05472404568603164, 0.14802298341926776,
-0.10644183582381199, 0.098863413851512108, 0.00061285014778963834,
0.062096107555063146, 0.051960245755157973, -0.099548895108072383,
-0.058173993112225285, -0.065454461562790375, 0.14721672511414477, -0.25363486848379435,
0.20384312381869868, 0.16890435312923632, 0.097537552447695477, 0.087824966562421697,
0.091438713434495431, 0.093809676797766431, -0.034379941362299417,
-0.085149037210564868, -0.24900743130006289, 0.021165960517368819, 0.076710369830068792,
-0.0061752907196549996, 0.028413473285342519, -0.029983982541843465]
# dlib detector and landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# create the cv2 camera object
cap = cv2.VideoCapture(0)

# cap.set(propId, value)
# set a video property: propId is the property to set, value is its new value
cap.set(3, 480)

# return the 128D features of a single image
def get_128d_features(img_gray):
    dets = detector(img_gray, 1)
    if len(dets) != 0:
        shape = predictor(img_gray, dets[0])
        face_descriptor = facerec.compute_face_descriptor(img_gray, shape)
    else:
        face_descriptor = 0
    return face_descriptor

# cap.isOpened() returns True/False, checking whether initialization succeeded
while cap.isOpened():
    # cap.read() returns two values:
    #   a boolean True/False indicating whether the frame was read successfully / the video has ended
    #   the image itself, as a 3-D array
    flag, im_rd = cap.read()

    # wait 1 ms per frame; a delay of 0 would display a static frame
    kk = cv2.waitKey(1)

    # convert to grayscale
    img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)

    # detected faces: dets
    dets = detector(img_gray, 0)

    # font for the text we are about to draw
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(im_rd, "q: quit", (20, 400), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)

    if len(dets) != 0:
        # a face was detected
        # extract features from the captured face and compare them with the stored features
        features_rd = get_128d_features(im_rd)  # convert the face captured by the camera into a 128D feature vector
        compare = return_euclidean_distance(features_rd, features_mean_default_person)  # compare against the precomputed database mean 128D vector

        # put the person's name just below the bounding box
        # coordinates for the name text
        pos_text_1 = tuple([dets[0].left(), int(dets[0].bottom() + (dets[0].bottom() - dets[0].top()) / 4)])
        im_rd = cv2.putText(im_rd, compare.replace("same", "default_person"), pos_text_1, font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)

        # bounding boxes
        for k, d in enumerate(dets):
            # draw the rectangle
            im_rd = cv2.rectangle(im_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 255, 255), 2)

        cv2.putText(im_rd, "faces: " + str(len(dets)), (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
    else:
        # no face detected
        cv2.putText(im_rd, "no face", (20, 50), font, 1, (0, 0, 255), 1, cv2.LINE_AA)

    # press 'q' to quit
    if kk == ord('q'):
        break

    # show the window
    cv2.imshow("camera", im_rd)

# release the camera
cap.release()
# destroy the windows we created
cv2.destroyAllWindows()
Determining the threshold:
1. For a single-person database, you can try dist over a range of candidate values and check the accuracy at each one to choose the best dist. (This requires comparing against a fair number of photos; see the sketch after this list.)
2. The reason this model only suits a single-person database is that once several people are enrolled, the dist we would need swings over a wide range instead of varying slightly. For instance, if person A's best dist is 0.4 and person B's best dist is 0.7, averaging them gives 0.55, which clearly fits neither person well and makes the results very inaccurate. That is why this model is not suitable for a multi-person database.
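A minimal sketch of such a threshold sweep, using made-up example distances rather than real measurements:
import numpy as np

# made-up example distances (replace with distances measured from your own photos):
# distances between the stored mean face and other photos of the same person,
# and distances between the stored mean face and photos of other people
dists_same = np.array([0.28, 0.31, 0.35, 0.33, 0.38])
dists_diff = np.array([0.52, 0.61, 0.48, 0.70, 0.66])

best_thresh, best_acc = 0.0, 0.0
# sweep candidate thresholds and keep the one with the highest accuracy
for thresh in np.arange(0.30, 0.70, 0.01):
    correct = np.sum(dists_same <= thresh) + np.sum(dists_diff > thresh)
    acc = correct / (len(dists_same) + len(dists_diff))
    if acc > best_acc:
        best_thresh, best_acc = thresh, acc

print("best dist threshold:", round(best_thresh, 2), "accuracy:", best_acc)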
Note: this model and the discussion above are my personal take on building a recognition system. If anything here is misleading, please feel free to contact me by email: [email protected]