代码参考:https://download.csdn.net/download/weixin_55771290/87430422
windows hello 的低阶版本,没有 Windows Hello 的 3D 景深镜头,因此这是一个基于图片的识别机主的程序。具体运行时,在解锁时判断是否是本人:若不是本人或无人超过 10s,则锁屏;若是本人,则正常使用(采取无密码原始界面)。
人脸的检测采取 opencv cv2.CascadeClassifier
关于模型则采取
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 62, 62, 128) 3584
_________________________________________________________________
conv2d_1 (Conv2D) (None, 60, 60, 64) 73792
_________________________________________________________________
flatten (Flatten) (None, 230400) 0
_________________________________________________________________
dense (Dense) (None, 40) 9216040
=================================================================
Total params: 9,293,416
Trainable params: 9,293,416
Non-trainable params: 0
_________________________________________________________________
None
基础需要由四部分组成。
face_1.py |
face_2.py |
face_3.py |
face_4.py |
制作自己人脸训练数据 |
由 face_1.py 和 face_2.py 制作的数据来进行 CNN 深度学习,并保存模型 |
由已知其他人脸来制作数据 |
最后的检测程序 |
主要是在 tensorflow2.0-gpu 下运行; 这里略微吐槽下 tensorflow2.0.keras 模块部分无提示,对于新手不太友好。 conda list:
Name |
Version |
Build Channel |
_tflow_select |
2.1.0 |
gpu |
absl-py |
0.8.1 |
py37_0 |
altgraph |
0.17 |
pypi_0 pypi |
astor |
0.8.0 |
py37_0 |
astroid |
2.3.3 |
py37_0 |
backcall |
0.1.0 |
py37_0 |
blas |
1.0 |
mkl |
ca-certificates |
2019.11.27 |
0 |
certifi |
2019.11.28 |
py37_0 |
colorama |
0.4.3 |
py_0 |
cudatoolkit |
10.0.130 |
0 |
cudnn |
7.6.5 |
cuda10.0_0 |
cycler |
0.10.0 |
pypi_0 pypi |
decorator |
4.4.1 |
py_0 |
future |
0.18.2 |
pypi_0 pypi |
gast |
0.2.2 |
py37_0 |
google-pasta |
0.1.8 |
py_0 |
grpcio |
1.16.1 |
py37h351948d_1 |
h5py |
2.9.0 |
py37h5e291fa_0 |
hdf5 |
1.10.4 |
h7ebc959_0 |
icc_rt |
2019.0.0 |
h0cc432a_1 |
intel-openmp |
2019.4 |
245 |
ipykernel |
5.1.3 |
py37h39e3cac_1 |
ipython |
7.11.1 |
py37h39e3cac_0 |
ipython_genutils |
0.2.0 |
py37_0 |
isort |
4.3.21 |
py37_0 |
jedi |
0.15.2 |
py37_0 |
joblib |
0.14.1 |
py_0 |
jupyter_client |
5.3.4 |
py37_0 |
jupyter_core |
4.6.1 |
py37_0 |
keras |
2.3.1 |
pypi_0 pypi |
keras-applications |
1.0.8 |
py_0 |
keras-preprocessing |
1.1.0 |
py_1 |
kiwisolver |
1.1.0 |
pypi_0 pypi |
lazy-object-proxy |
1.4.3 |
py37he774522_0 |
libprotobuf |
3.11.2 |
h7bd577a_0 |
libsodium |
1.0.16 |
h9d3ae62_0 |
Markdown |
3.1.1 |
py37_0 |
matplotlib |
3.1.2 |
pypi_0 pypi |
mccabe |
0.6.1 |
py37_1 |
mkl |
2019.4 |
245 |
mkl-service |
2.3.0 |
py37hb782905_0 |
mkl_fft |
1.0.15 |
py37h14836fe_0 |
mkl_random |
1.1.0 |
py37h675688f_0 |
mouseinfo |
0.1.2 |
pypi_0 pypi |
numpy |
1.17.4 |
py37h4320e6b_0 |
numpy-base |
1.17.4 |
py37hc3f5095_0 |
opencv-python |
4.1.2.30 |
pypi_0 pypi |
openssl |
1.1.1d |
he774522_3 |
opt_einsum |
3.1.0 |
py_0 |
pandas |
0.25.3 |
pypi_0 |
parso |
0.5.2 |
py_0 |
pefile |
2019.4.18 |
pypi_0 |
pickleshare |
0.7.5 |
py37_0 |
pillow |
7.0.0 |
pypi_0 |
pip |
19.3.1 |
py37_0 |
prompt_toolkit |
3.0.2 |
py_0 |
Protobuf |
3.11.2 |
py37h33f27b4_0 |
pyautogui |
0.9.48 |
pypi_0 pypi |
pygetwindow |
0.0.8 |
pypi_0 pypi |
pygments |
2.5.2 |
py_0 |
pyinstaller |
3.6 |
pypi_0 pypi |
pylint |
2.4.4 |
py37_0 |
pymsgbox |
1.0.7 |
pypi_0 pypi |
pyparsing |
2.4.6 |
pypi_0 pypi |
pyperclip |
1.7.0 |
pypi_0 pypi |
pyreadline |
2.1 |
py37_1 |
pyrect |
0.1.4 |
pypi_0 pypi |
pyscreeze |
0.1.26 |
pypi_0 pypi |
python |
3.7.6 |
h60c2a47_2 |
python-dateutil |
2.8.1 |
py_0 |
pytweening |
1.0.3 |
pypi_0 pypi |
pytz |
2019.3 |
pypi_0 pypi |
pywin32 |
227 |
py37he774522_1 |
pywin32-ctypes |
0.2.0 |
pypi_0 pypi |
pyyaml |
5.3 |
pypi_0 pypi |
pyzmq |
18.1.0 |
py37ha925a31_0 |
scikit-learn |
0.22.1 |
py37h6288b17_0 |
scipy |
1.3.2 |
py37h29ff71c_0 |
setuptools |
44.0.0 |
py37_0 |
six |
1.13.0 |
py37_0 |
SQLite |
3.30.1 |
he774522_0 |
tensorboard |
2.0.0 |
pyhb38c66f_1 |
tensorflow |
2.0.0 |
gpu_py37h57d29ca_0 |
tensorflow-base |
2.0.0 |
gpu_py37h390e234_0 |
tensorflow-estimator |
2.0.0 |
pyh2649769_0 |
tensorflow-gpu |
2.0.0 |
h0d30ee6_0 |
termcolor |
1.1.0 |
py37_1 |
tornado |
6.0.3 |
py37he774522_0 |
traitlets |
4.3.3 |
py37_0 |
vc |
14.1 |
h0510ff6_4 |
vs2015_runtime |
14.16.27012 |
hf0eaf9b_1 |
wcwidth |
0.1.7 |
py37_0 |
werkzeug |
0.16.0 |
py_0 |
wheel |
0.33.6 |
py37_0 |
wincertstore |
0.2 |
py37_0 |
wrapt |
1.11.2 |
py37he774522_0 |
zeromq |
4.3.1 |
h33f27b4_3 |
zlib |
1.2.11 |
h62dcd97_3 |
人脸数据存储至 my_faces 可自己命名
# face_1.py -- collect training images of the machine owner.
#
# Grabs frames from the default webcam, detects faces with a Haar cascade,
# crops/resizes each face to 64x64, applies random brightness/contrast
# jitter (so the model generalizes across lighting) and writes the result
# to ./my_faces, up to 5000 samples. Press ESC in the preview to stop early.
import os
import random
import sys

import numpy as np

out_dir = './my_faces'


def relight(img, alpha=1, bias=0):
    """Adjust brightness/contrast in place: pixel = clip(pixel*alpha + bias, 0, 255).

    Vectorized with numpy; the original iterated every pixel/channel in a
    triple Python loop (O(w*h*3) interpreter-level operations per frame).

    Args:
        img: HxWx3 uint8 image array; modified in place.
        alpha: contrast gain.
        bias: brightness offset.

    Returns:
        The same image object, for call-chaining convenience.
    """
    adjusted = img.astype(np.float64) * alpha + bias
    # The original used int(), which truncates toward zero; after clipping
    # to [0, 255] a plain integer cast truncates identically.
    img[:] = np.clip(adjusted, 0, 255).astype(img.dtype)
    return img


def main():
    # Local import: OpenCV is only needed when running as a script, and it
    # may be absent in environments that just want relight().
    from cv2 import cv2

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Haar cascade face detector shipped with opencv-python.
    haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')
    # Input stream: device 0; could also be a video file path.
    camera = cv2.VideoCapture(0)

    n = 1
    while n <= 5000:
        print('It`s processing %s image.' % n)
        success, img = camera.read()
        if not success:
            # Bug fix: the original passed a None frame straight to
            # cvtColor and crashed when a grab failed.
            continue
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = haar.detectMultiScale(gray_img, 1.3, 5)
        for f_x, f_y, f_w, f_h in faces:
            face = img[f_y:f_y + f_h, f_x:f_x + f_w]
            face = cv2.resize(face, (64, 64))
            # Random lighting jitter per saved sample.
            face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
            cv2.imshow('img', face)
            cv2.imwrite(out_dir + '/' + str(n) + '.jpg', face)
            n += 1
        key = cv2.waitKey(30) & 0xff
        if key == 27:  # ESC
            break


if __name__ == '__main__':
    main()
需要收集一个其他人脸的图片集,只要不是自己的人脸都可以,可以在网上找到,这里我给出一个我用到的图片集: 网站地址:http://vis-www.cs.umass.edu/lfw/ 图片集下载:http://vis-www.cs.umass.edu/lfw/lfw.tgz 先将下载的图片集,解压到项目目录下的 lfw 目录下,也可以自己指定目录(修改代码中的 input_dir 变量)
# -*- coding: utf-8 -*-
# face_2.py -- build the "other people" negative sample set from LFW.
#
# Walks input_dir for *.jpg files, detects faces with a Haar cascade and
# writes square crops to output_dir. Press ESC in the preview window to
# abort the whole run.
import os
import sys

from cv2 import cv2

input_dir = './lfw'           # unpacked LFW dataset (http://vis-www.cs.umass.edu/lfw/)
output_dir = './other_faces'
size = 64                     # side length of the saved square face crops


def close_cv2():
    """Block until ESC is pressed, then destroy all OpenCV windows."""
    while cv2.waitKey(100) != 27:
        pass
    cv2.destroyAllWindows()


def main():
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Haar cascade face detector shipped with opencv-python.
    haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

    index = 1
    for path, dirnames, filenames in os.walk(input_dir):
        for filename in filenames:
            if not filename.endswith('.jpg'):
                continue
            print('Being processed picture %s' % index)
            img_path = path + '/' + filename
            print(img_path)
            img = cv2.imread(img_path)
            if img is None:
                # Bug fix: skip unreadable/corrupt files instead of
                # crashing in cvtColor on a None image.
                continue
            gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = haar.detectMultiScale(gray_img, 1.3, 5)
            for f_x, f_y, f_w, f_h in faces:
                face = img[f_y:f_y + f_h, f_x:f_x + f_w]
                # Consistency fix: honor the `size` setting instead of a
                # hard-coded 64 (both are 64 today, so behavior is equal).
                face = cv2.resize(face, (size, size))
                cv2.imshow('img', face)
                cv2.imwrite(output_dir + '/' + str(index) + '.jpg', face)
                index += 1
            key = cv2.waitKey(30) & 0xff
            if key == 27:  # ESC aborts the whole run
                sys.exit(0)


if __name__ == '__main__':
    main()
读取上文的 my_faces 和 other_faces 文件夹下的训练数据进行训练
# face_3.py -- CNN training script: reads ./my_faces and ./other_faces,
# trains the Conv2D/Conv2D/Flatten/Dense model summarized at the top of
# this article, and saves the model.
# NOTE(review): this listing was garbled by the web export -- all
# whitespace was stripped and everything after the first `<` comparison
# (inside getPaddingSize, at "ifw") was cut off. The block below is
# incomplete and not runnable as-is; restore it from the original
# download (see the link on line 1) before use.
# -*- codeing: utf-8 -*-from__future__importabsolute_import,division,print_functionimporttensorflowastffromcv2importcv2importnumpyasnpimportosimportrandomimportsysfromsklearn.model_selectionimporttrain_test_splitfromsklearn.metricsimportclassification_report# from keras import backend as KdefgetPaddingSize(img):h,w,_=img.shapetop,bottom,left,right=(0,0,0,0)longest=max(h,w)ifw
# face_4.py -- the detector run at unlock time: captures up to 20 face
# crops from the webcam (locking the workstation via user32.LockWorkStation
# if that takes more than 10 s, i.e. nobody is present), then feeds them to
# the saved Keras model and locks the screen unless the predicted
# similarity exceeds 0.8.
# NOTE(review): this listing was garbled by the web export -- whitespace
# was stripped and text after `<` comparisons was cut off (e.g.
# "ifw255:" inside getPaddingSize/relight). It is incomplete and not
# runnable as-is; restore it from the original download before use.
# NOTE(review): it also uses time.clock(), which was removed in
# Python 3.8, and predict_classes(), removed in TF 2.6 -- confirm the
# pinned environment (Python 3.7.6 / TF 2.0.0 per the conda list above).
# 识别自己from__future__importabsolute_import,division,print_functionimporttensorflowastffromcv2importcv2importosimportsysimportrandomimportnumpyasnpfromsklearn.model_selectionimporttrain_test_splitfromsklearn.metricsimportclassification_reportfromsklearn.metricsimportcohen_kappa_scorefromctypesimport*importtimeimportsysdefgetPaddingSize(img):h,w,_=img.shapetop,bottom,left,right=(0,0,0,0)longest=max(h,w)ifw255:tmp=255eliftmp<0:tmp=0img[j,i,c]=tmpreturnimgout_dir='./temp_faces'ifnotos.path.exists(out_dir):os.makedirs(out_dir)# 获取分类器haar=cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')# 打开摄像头 参数为输入流,可以为摄像头或视频文件camera=cv2.VideoCapture(0)n=1start=time.clock()while1:if(n<=20):print('It`s processing %s image.'%n)# 读帧success,img=camera.read()gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)faces=haar.detectMultiScale(gray_img,1.3,5)forf_x,f_y,f_w,f_hinfaces:face=img[f_y:f_y+f_h,f_x:f_x+f_w]face=cv2.resize(face,(64,64))# face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))cv2.imshow('img',face)cv2.imwrite(out_dir+'/'+str(n)+'.jpg',face)n+=1key=cv2.waitKey(30)&0xffifkey==27:breakend=time.clock()print(str(end-start))if(end-start)>10:user32=windll.LoadLibrary('user32.dll')user32.LockWorkStation()sys.exit()else:breakmy_faces_path=out_dirsize=64imgs=[]labs=[]imgs,labs=readData(my_faces_path,size,size,imgs,labs)# 将图片数据与标签转换成数组imgs=np.array(imgs)# labs = np.array([[0,1] if lab == my_faces_path else [1,0] for lab in labs])labs=np.array([[1]iflab==my_faces_pathelse[0]forlabinlabs])# 随机划分测试集与训练集train_x,test_x,train_y,test_y=train_test_split(imgs,labs,test_size=0.9,random_state=random.randint(0,100))# 参数:图片数据的总数,图片的高、宽、通道train_x=train_x.reshape(train_x.shape[0],size,size,3)test_x=test_x.reshape(test_x.shape[0],size,size,3)# 
将数据转换成小于1的数train_x=train_x.astype('float32')/255.0test_x=test_x.astype('float32')/255.0restored_model=tf.keras.models.load_model(r'C:\Users\Administrator\Desktop\my_model.h5')pre_result=restored_model.predict_classes(test_x)print(pre_result.shape)print(pre_result)acc=sum(pre_result==1)/pre_result.shape[0]print("相似度: "+str(acc))ifacc>0.8:print("你是***")else:user32=windll.LoadLibrary('user32.dll')user32.LockWorkStation()
激活 Anaconda 环境 切 CD 至 face_4.py 的位置
rem myface.bat -- activate the conda environment and run the face check.
call activate tensorflow02
rem /d switches the drive as well as the directory.
cd /d E:\ziliao\LearningPy\face
python face_4.py
' hide.vbs -- launch myface.bat with a hidden console window (vbhide),
' so the face check runs invisibly when triggered by the scheduled task
' on workstation unlock.
Set ws = CreateObject("Wscript.Shell")
ws.run "cmd /c E:\ziliao\LearningPy\face\myface.bat",vbhide
创建任务
常规中 |
触发器 |
操作 |
最高权限 选择对应系统 win10 |
添加 工作站解锁时 |
添加 hide.vbs |