Training a Smile Classifier on the genki4k Dataset with a Convolutional Neural Network

Artificial Intelligence / Machine Learning Course Project


Experiment Requirements

1. Understand the main approaches to facial-image feature extraction (at least HoG, Dlib, and convolutional-neural-network features).
2. Master the workflow of splitting the smile dataset (genki4k) into positive and negative samples, then training and testing models (at least SVM and CNN), reporting training and test accuracy (F1-score and ROC); a minimal HoG + SVM sketch is given after this section.
3. Build a program that captures your own face from a webcam and classifies the expression (smile vs. non-smile) in real time, printing the predicted label.
4. Replace the smile dataset with a mask dataset, pick a suitable feature-extraction method, train a model for mask wearing vs. not wearing, and repeat steps 2-3 above.

Deliverables: write up an experiment report and a technical report. Publish the technical report as a blog post and submit the blog URL on Xuexitong; email the Markdown source and the code. Write the experiment report in the original doc report template (experiment type: "comprehensive, innovative") and submit it by email as well.
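As a point of reference for task 2's classical baseline, here is a minimal HoG + SVM sketch that reports F1-score and ROC AUC. It is only a sketch: the folder layout (genki4k/smile, genki4k/unsmile), the 64x64 crop size, the helper name load_hog_features, and the 80/20 split are all assumptions, not part of the original assignment code.

import os
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, roc_auc_score

def load_hog_features(folder, label):
    # Read every image in the folder as grayscale and extract HoG descriptors
    X, y = [], []
    for name in os.listdir(folder):
        img = imread(os.path.join(folder, name), as_gray=True)
        img = resize(img, (64, 64))
        X.append(hog(img, pixels_per_cell=(8, 8), cells_per_block=(2, 2)))
        y.append(label)
    return X, y

# Assumed layout: images already sorted into smile/ and unsmile/ folders
X_s, y_s = load_hog_features('genki4k/smile', 1)
X_u, y_u = load_hog_features('genki4k/unsmile', 0)
X = np.array(X_s + X_u)
y = np.array(y_s + y_u)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)

clf = LinearSVC()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print('F1-score:', f1_score(y_test, pred))
print('ROC AUC :', roc_auc_score(y_test, clf.decision_function(X_test)))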

Splitting the smile dataset (genki4k) into positive and negative samples, training, and testing
Preparing the training dataset

import keras
keras.__version__

import os, shutil

# The path to the directory where the original dataset was uncompressed
original_dataset_dir = 'C:\\Users\\Desktop\\genki4k'

# The directory where we will store our smaller dataset
base_dir = 'C:\\Users\\Desktop\\genki4k\\smile_and_unsmile'
os.mkdir(base_dir)

# Directories for our training, validation and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# Directory with our training smile pictures
train_smile_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_smile_dir)
# Directory with our training unsmile pictures
train_unsmile_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_unsmile_dir)
# Directory with our validation smile pictures
validation_smile_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_smile_dir)
# Directory with our validation unsmile pictures
validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_unsmile_dir)
# Directory with our test smile pictures
test_smile_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_smile_dir)
# Directory with our test unsmile pictures
test_unsmile_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_unsmile_dir)
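The snippet above only creates empty folders; the step that copies the genki4k images into them is not shown in the original post. Below is a minimal sketch of that step, assuming genki4k ships a labels.txt whose first number on each line is the smile flag (1 = smile, 0 = non-smile) and images named file0001.jpg through file4000.jpg in a files/ directory; the index-based 60/20/20 split is likewise an assumption.

labels_path = os.path.join(original_dataset_dir, 'labels.txt')
files_dir = os.path.join(original_dataset_dir, 'files')

# Assumed format: first column of each labels.txt line is the smile flag
with open(labels_path) as f:
    labels = [int(line.split()[0]) for line in f]

for i, label in enumerate(labels, start=1):
    fname = 'file{:04d}.jpg'.format(i)
    cls = 'smile' if label == 1 else 'unsmile'
    # Roughly 60/20/20 split across train / validation / test by index
    if i <= 2400:
        dst_dir = train_dir
    elif i <= 3200:
        dst_dir = validation_dir
    else:
        dst_dir = test_dir
    shutil.copyfile(os.path.join(files_dir, fname),
                    os.path.join(dst_dir, cls, fname))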

**Building a small convolutional neural network**

from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
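The post omits the compile step, but Keras requires one before training. A typical configuration for a single-sigmoid binary classifier is shown below; the RMSprop optimizer and the 1e-4 learning rate are assumptions following the common Keras small-convnet recipe, not settings confirmed by the original.

from keras import optimizers

# A single sigmoid output pairs naturally with binary cross-entropy;
# the learning rate here is an assumption, not from the original post.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])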

Data preprocessing

from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    # This is the target directory
    train_dir,
    # All images will be resized to 150x150
    target_size=(150, 150),
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
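The webcam script below loads smile_and_unsmile_2.h5, but the post never shows the training run that produces it. Here is a minimal training-and-saving sketch; the 30 epochs and the step counts (which assume roughly 2000 training and 1000 validation images at batch_size=20) are arbitrary choices, not values from the original.

# Train the network from the two generators and save the weights the
# webcam script expects; step counts and epochs are assumptions.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50)
model.save('smile_and_unsmile_2.h5')

# Test-set accuracy, using the same rescaling as the validation generator
test_generator = test_datagen.flow_from_directory(
    test_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
test_loss, test_acc = model.evaluate_generator(test_generator, steps=40)
print('test acc:', test_acc)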

Real-time smile detection from the webcam

import cv2
from keras.models import load_model
import numpy as np
import dlib

# CNN trained above; dlib supplies the frontal face detector
model = load_model('smile_and_unsmile_2.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    for face in dets:
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
        # Crop the face, match the network's 150x150 RGB input, rescale to [0, 1]
        img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        img1 = np.array(img1) / 255.
        img_tensor = img1.reshape(-1, 150, 150, 3)
        prediction = model.predict(img_tensor)
        print(prediction)
        # flow_from_directory assigns class indices alphabetically,
        # so smile = 0 and unsmile = 1
        if prediction[0][0] > 0.5:
            result = 'unsmile'
        else:
            result = 'smile'
        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2,
                    cv2.LINE_AA)
    cv2.imshow('Video', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    # Press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()

Real-time mask-wearing detection

import cv2
from keras.models import load_model
import numpy as np
import dlib

model = load_model('C:\\Users\\UHS\\Desktop\\test\\smile_and_nosmile_1.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    for face in dets:
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
        # Same preprocessing as the smile script: crop, resize, RGB, rescale
        img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
        img1 = np.array(img1) / 255.
        img_tensor = img1.reshape(-1, 150, 150, 3)
        prediction = model.predict(img_tensor)
        print(prediction)
        if prediction[0][0] > 0.5:
            result = 'mask'
        else:
            result = 'nomask'
        cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2,
                    cv2.LINE_AA)
    cv2.imshow('maskdetector', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
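Per task 4, the mask model is trained by rerunning the same pipeline with mask/nomask class folders in place of smile/unsmile. Below is a sketch of the only generator call that changes; the directory layout is an assumption. Since flow_from_directory assigns class indices alphabetically, it is worth printing class_indices to confirm which side of the 0.5 threshold actually means 'mask'.

# Assumed layout: mask_dataset\train contains mask/ and nomask/ subfolders
train_generator = train_datagen.flow_from_directory(
    'C:\\Users\\Desktop\\mask_dataset\\train',
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
print(train_generator.class_indices)  # e.g. {'mask': 0, 'nomask': 1}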

Reflections
This experiment was genuinely challenging. I pulled together material from Baidu and other sources, and tried to build further on my IoT engineering practice work, which was also difficult. Because of time constraints I collected relatively few images, so the final results are not very accurate; in particular, once a mask covers most of the face, recognition becomes unreliable.
References

https://blog.csdn.net/weixin_42444684/article/details/107284661
