Face Recognition with Python + OpenCV + TensorFlow

A simple little demo. I'm not very comfortable with GitHub yet, so the code is pasted here directly.
Step 1: capture photos of yourself. We generate 10,000 images, all 64x64. Below is the code that captures and saves them:

import cv2
import os
import sys
import random

out_dir='./my_faces'
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Adjust brightness and contrast: each pixel becomes pixel*alpha + bias, clamped to [0, 255]
def relight(img,alpha=1,bias=0):
    h, w = img.shape[:2]

    for i in range(0,h):
        for j in range(0,w):
            for c in range(3):
                tmp=int(img[i,j,c]*alpha+bias)
                if tmp>255:
                    tmp=255
                elif tmp<0:
                    tmp=0
                img[i,j,c]=tmp
    return img
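# Note: the loops in relight() amount to clamping img*alpha + bias into [0, 255];
# a vectorized NumPy version (e.g. np.clip over the whole array) would be much faster.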

# Load the face detector (Haar cascade classifier)
haar=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Open the camera; the argument is the input stream (a camera index or a video file)
camera =cv2.VideoCapture(0)

n=1
while 1:
    if n<=10000:
        print('Processing image %s.' % str(n))
        # Read a frame
        success,img=camera.read()

        gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces=haar.detectMultiScale(gray_img,1.3,5)
        for f_x,f_y,f_w,f_h in faces:
            face=img[f_y:f_y+f_h,f_x:f_x+f_w]
            face=cv2.resize(face,(64,64))
            '''
            if n%3==1:
                face=relight(face,1,50)
            elif n%3==2:
                face=relight(face,0.5,0)
            '''
            # Randomly vary brightness and contrast so faces under different lighting can be recognized
            face=relight(face,random.uniform(0.5,1.5),random.randint(-50,50))
            cv2.imshow('img',face)
            cv2.imwrite(out_dir+'/'+str(n)+'.jpg',face)
            n+=1
        key=cv2.waitKey(30)&0xff
        if key==27:
            break
    else:
        break
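
Before moving on, it is worth checking that the capture worked: every file in ./my_faces should be a 64x64 JPEG. A minimal sketch, using the same out_dir as above:

import os
import cv2

out_dir='./my_faces'
files=[f for f in os.listdir(out_dir) if f.endswith('.jpg')]
print('captured %d images' % len(files))
# Spot-check the dimensions of the first few images; each should be (64, 64, 3)
for f in files[:5]:
    img=cv2.imread(os.path.join(out_dir,f))
    print(f,img.shape)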

Step 2: process other people's photos, also at 64x64. The image set was downloaded from the web (and I've uploaded it as well). First, a look at how it is stored:
[Image: directory layout of the downloaded face image set]
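For reference, the LFW archive used as input_path below is typically organized as one folder per person (this layout is an assumption based on the standard LFW distribution):

lfw/lfw/<Person_Name>/<Person_Name>_0001.jpg
lfw/lfw/<Person_Name>/<Person_Name>_0002.jpg
...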

Here is the code:
import sys
import os
import cv2

input_path='lfw/lfw'
output_dir='other_faces'

size=64

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Load the face detector (Haar cascade classifier)
haar=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

index=1
# os.walk yields (directory path, subdirectory names, file names)
for (path,dirnames,filenames) in os.walk(input_path):
    for filename in filenames:
        if filename.endswith('.jpg'):
            print('Processing picture %s' % str(index))
            img_path=path+'/'+filename
            # print(img_path)
            # Read the image from file
            img=cv2.imread(img_path)
            # Convert to grayscale
            gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            # Detect faces with the Haar cascade
            faces=haar.detectMultiScale(gray_img,1.3,5)
            for f_x,f_y,f_w,f_h in faces:
                face =img[f_y:f_y+f_h,f_x:f_x+f_w]
                # Save at a uniform 64x64 size
                face=cv2.resize(face,(size,size))
                cv2.imshow('img',face)
                cv2.imwrite(output_dir+'/'+str(index)+'.jpg',face)
                index+=1

            if index>=10000:
                sys.exit(0)
            key=cv2.waitKey(30)&0xff
            if key==27:
                sys.exit(0)
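
Before training, it also helps to confirm that the two classes are roughly comparable in size, since the training script below labels everything in my_faces as the positive class and everything in other_faces as the negative class. A minimal sketch:

import os

for d in ('my_faces','other_faces'):
    count=len([f for f in os.listdir(d) if f.endswith('.jpg')])
    print('%s: %d images' % (d,count))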

Step 3: with the datasets prepared, we can train our own model.
import tensorflow as tf
import cv2
import numpy as np
import os
import random
import sys
from sklearn.model_selection import train_test_split

my_faces_path='my_faces'
other_faces_path='other_faces'
size=64

imgs=[]
labs=[]

def getPaddingSize(img):
    h,w,_=img.shape
    top,bottom,left,right=(0,0,0,0)
    longest=max(h,w)

    # // is integer (floor) division
    if w<longest:
        tmp=longest-w
        left=tmp//2
        right=tmp-left
    elif h<longest:
        tmp=longest-h
        top=tmp//2
        bottom=tmp-top
    else:
        pass
    return top,bottom,left,right

def readData(path,h=size,w=size):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename=path+'/'+filename

            img=cv2.imread(filename)

            top,bottom,left,right=getPaddingSize(img)
            # Pad the image edges so it becomes square before resizing
            img=cv2.copyMakeBorder(img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0,0,0])
            img=cv2.resize(img,(h,w))

            imgs.append(img)
            # Add the label; the class is determined by the source directory
            if path==my_faces_path:
                labs.append([0,1])
            else:
                labs.append([1,0])
            # labs.append(path)

readData(my_faces_path)
readData(other_faces_path)
# Convert the image data to an array
imgs=np.array(imgs)
# print(imgs)

# Convert the labels to an array (readData already one-hot encoded them)
labs=np.array(labs)
# print(labs)

# Randomly split into training and test sets
train_x,test_x,train_y,test_y =train_test_split(imgs,labs,test_size=0.05,random_state=random.randint(0,100))
# Reshape to (number of images, height, width, channels)
train_x=train_x.reshape(train_x.shape[0],size,size,3)
test_x=test_x.reshape(test_x.shape[0],size,size,3)

# Scale the pixel values into [0, 1]
train_x=train_x.astype('float32')/255.0
test_x=test_x.astype('float32')/255.0

print('train size: %s ,test size:%s' % (len(train_x),len(test_x)))

# Split into batches of 100 images each
batch_size=100
num_batch=len(train_x)//batch_size

x=tf.placeholder(tf.float32,[None,size,size,3])
y_=tf.placeholder(tf.float32,[None,2])

keep_prob_5=tf.placeholder(tf.float32)
keep_prob_75=tf.placeholder(tf.float32)

# Weight variable with small random initial values
def weightVariable(shape):
    init = tf.random_normal(shape,stddev=0.01)
    return tf.Variable(init)

# Bias variable with random initial values
def biasVariable(shape):
    init=tf.random_normal(shape)
    return tf.Variable(init)

# Convolution with stride 1 and SAME padding
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

# 2x2 max pooling
def maxPool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

# Dropout
def dropout(x,keep):
    return tf.nn.dropout(x,keep)

# Build the CNN model
def cnnLayer():
    # First layer
    # 3x3 kernel, 3 input channels, 32 output channels
    W1=weightVariable([3,3,3,32])
    b1=biasVariable([32])

    # Convolution
    conv1=tf.nn.relu(conv2d(x,W1)+b1)
    # Pooling
    pool1=maxPool(conv1)
    # Dropout to reduce overfitting (randomly zeroes some activations during training)
    drop1=dropout(pool1,keep_prob_5)

    # Second layer
    W2=weightVariable([3,3,32,64])
    b2=biasVariable([64])
    conv2=tf.nn.relu(conv2d(drop1,W2)+b2)
    pool2=maxPool(conv2)
    drop2=dropout(pool2,keep_prob_5)

    # Third layer
    W3=weightVariable([3,3,64,64])
    b3=biasVariable([64])
    conv3=tf.nn.relu(conv2d(drop2,W3)+b3)
    pool3=maxPool(conv3)
    drop3=dropout(pool3,keep_prob_5)

    # Fully connected layer
    Wf=weightVariable([64*64,512])
    bf=biasVariable([512])
    # Flatten the feature map
    drop3_flat=tf.reshape(drop3,[-1,8*8*64])
    dense=tf.nn.relu(tf.matmul(drop3_flat,Wf)+bf)
    dropf=dropout(dense,keep_prob_75)

    # Output layer (2 classes)
    Wout=weightVariable([512,2])
    bout=weightVariable([2])

    out=tf.add(tf.matmul(dropf,Wout),bout)
    return out
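
# Shape check for cnnLayer(): each of the three 2x2 max-pool layers halves the
# spatial size, so a 64x64 input becomes 32 -> 16 -> 8. The final feature map is
# 8x8 with 64 channels, and 8*8*64 = 4096 = 64*64, which is why the fully
# connected weight Wf has shape [64*64,512].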

def cnnTrain():
    out =cnnLayer()
    # Loss: cross-entropy on the softmax output
    cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out,labels=y_))
    # Optimizer
    train_step=tf.train.AdamOptimizer(0.01).minimize(cross_entropy)
    # Compare predictions with the labels, then average to get the accuracy
    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out,1),tf.argmax(y_,1)),tf.float32))

    # Log loss and accuracy so they can be viewed in TensorBoard
    tf.summary.scalar('loss',cross_entropy)
    tf.summary.scalar('accuracy',accuracy)

    # merge_all collects every summary op in the graph (convenient, but error-prone if stale summaries remain)
    merged_summary_op=tf.summary.merge_all()

    # Initialize the model saver
    saver=tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        summary_writer=tf.summary.FileWriter('./tmp',graph=tf.get_default_graph())
        index=0
        for n in range(10):
            # Take batch_size (100) images per step
            for i in range(num_batch):
                batch_x=train_x[i*batch_size:(i+1)*batch_size]
                batch_y=train_y[i*batch_size:(i+1)*batch_size]

                # Run one training step and also fetch the loss and the merged summaries
                _,loss,summary=sess.run([train_step,cross_entropy,merged_summary_op],
                                        feed_dict={x:batch_x,y_:batch_y,keep_prob_5:0.5,keep_prob_75:0.75})

                summary_writer.add_summary(summary,n*num_batch+i)
                # Print the loss
                print('step %d, loss: %.4f' % (n*num_batch+i,loss))

                if (n*num_batch+i)%100==0:
                    # Evaluate accuracy on the test set
                    acc=sess.run(accuracy,feed_dict={x:test_x,y_:test_y,keep_prob_5:1.0,keep_prob_75:1.0})
                    print('step %d, accuracy: %.4f' % (n*num_batch+i,acc))
                    # Once accuracy stays above 0.99 for more than ten consecutive checks, save the model and exit
                    if acc>0.99:
                        index+=1
                    else:
                        index=0
                    if index>10:
                        # model_path=os.path.join(os.getcwd(),'train_faces.model')
                        saver.save(sess,'./tmp/train_faces.model',global_step=n*num_batch+i)
                        sys.exit(0)
        print('accuracy never stayed above 0.99 long enough, exiting!')

cnnTrain()
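
The summaries are written to ./tmp, so the loss and accuracy curves can be inspected with TensorBoard (assuming it was installed alongside TensorFlow) by running "tensorboard --logdir ./tmp" and opening the URL it prints. The checkpoints saved by saver.save() also land in ./tmp; they are what the recognition script in the next step restores.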

Step 4: load the model and start recognizing whether a face is mine. That wraps it up.
import tensorflow as tf
import sys
import os
import numpy as np
import random
import cv2
from sklearn.model_selection import train_test_split

my_faces_path='my_faces'
other_faces_path='other_faces'
size=64

imgs=[]
labs=[]

def getPaddingSize(img):
    h,w,_=img.shape
    top,bottom,left,right=(0,0,0,0)
    longest =max(h,w)

    if w<longest:
        tmp=longest-w
        left=tmp//2
        right=tmp-left
    elif h<longest:
        tmp=longest-h
        top=tmp//2
        bottom=tmp-top
    else:
        pass
    return top,bottom,left,right

def readData(path,h=size,w=size):
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename=path+'/'+filename

            img=cv2.imread(filename)

            top,bottom,left,right=getPaddingSize(img)

            # Pad the image edges so it becomes square before resizing
            img=cv2.copyMakeBorder(img,top,bottom,left,right,cv2.BORDER_CONSTANT,value=[0,0,0])
            img=cv2.resize(img,(h,w))

            imgs.append(img)
            if path==my_faces_path:
                labs.append([0,1])
            else:
                labs.append([1,0])

readData(my_faces_path)
readData(other_faces_path)

# Convert the image data to an array
imgs=np.array(imgs)

# Randomly split into training and test sets
train_x,test_x,train_y,test_y=train_test_split(imgs,labs,test_size=0.05,random_state=random.randint(0,100))
# Reshape to (number of images, height, width, channels)
train_x=train_x.reshape(train_x.shape[0],size,size,3)
test_x=test_x.reshape(test_x.shape[0],size,size,3)

# Scale the pixel values into [0, 1]
train_x=train_x.astype('float32')/255.0
test_x=test_x.astype('float32')/255.0

print('train size: %s,test size:%s' % (len(train_x),len(test_x)))

# Batches
batch_size=128
num_batch=len(train_x)//batch_size

x=tf.placeholder(tf.float32,shape=[None,size,size,3])
y_=tf.placeholder(tf.float32,shape=[None,2])

keep_prob_5=tf.placeholder(tf.float32)
keep_prob_75=tf.placeholder(tf.float32)

def weightVariable(shape):
    init =tf.random_normal(shape,stddev=0.01)
    return tf.Variable(init)

def biasVariable(shape):
    init =tf.random_normal(shape)
    return tf.Variable(init)

def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')

def maxPool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')

def dropout(x,keep):
    return tf.nn.dropout(x,keep)

def cnnLayer():

    # First layer
    # 3x3 kernel, 3 input channels, 32 output channels
    W1=weightVariable([3,3,3,32])
    b1=biasVariable([32])
    conv1=tf.nn.relu(conv2d(x,W1)+b1)
    pool1=maxPool(conv1)
    drop1=dropout(pool1,keep_prob_5)

    # Second layer
    W2=weightVariable([3,3,32,64])
    b2=biasVariable([64])
    conv2=tf.nn.relu(conv2d(drop1,W2)+b2)
    pool2=maxPool(conv2)
    drop2=dropout(pool2,keep_prob_5)

    # Third layer
    W3=weightVariable([3,3,64,64])
    b3=biasVariable([64])
    conv3=tf.nn.relu(conv2d(drop2,W3)+b3)
    pool3=maxPool(conv3)
    drop3=dropout(pool3,keep_prob_5)

    # Fully connected layer
    # [flattened feature size, number of hidden units]
    Wf=weightVariable([64*64,512])
    bf=biasVariable([512])
    drop3_flat=tf.reshape(drop3,[-1,64*64])
    dense=tf.nn.relu(tf.matmul(drop3_flat,Wf)+bf)
    dropf=dropout(dense,keep_prob_5)

    # Output layer (2 classes)
    Wout=weightVariable([512,2])
    bout=weightVariable([2])
    out=tf.add(tf.matmul(dropf,Wout),bout)
    return out

output=cnnLayer()
predict=tf.argmax(output,1)
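# readData() one-hot encodes my_faces as [0,1] and other_faces as [1,0], so argmax == 1 means the face is mine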

saver=tf.train.Saver()
sess=tf.Session()
saver.restore(sess,tf.train.latest_checkpoint('tmp/'))

def is_my_face(image):
    res=sess.run(predict,feed_dict={x:[image/255.0],keep_prob_5:1.0,keep_prob_75:1.0})
    if res[0]==1:
        return True
    else:
        return False

# Detect faces with OpenCV
haar=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

camera=cv2.VideoCapture(0)
flag=1
while True:
    _,img=camera.read()
    gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    faces=haar.detectMultiScale(gray_img,1.3,5)

    if not len(faces):
        cv2.imshow('img',img)
        key=cv2.waitKey(30)&0xff
        if key==27:
            sys.exit(0)

    # Draw a bounding box around each detected face
    for f_x,f_y,f_w,f_h in faces:
        face=img[f_y:f_y+f_h,f_x:f_x+f_w]
        face=cv2.resize(face,(size,size))
        flag+=1
        if is_my_face(face):
            print('Hi, Mr. Zhang, I recognized you!')
            cv2.imwrite('test_faces/' + str(flag) + '.jpg', face)
            cv2.rectangle(img,(f_x,f_y),(f_x+f_w,f_y+f_h),(0,0,255),3)
        else:
            cv2.rectangle(img,(f_x,f_y),(f_x+f_w,f_y+f_h),(255,0,0),3)
        cv2.imshow('image',img)
        key=cv2.waitKey(30)&0xff
        if key==27:
            sys.exit(0)

sess.close()
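# Optional cleanup (note: the loop above only ever exits via sys.exit, so this
# code, like sess.close() above it, is not normally reached): release the camera
# and close the OpenCV windows.
camera.release()
cv2.destroyAllWindows()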

Let's look at the result: if it is my face it is marked with a red box, otherwise with a blue box:
[Image: live recognition result, with my face boxed in red]

Full code: download link
