Implementing Facial Keypoint Detection with Keras
Improved version: http://www.cnblogs.com/ansang/p/8583122.html
Step 1: Prepare the required libraries.
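For reference, these are the libraries actually used (they appear again in the full listing at the end):

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
import cv2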
Step 2: Prepare the dataset:
data.7z
The archive contains both the label file and the image data.
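Judging from the parsing code in the next step, each line of lable.txt is assumed to hold an image filename followed by ten comma-separated values, the (x, y) coordinates of five keypoints. A made-up example line:

000001.jpg,69,111,108,112,88,136,72,152,105,154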
Step 3: Convert the images and labels into numpy arrays:
def __data_label__(path):
    # each line of lable.txt: image filename, then the keypoint coordinates
    f = open(path + "lable.txt", "r")
    datalist = []
    labellist = []
    for line in f.readlines():
        a = line.replace("\n", "")
        b = a.split(",")
        labellist.append(b[1:])
        imgname = path + b[0]
        image = load_img(imgname, target_size=(218, 178))
        datalist.append(img_to_array(image))
    f.close()
    img_data = np.array(datalist)
    img_data = img_data.astype('float32')
    img_data /= 255  # scale pixel values to [0, 1]
    # the split fields are strings; cast the labels to floats for training
    label = np.array(labellist).astype('float32')
    return img_data, label
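A quick shape check (a sketch; trainpath is the directory defined in the full listing below):

imgs, labels = __data_label__(trainpath)
print(imgs.shape)    # (N, 218, 178, 3)
print(labels.shape)  # (N, 10) -- five (x, y) keypoints per face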
Step 4: Build the network:
A very simple network is used here.
def __CNN__():
    model = Sequential()  # input images are 218*178*3
    model.add(Conv2D(32, (3, 3), input_shape=(218, 178, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10))  # 10 outputs: five (x, y) keypoints
    model.add(Activation('softmax'))
    model.summary()
    return model

model.summary() prints:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 216, 176, 32) 896
_________________________________________________________________
activation_1 (Activation) (None, 216, 176, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 108, 88, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 106, 86, 32) 9248
_________________________________________________________________
activation_2 (Activation) (None, 106, 86, 32) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 53, 43, 32) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 51, 41, 64) 18496
_________________________________________________________________
activation_3 (Activation) (None, 51, 41, 64) 0
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 25, 20, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 32000) 0
_________________________________________________________________
dense_1 (Dense) (None, 64) 2048064
_________________________________________________________________
activation_4 (Activation) (None, 64) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 64) 0
_________________________________________________________________
dense_2 (Dense) (None, 10) 650
_________________________________________________________________
activation_5 (Activation) (None, 10) 0
=================================================================
Total params: 2,077,354
Trainable params: 2,077,354
Non-trainable params: 0
_________________________________________________________________
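As a quick check on the parameter count: the last pooling layer outputs 25 × 20 × 64 = 32,000 values, so dense_1 needs 32,000 × 64 weights plus 64 biases = 2,048,064 parameters, matching the summary.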
Step 5: Train, save, and predict:
def train(model, testdata, testlabel, traindata, trainlabel):
    # the loss argument of model.compile is the objective function
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # batch_size is the number of samples per gradient update, epochs the number of passes
    model.fit(traindata, trainlabel, batch_size=16, epochs=20,
              validation_data=(testdata, testlabel))
    # evaluate on the test set
    model.evaluate(testdata, testlabel, batch_size=16, verbose=1)

def save(model, file_path=FILE_PATH):
    print('Model Saved.')
    model.save_weights(file_path)

# def load(model, file_path=FILE_PATH):
#     print('Model Loaded.')
#     model.load_weights(file_path)

def predict(model, image):
    img = image.reshape((1, 218, 178, 3))  # batch of one image
    img = img.astype('float32')
    img /= 255  # same normalization as in training
    result = model.predict(img)
    result = result * 1000 + 10  # rescale the output to pixel coordinates
    print(result)
    return result
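An aside on the loss function: categorical_crossentropy paired with a softmax output is an unusual choice for coordinate regression, which is why predict() has to rescale the output with *1000+10. A more conventional setup, shown only as a sketch (this is not the configuration that trained face_landmark.h5), would drop the final softmax and use mean squared error on normalized coordinates:

# alternative regression setup (a sketch, not what trained the saved weights);
# assumes labels are stored as x1, y1, x2, y2, ...
trainlabel[:, 0::2] /= 178.0  # x coordinates -> [0, 1] (image width 178)
trainlabel[:, 1::2] /= 218.0  # y coordinates -> [0, 1] (image height 218)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])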
Step 6: Main module:
############
# main module
############
if __name__ == '__main__':
    model = __CNN__()
    testdata, testlabel = __data_label__(testpath)
    traindata, trainlabel = __data_label__(trainpath)
    # print(testlabel)
    # train(model, testdata, testlabel, traindata, trainlabel)
    # model.save(FILE_PATH)
    model.load_weights(FILE_PATH)
    img = []
    path = "D:/pycode/facial-keypoints-master/data/train/000096.jpg"
    # path = "D:/pycode/Abel_Aguilar_0001.jpg"
    image = load_img(path, target_size=(218, 178))  # same size as in training
    img.append(img_to_array(image))
    img_data = np.array(img)
    rects = predict(model, img_data)
    img = cv2.imread(path)
    # each row of rects holds five (x, y) keypoint pairs
    for x, y, w, h, a, b, c, d, e, f in rects:
        point(x, y)
        point(w, h)
        point(a, b)
        point(c, d)
        point(e, f)

    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
When training, uncomment the call to train(); when predicting, comment it out again.
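In training mode, the relevant lines of the main module look like this:

# training mode: fit the network and save the weights, skip loading
train(model, testdata, testlabel, traindata, trainlabel)
save(model)  # writes face_landmark.h5
# model.load_weights(FILE_PATH)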
The full code:
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
import cv2


FILE_PATH = 'face_landmark.h5'
trainpath = 'D:/pycode/facial-keypoints-master/data/train/'
testpath = 'D:/pycode/facial-keypoints-master/data/test/'

def __data_label__(path):
    # each line of lable.txt: image filename, then the keypoint coordinates
    f = open(path + "lable.txt", "r")
    datalist = []
    labellist = []
    for line in f.readlines():
        a = line.replace("\n", "")
        b = a.split(",")
        labellist.append(b[1:])
        imgname = path + b[0]
        image = load_img(imgname, target_size=(218, 178))
        datalist.append(img_to_array(image))
    f.close()
    img_data = np.array(datalist)
    img_data = img_data.astype('float32')
    img_data /= 255  # scale pixel values to [0, 1]
    label = np.array(labellist).astype('float32')
    return img_data, label

###############
# build the CNN model
###############

def __CNN__():
    model = Sequential()  # input images are 218*178*3
    model.add(Conv2D(32, (3, 3), input_shape=(218, 178, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10))  # 10 outputs: five (x, y) keypoints
    model.add(Activation('softmax'))

    model.summary()
    return model

def train(model, testdata, testlabel, traindata, trainlabel):
    # the loss argument of model.compile is the objective function
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # batch_size is the number of samples per gradient update, epochs the number of passes
    model.fit(traindata, trainlabel, batch_size=16, epochs=20,
              validation_data=(testdata, testlabel))
    # evaluate on the test set
    model.evaluate(testdata, testlabel, batch_size=16, verbose=1)

def save(model, file_path=FILE_PATH):
    print('Model Saved.')
    model.save_weights(file_path)

# def load(model, file_path=FILE_PATH):
#     print('Model Loaded.')
#     model.load_weights(file_path)

def predict(model, image):
    img = image.reshape((1, 218, 178, 3))  # batch of one image
    img = img.astype('float32')
    img /= 255  # same normalization as in training
    result = model.predict(img)
    result = result * 1000 + 10  # rescale the output to pixel coordinates
    print(result)
    return result

def point(x, y):
    # cv2.circle expects integer pixel coordinates; draws on the global img
    cv2.circle(img, (int(x), int(y)), 1, (0, 0, 255), 10)

############
# main module
############
if __name__ == '__main__':
    model = __CNN__()
    testdata, testlabel = __data_label__(testpath)
    traindata, trainlabel = __data_label__(trainpath)
    # print(testlabel)
    # train(model, testdata, testlabel, traindata, trainlabel)
    # model.save(FILE_PATH)
    model.load_weights(FILE_PATH)
    img = []
    path = "D:/pycode/facial-keypoints-master/data/train/000096.jpg"
    # path = "D:/pycode/Abel_Aguilar_0001.jpg"
    image = load_img(path, target_size=(218, 178))  # same size as in training
    img.append(img_to_array(image))
    img_data = np.array(img)
    rects = predict(model, img_data)
    img = cv2.imread(path)
    # each row of rects holds five (x, y) keypoint pairs
    for x, y, w, h, a, b, c, d, e, f in rects:
        point(x, y)
        point(w, h)
        point(a, b)
        point(c, d)
        point(e, f)

    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The result, with the five predicted keypoints drawn on the image, is as follows:
Future plans:
This was trained with tensorflow-cpu on very little data with a very simple network, so increasing the amount of data and deepening the network should leave considerable room for improvement.
Also, the network currently only handles images of size (218, 178) pixels; making it more generally applicable is the next goal.
Improvement plan:
Resize all images to a square, padding the shorter side with black borders:
# resize an image to the given square size
IMAGE_SIZE = 178  # target edge length (an assumed value; set as needed)

def resize_image(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    top, bottom, left, right = (0, 0, 0, 0)

    # get the image dimensions
    h, w, _ = image.shape

    # for non-square images, find the longest edge
    longest_edge = max(h, w)

    # compute how many pixels the short side needs to match the long side
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left

    # border color (black, in BGR)
    BLACK = [0, 0, 0]

    # pad the image so height and width are equal; cv2.BORDER_CONSTANT fills
    # the border with the given value
    constant = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=BLACK)

    # resize and return; cv2.resize takes (width, height), which is the same
    # here because the target is square
    return cv2.resize(constant, (width, height))
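A hypothetical usage example, reusing the image path from the main module:

# pad to a square, then scale down to IMAGE_SIZE x IMAGE_SIZE
img = cv2.imread("D:/pycode/facial-keypoints-master/data/train/000096.jpg")
square = resize_image(img)
print(square.shape)  # (178, 178, 3) under the assumed IMAGE_SIZE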