Based on the original author's Python version of MTCNN; open-source address: https://github.com/DuinoDu/mtcnn
Our O-Net training code follows this open-source project: https://github.com/dlunion/mtcnn
with some modifications, adapted with reference to: http://blog.csdn.net/xzzppp/article/details/76467514
The model we trained is cascaded after the author's net1 and net2.
Author's data format:
Image data in RGB order, and the image is transposed. In the bbox regression, the x1,y1 offsets are computed relative to the top-left corner and the x2,y2 offsets relative to the bottom-right corner.
Landmark order: x0, x1, x2, x3, x4, y0, y1, y2, y3, y4
Our data format:
Image data in BGR order, and the image is not transposed. In the bbox regression, the x1,y1 offsets are computed relative to the top-left corner and the x2,y2 offsets relative to the bottom-right corner.
Landmark order: x0, x1, x2, x3, x4, y0, y1, y2, y3, y4
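Since only the channel order and the transpose differ between the two formats, converting a prepared input patch from one layout to the other is a one-line operation each way. The following is a minimal illustrative sketch (not part of the detection code below; the array name patch_bgr_hwc is hypothetical), assuming a 48x48 crop as OpenCV returns it:
import numpy as np
patch_bgr_hwc = np.zeros((48, 48, 3), dtype=np.float32)  # height*width*channel, BGR, as read by OpenCV
# Author's layout: swap B/R to get RGB, then transpose -> channel*width*height
author_input = np.swapaxes(patch_bgr_hwc[:, :, ::-1], 0, 2)  # shape (3, 48, 48), C*W*H
# Our layout: keep BGR, move channels first without transposing -> channel*height*width
our_input = patch_bgr_hwc.transpose(2, 0, 1)  # shape (3, 48, 48), C*H*W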
The modified code, with comments, is as follows:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import _init_paths
import caffe
import cv2
import numpy as np
#from python_wrapper import *
import os
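# bbreg: refine candidate boxes with the network's 4-dim regression output. reg arrives as 4xN and is
# transposed inside; each offset is a fraction of the box width/height, with the x1,y1 offsets applied to
# the top-left corner and the x2,y2 offsets to the bottom-right corner, matching the format described above.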
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
if reg.shape[1] == 1:
print "reshape of reg"
pass # reshape of reg
w = boundingbox[:,2] - boundingbox[:,0] + 1
h = boundingbox[:,3] - boundingbox[:,1] + 1
bb0 = boundingbox[:,0] + reg[:,0]*w
bb1 = boundingbox[:,1] + reg[:,1]*h
bb2 = boundingbox[:,2] + reg[:,2]*w
bb3 = boundingbox[:,3] + reg[:,3]*h
boundingbox[:,0:4] = np.array([bb0, bb1, bb2, bb3]).T
#print "bb", boundingbox
return boundingbox
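# bbreg_onet: variant of bbreg apparently intended for an O-Net whose x2,y2 offsets are regressed relative
# to the top-left corner (boundingbox[:,0], boundingbox[:,1]) rather than the bottom-right one.
# Note that it is not called anywhere in this script; the third stage below also uses bbreg.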
def bbreg_onet(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
if reg.shape[1] == 1:
print "reshape of reg"
pass # reshape of reg
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 0] + reg[:, 2] * w
bb3 = boundingbox[:, 1] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
# print "bb", boundingbox
return boundingbox
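# pad: clip each box to the image boundary. For every box it returns the source coordinates in the image
# (y..ey, x..ex), the destination coordinates inside a tmpw x tmph patch (dy..edy, dx..edx), and the patch
# size itself, converting from MATLAB-style 1-based indices to Python's 0-based indices at the end.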
def pad(boxesA, w, h):
boxes = boxesA.copy() # work on a copy so the caller's array is not modified in place
#print '#################'
#print 'boxes', boxes
#print 'w,h', w, h
tmph = boxes[:,3] - boxes[:,1] + 1
tmpw = boxes[:,2] - boxes[:,0] + 1
numbox = boxes.shape[0]
#print 'tmph', tmph
#print 'tmpw', tmpw
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:,0]
y = boxes[:,1]
ex = boxes[:,2]
ey = boxes[:,3]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w-1 + tmpw[tmp]
ex[tmp] = w-1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h-1 + tmph[tmp]
ey[tmp] = h-1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy = np.maximum(0, dy-1)
dx = np.maximum(0, dx-1)
y = np.maximum(0, y-1)
x = np.maximum(0, x-1)
edy = np.maximum(0, edy-1)
edx = np.maximum(0, edx-1)
ey = np.maximum(0, ey-1)
ex = np.maximum(0, ex-1)
#print "dy" ,dy
#print "dx" ,dx
#print "y " ,y
#print "x " ,x
#print "edy" ,edy
#print "edx" ,edx
#print "ey" ,ey
#print "ex" ,ex
#print 'boxes', boxes
return [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
def rerec(bboxA):
# convert bboxA to square
w = bboxA[:,2] - bboxA[:,0]
h = bboxA[:,3] - bboxA[:,1]
l = np.maximum(w,h).T
#print 'bboxA', bboxA
#print 'w', w
#print 'h', h
#print 'l', l
bboxA[:,0] = bboxA[:,0] + w*0.5 - l*0.5
bboxA[:,1] = bboxA[:,1] + h*0.5 - l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.repeat([l], 2, axis = 0).T
return bboxA
def nms(boxes, threshold, type):
"""nms
:boxes: [:,0:5]
:threshold: 0.5 like
:type: 'Min' or others
:returns: TODO
"""
if boxes.shape[0] == 0:
return np.array([])
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = np.multiply(x2-x1+1, y2-y1+1)
I = np.array(s.argsort()) # indices of the boxes sorted by ascending score
pick = []
while len(I) > 0:
xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if type == 'Min':
o = inter / np.minimum(area[I[-1]], area[I[0:-1]])
else:
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where( o <= threshold)[0]]
return pick
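# generateBoundingBox: turn P-Net score-map positions above threshold t back into boxes in the original
# image, using the network stride (2) and cell size (12) divided by the pyramid scale. Returns an Nx9
# array whose rows are [x1, y1, x2, y2, score, dx1, dy1, dx2, dy2].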
def generateBoundingBox(map, reg, scale, t):
stride = 2
cellsize = 12
map = map.T
dx1 = reg[0,:,:].T
dy1 = reg[1,:,:].T
dx2 = reg[2,:,:].T
dy2 = reg[3,:,:].T
(x, y) = np.where(map >= t)
yy = y
xx = x
'''
if y.shape[0] == 1: # only one point exceed threshold
y = y.T
x = x.T
score = map[x,y].T
dx1 = dx1.T
dy1 = dy1.T
dx2 = dx2.T
dy2 = dy2.T
# a little stange, when there is only one bb created by PNet
#print "1: x,y", x,y
a = (x*map.shape[1]) + (y+1)
x = a/map.shape[0]
y = a%map.shape[0] - 1
#print "2: x,y", x,y
else:
score = map[x,y]
'''
#print "dx1.shape", dx1.shape
#print 'map.shape', map.shape
score = map[x,y]
reg = np.array([dx1[x,y], dy1[x,y], dx2[x,y], dy2[x,y]])
if reg.shape[0] == 0:
pass
boundingbox = np.array([yy, xx]).T
bb1 = np.fix((stride * (boundingbox) + 1) / scale).T # MATLAB indexes from 1 and uses (boundingbox - 1) here;
bb2 = np.fix((stride * (boundingbox) + cellsize - 1 + 1) / scale).T # Python indexes from 0, so the -1 is not needed
score = np.array([score])
boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
#print '(x,y)',x,y
#print 'score', score
#print 'reg', reg
return boundingbox_out.T
def drawBoxes(im, boxes):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
for i in range(x1.shape[0]):
cv2.rectangle(im, (int(x1[i]), int(y1[i])), (int(x2[i]), int(y2[i])), (0,0,255), 3)
return im
def drawlandmark(im, points):
for i in range(points.shape[0]):
for j in range(5):
cv2.circle(im, (int(points[i][j]), int(points[i][j+5])), 2, (255,0,0),-1)
return im
from time import time
_tstart_stack = []
def tic():
_tstart_stack.append(time())
def toc(fmt="Elapsed: %s s"):
print fmt % (time()-_tstart_stack.pop())
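# detect_face: run the three-stage P-Net/R-Net/O-Net cascade on an RGB image (main() swaps OpenCV's BGR
# channels before calling). minsize is the smallest face searched for, threshold holds the three per-stage
# score thresholds, fastresize chooses whether normalization happens before or after resizing, and factor
# is the image-pyramid scale step. Returns (total_boxes, points): an Nx5 array [x1, y1, x2, y2, score]
# and an Nx10 landmark array in x0..x4, y0..y4 order.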
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor):
img2 = img.copy()
factor_count = 0
total_boxes = np.zeros((0,9), np.float)
points = []
h = img.shape[0]
w = img.shape[1]
minl = min(h, w)
img = img.astype(float)
m = 12.0/minsize
minl = minl*m
#total_boxes = np.load('total_boxes.npy')
#total_boxes = np.load('total_boxes_242.npy')
#total_boxes = np.load('total_boxes_101.npy')
# create scale pyramid
scales = []
while minl >= 12:
scales.append(m * pow(factor, factor_count))
minl *= factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h*scale))
ws = int(np.ceil(w*scale))
if fastresize:
im_data = (img-127.5)*0.0078125 # [0,255] -> [-1,1]
im_data = cv2.resize(im_data, (ws,hs)) # default is bilinear
else:
im_data = cv2.resize(img, (ws,hs)) # default is bilinear
im_data = (im_data-127.5)*0.0078125 # [0,255] -> [-1,1]
#im_data = imResample(img, hs, ws); print "scale:", scale
im_data = np.swapaxes(im_data, 0, 2)
im_data = np.array([im_data], dtype = np.float)
PNet.blobs['data'].reshape(1, 3, ws, hs)
PNet.blobs['data'].data[...] = im_data
out = PNet.forward()
boxes = generateBoundingBox(out['prob1'][0,1,:,:], out['conv4-2'][0], scale, threshold[0])
if boxes.shape[0] != 0:
#print boxes[4:9]
#print 'im_data', im_data[0:5, 0:5, 0], '\n'
#print 'prob1', out['prob1'][0,0,0:3,0:3]
pick = nms(boxes, 0.5, 'Union')
if len(pick) > 0 :
boxes = boxes[pick, :]
if boxes.shape[0] != 0:
total_boxes = np.concatenate((total_boxes, boxes), axis=0)
#np.save('total_boxes_101.npy', total_boxes)
#####
# 1 #
#####
print "[1]:",total_boxes.shape[0]
#print total_boxes
#return total_boxes, []
numbox = total_boxes.shape[0]
if numbox > 0:
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
print "[2]:",total_boxes.shape[0]
# revise and convert to square
regh = total_boxes[:,3] - total_boxes[:,1]
regw = total_boxes[:,2] - total_boxes[:,0]
t1 = total_boxes[:,0] + total_boxes[:,5]*regw
t2 = total_boxes[:,1] + total_boxes[:,6]*regh
t3 = total_boxes[:,2] + total_boxes[:,7]*regw
t4 = total_boxes[:,3] + total_boxes[:,8]*regh
t5 = total_boxes[:,4]
total_boxes = np.array([t1,t2,t3,t4,t5]).T
#print "[3]:",total_boxes.shape[0]
#print regh
#print regw
#print 't1',t1
#print total_boxes
total_boxes = rerec(total_boxes) # convert box to square
print "[4]:",total_boxes.shape[0]
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4])
print "[4.5]:",total_boxes.shape[0]
#print total_boxes
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print total_boxes.shape
#print total_boxes
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
#print 'tmph', tmph
#print 'tmpw', tmpw
#print "y,ey,x,ex", y, ey, x, ex,
#print "edy", edy
#tempimg = np.load('tempimg.npy')
# construct input for RNet
tempimg = np.zeros((numbox, 24, 24, 3)) # R-Net input patches, (numbox, 24, 24, 3)
for k in range(numbox):
tmp = np.zeros((int(tmph[k]) +1, int(tmpw[k]) + 1,3))
#print "dx[k], edx[k]:", dx[k], edx[k]
#print "dy[k], edy[k]:", dy[k], edy[k]
#print "img.shape", img[y[k]:ey[k]+1, x[k]:ex[k]+1].shape
#print "tmp.shape", tmp[dy[k]:edy[k]+1, dx[k]:edx[k]+1].shape
tmp[int(dy[k]):int(edy[k])+1, int(dx[k]):int(edx[k])+1] = img[int(y[k]):int(ey[k])+1, int(x[k]):int(ex[k])+1]
#print "y,ey,x,ex", y[k], ey[k], x[k], ex[k]
#print "tmp", tmp.shape
tempimg[k,:,:,:] = cv2.resize(tmp, (24, 24))
#tempimg[k,:,:,:] = imResample(tmp, 24, 24)
#print 'tempimg', tempimg[k,:,:,:].shape
#print tempimg[k,0:5,0:5,0]
#print tempimg[k,0:5,0:5,1]
#print tempimg[k,0:5,0:5,2]
#print k
#print tempimg.shape
#print tempimg[0,0,0,:]
tempimg = (tempimg-127.5)*0.0078125 # [0,255] -> [-1,1] (the original imResample wrapper did this normalization internally)
#np.save('tempimg.npy', tempimg)
# RNet
tempimg = np.swapaxes(tempimg, 1, 3)
#print tempimg[0,:,0,0]
RNet.blobs['data'].reshape(numbox, 3, 24, 24)
RNet.blobs['data'].data[...] = tempimg
out = RNet.forward()
#print out['conv5-2'].shape
#print out['prob1'].shape
score = out['prob1'][:,1]
#print 'score', score
pass_t = np.where(score>threshold[1])[0]
#print 'pass_t', pass_t
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis = 1)
print "[5]:",total_boxes.shape[0]
#print total_boxes
#print "1.5:",total_boxes.shape
mv = out['conv5-2'][pass_t, :].T
#print "mv", mv
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
#print 'pick', pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
print "[6]:",total_boxes.shape[0]
total_boxes = bbreg(total_boxes, mv[:, pick])
print "[7]:",total_boxes.shape[0]
total_boxes = rerec(total_boxes)
print "[8]:",total_boxes.shape[0]
#####
# 2 #
#####
print "2:",total_boxes.shape
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print 'tmpw', tmpw
#print 'tmph', tmph
#print 'y ', y
#print 'ey', ey
#print 'x ', x
#print 'ex', ex
# The image was converted to RGB in main(); swap the channels back to BGR (the order OpenCV reads),
# because our O-Net was trained on BGR data.
ttt = img[:, :, 2].copy()
img[:, :, 2] = img[:, :, 0]
img[:, :, 0] = ttt
tempimg = np.zeros((numbox, 48, 48, 3))
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]),3))
tmp[int(dy[k]):int(edy[k])+1, int(dx[k]):int(edx[k])+1] = img[int(y[k]):int(ey[k])+1, int(x[k]):int(ex[k])+1]
tempimg[k,:,:,:] = cv2.resize(tmp, (48, 48))
tempimg = (tempimg-127.5)*0.0078125 # [0,255] -> [-1,1]
# ONet
# Reorder (numbox, 48, 48, 3) to (numbox, 3, 48, 48), i.e. channel*height*width. Unlike the author's
# nets, which take a transposed channel*width*height input, our O-Net expects the untransposed layout.
tempimg = np.swapaxes(tempimg, 1, 3)
tempimg = np.swapaxes(tempimg, 2, 3)
ONet.blobs['data'].reshape(numbox, 3, 48, 48)
ONet.blobs['data'].data[...] = tempimg
out = ONet.forward()
score = out['prob1'][:,1]
points = out['conv6-3']
pass_t = np.where(score>threshold[2])[0]
points = points[pass_t, :]
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis=1)
print "[9]:",total_boxes.shape[0]
mv = out['conv6-2'][pass_t, :].T
w = total_boxes[:,3] - total_boxes[:,1] + 1
h = total_boxes[:,2] - total_boxes[:,0] + 1
points[:, 0:5] = np.tile(w, (5, 1)).T * points[:, 0:5] + np.tile(total_boxes[:, 0], (5, 1)).T - 1
points[:, 5:10] = np.tile(h, (5, 1)).T * points[:, 5:10] + np.tile(total_boxes[:, 1], (5, 1)).T - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes, mv[:,:])
print "[10]:",total_boxes.shape[0]
pick = nms(total_boxes, 0.7, 'Min') # default threshold: 0.7
#print pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
print "[11]:",total_boxes.shape[0]
points = points[pick, :]
#####
# 3 #
#####
print "3:",total_boxes.shape
return total_boxes, points
# *************************************** main program ******************************************************
def main():
# list of image file names
imglistfile = "300W/front_list.txt"
write_file_name = "300W/afw_bboxtest/bbox_pre.txt" # output file for the predicted bbox coordinates
write_file = open(write_file_name, "w")
# minimum face size in the test images; used to control the image-pyramid scales
minsize = 80
caffe_model_path = "./model"
threshold = [0.6, 0.7, 0.7]
factor = 0.709 # image-pyramid scale factor
caffe.set_mode_gpu() # for CPU mode, use caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
# ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/48net_v5_7.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path + "/det3.prototxt","/media/xiao/B4AE11CDAE11894E/mtcnn/train_V9_64/models_48_1" + "/_iter_220000.caffemodel",caffe.TEST)
#error = []
f = open(imglistfile, 'r')
for imgpath in f.readlines():
#imgpath = imgpath.split('\n')[0]
imgpath = imgpath[0:-2] # slice off the trailing line-ending characters
pts_dir = imgpath
imgpath = imgpath + '.jpg'
align_gt = []
pts_dir_name = '300W/afw/' + pts_dir + '.pts' # full path to the .pts annotation file
# read the ground-truth landmark coordinates into align_gt, in x0,y0,x1,y1,x2,y2,x3,y3,x4,y4 order
infile_pts = open(pts_dir_name, 'r')
for i in range(3):
compent = infile_pts.next()
for j in range(5):
compent = infile_pts.next()
bbox = compent.strip().split(' ')
align_gt.append(float(bbox[0]))
align_gt.append(float(bbox[1]))
# convert align_gt from a list to an array so it can be indexed like an array later
align_gt = np.array(align_gt)
infile_pts.close()
write_file.write(imgpath + '\n') # write the image name
savepath = imgpath
imagepath = "300W/afw/" + imgpath
print "######\n", imagepath
img = cv2.imread(imagepath) # read the image
img_matlab = img.copy()
# In the author's code, OpenCV reads the image as BGR and the channels are then swapped to RGB.
# (Note: our O-Net was trained on BGR data, so inside the third stage the image is swapped back to BGR.)
# In the author's code, the OpenCV image shape height*width*channel has axes 0 and 2 swapped, giving
# channel*width*height. (Note: our O-Net instead expects channel*height*width.) These two points are the
# key differences and the main pitfalls; take care with them when modifying the code.
tmp = img_matlab[:,:,2].copy() # swap the B and R channels: BGR -> RGB
img_matlab[:,:,2] = img_matlab[:,:,0]
img_matlab[:,:,0] = tmp
# check rgb position
#tic()
# detect faces: returns the face bboxes and landmark positions
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
#toc()
# write the detected face bboxes to the txt file
if len(boundingboxes):
write_file.write(str(boundingboxes.shape[0]) + '\n')
for i in range(boundingboxes.shape[0]):
write_file.write(str(boundingboxes[i][0]) + ' ' + str(boundingboxes[i][1]) + ' ')
write_file.write(str(boundingboxes[i][2]) + ' ' + str(boundingboxes[i][3]) + '\n')
else:
write_file.write('0 ' + '\n')
#write_file.write('0 ' + '\n')
continue
# write the face landmarks to the txt file (currently commented out)
#if len(points):
#write_file.write(str(points.shape[0]) + '\n')
#for i in range(points.shape[0]):
# for j in range(5):
# write_file.write(str(points[i][j]) + ' ' + str(points[i][j+5]) + ' ')
# write_file.write('\n')
#else:
# write_file.write('0 ' + '\n')
# continue
img = drawBoxes(img, boundingboxes) # draw the face bboxes on the original image
img = drawlandmark(img, points) # draw the predicted face landmarks on the original image
# draw the officially annotated (ground-truth) face landmarks
for j in range(5):
cv2.circle(img, (int(align_gt[2*j]), int(align_gt[2*j+1])), 2, (0,0,255),-1)
# save and display the image
savepath = "300W/MTCNN_v8/afw_front/" + savepath
cv2.imwrite(savepath, img)
cv2.imshow('img', img)
ch = cv2.waitKey(10) & 0xFF
if ch == 27:
break
#print error
f.close()
write_file.close()
if __name__ == "__main__":
main()