Code implementation (MXNet): https://github.com/pangyupo/mxnet_mtcnn_face_detection
We start from the main script, main.py:
import mxnet as mx
import cv2
from mtcnn_detector import MtcnnDetector

# create the detector; num_worker sets the size of the PNet process pool
detector = MtcnnDetector(model_folder='model', ctx=mx.cpu(0), num_worker=4, accurate_landmark=False)

img = cv2.imread('test.jpg')

# run detector
results = detector.detect_face(img)

if results is not None:
    total_boxes = results[0]
    points = results[1]

    # extract aligned face chips
    chips = detector.extract_image_chips(img, points, 144, 0.37)
    for i, chip in enumerate(chips):
        cv2.imshow('chip_' + str(i), chip)
        cv2.imwrite('chip_' + str(i) + '.png', chip)

    draw = img.copy()
    for b in total_boxes:
        cv2.rectangle(draw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 255, 255))

    for p in points:
        for i in range(5):
            cv2.circle(draw, (int(p[i]), int(p[i + 5])), 1, (0, 0, 255), 2)

    cv2.imshow("detection result", draw)
    cv2.waitKey(0)
Now let's analyze detect_face, the function called in the second step:
def detect_face(self, img):
    # check input
    MIN_DET_SIZE = 12

    if img is None:
        return None

    # only works for color image
    if len(img.shape) != 3:
        return None

    # detected boxes
    total_boxes = []

    height, width, _ = img.shape
    minl = min(height, width)

    # get all the valid scales
    scales = []
    m = MIN_DET_SIZE / self.minsize
    minl *= m
    factor_count = 0
    while minl > MIN_DET_SIZE:
        scales.append(m * self.factor ** factor_count)
        minl *= self.factor
        factor_count += 1
This part builds the image pyramid: scales holds every scale factor to try. Because the detection window is fixed at 12×12, the image has to be resized to a range of sizes and run through the network one by one in order to detect faces of different sizes.
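To make the pyramid concrete, here is a minimal standalone sketch of the same computation; the default values minsize=20 and factor=0.709 are an assumption (they are the usual MTCNN choices), not something fixed by the snippet above:

# Minimal sketch of the scale-pyramid computation above.
# Assumption: minsize=20, factor=0.709 (the common MTCNN defaults).
MIN_DET_SIZE = 12

def compute_scales(height, width, minsize=20, factor=0.709):
    scales = []
    m = MIN_DET_SIZE / minsize      # scale that maps the smallest face onto 12 px
    minl = min(height, width) * m
    factor_count = 0
    while minl > MIN_DET_SIZE:      # stop once the shorter side shrinks below the window
        scales.append(m * factor ** factor_count)
        minl *= factor
        factor_count += 1
    return scales

print(compute_scales(480, 640))    # e.g. [0.6, 0.4254, 0.30161, ...]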
Next is P-Net:
    #############################################
    # first stage
    #############################################

    # for scale in scales:
    #     return_boxes = self.detect_first_stage(img, scale, 0)
    #     if return_boxes is not None:
    #         total_boxes.append(return_boxes)

    sliced_index = self.slice_index(len(scales))
    total_boxes = []
    for batch in sliced_index:
        local_boxes = self.Pool.map(detect_first_stage_warpper,
                                    zip(repeat(img), self.PNets[:len(batch)],
                                        [scales[i] for i in batch], repeat(self.threshold[0])))
        total_boxes.extend(local_boxes)

    # remove the Nones
    total_boxes = [i for i in total_boxes if i is not None]

    if len(total_boxes) == 0:
        return None

    total_boxes = np.vstack(total_boxes)

    if total_boxes.size == 0:
        return None

    # merge the detection from first stage
    pick = nms(total_boxes[:, 0:5], 0.7, 'Union')
    total_boxes = total_boxes[pick]

    bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
    bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1

    # refine the bboxes with the PNet regression offsets
    total_boxes = np.vstack([total_boxes[:, 0] + total_boxes[:, 5] * bbw,
                             total_boxes[:, 1] + total_boxes[:, 6] * bbh,
                             total_boxes[:, 2] + total_boxes[:, 7] * bbw,
                             total_boxes[:, 3] + total_boxes[:, 8] * bbh,
                             total_boxes[:, 4]])
    total_boxes = total_boxes.T

    total_boxes = self.convert_to_square(total_boxes)
    total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
self.slice_index splits the scale indices into batches of size self.num_worker, the number of processes used for this first stage (a sketch of the batching follows below). Each batch is mapped onto the PNet pool, and each worker returns the candidate boxes found at its scale. For each scale, local_boxes has shape N×9: four coordinates, one confidence score, and four proportional offsets.
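The batching itself is simple chunking. A standalone sketch of the idea behind slice_index (the real helper is a method of MtcnnDetector, so treat this signature as an approximation):

def slice_index(number, num_worker=4):
    """Split the indices 0..number-1 into consecutive chunks of at most num_worker."""
    def chunks(l, n):
        for i in range(0, len(l), n):
            yield l[i:i + n]
    return list(chunks(range(number), num_worker))

print(slice_index(10))   # [range(0, 4), range(4, 8), range(8, 10)]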
Let's first analyze the detect_first_stage function:
def detect_first_stage(img, net, scale, threshold):
    """
    run PNet for first stage

    Parameters:
    ----------
    img: numpy array, bgr order
        input image
    net: PNet
        worker
    scale: float number
        how much the input image should be scaled
    threshold: float number
        detection threshold for this stage

    Returns:
    -------
    total_boxes : bboxes
    """
    height, width, _ = img.shape
    hs = int(math.ceil(height * scale))
    ws = int(math.ceil(width * scale))

    im_data = cv2.resize(img, (ws, hs))

    # adjust for the network input
    input_buf = adjust_input(im_data)   # input_buf shape: (1, c, h, w)
    output = net.predict(input_buf)
    boxes = generate_bbox(output[1][0, 1, :, :], output[0], scale, threshold)

    if boxes.size == 0:
        return None

    # nms
    pick = nms(boxes[:, 0:5], 0.5, mode='Union')
    boxes = boxes[pick]
    return boxes
output[0] has shape (1, 4, m, n): the proportional offsets dx1, dy1, dx2, dy2 for each candidate window.
output[1] has shape (1, 2, m, n): the face probability for each candidate window.
Here m = (hs-12)/2 + 1 and n = (ws-12)/2 + 1, since PNet is fully convolutional with an effective 12×12 window and stride 2.
boxes contains, per candidate: four coordinates mapped back to the original image, one confidence score, and four proportional offsets.
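How does generate_bbox map a score-map cell back to image coordinates? A sketch close to the repo's helper (stride 2, cell size 12; dividing by scale returns to original-image coordinates):

import numpy as np

def generate_bbox(score_map, reg, scale, threshold):
    """score_map: (m, n) face probabilities; reg: (1, 4, m, n) offsets."""
    stride = 2
    cellsize = 12

    t_index = np.where(score_map > threshold)     # cells that pass the threshold
    if t_index[0].size == 0:
        return np.array([])

    # regression offsets of the surviving cells
    dx1, dy1, dx2, dy2 = [reg[0, i, t_index[0], t_index[1]] for i in range(4)]
    reg = np.array([dx1, dy1, dx2, dy2])
    score = score_map[t_index[0], t_index[1]]

    # each cell (row, col) is a 12x12 window at stride 2 in the scaled image;
    # dividing by scale maps it back onto the original image
    boundingbox = np.vstack([np.round((stride * t_index[1] + 1) / scale),
                             np.round((stride * t_index[0] + 1) / scale),
                             np.round((stride * t_index[1] + 1 + cellsize) / scale),
                             np.round((stride * t_index[0] + 1 + cellsize) / scale),
                             score,
                             reg])
    return boundingbox.T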
Back in the first stage:
NMS is run on total_boxes. The box coordinates are then refined using the regression offsets (before refinement every box is a square whose side in the original image is 12/scale), and finally the boxes are converted back into squares; both helpers are sketched below.
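A sketch of the two helpers doing that refinement and squaring, close to the repo's calibrate_box and convert_to_square (the inline vstack in the first stage performs the same arithmetic as calibrate_box):

import numpy as np

def calibrate_box(bbox, reg):
    """Shift box corners by the regression offsets, scaled by box width/height."""
    w = (bbox[:, 2] - bbox[:, 0] + 1).reshape(-1, 1)
    h = (bbox[:, 3] - bbox[:, 1] + 1).reshape(-1, 1)
    reg_m = np.hstack([w, h, w, h])
    bbox[:, 0:4] = bbox[:, 0:4] + reg_m * reg
    return bbox

def convert_to_square(bbox):
    """Grow each box to a square centered where the original box was."""
    square_bbox = bbox.copy()
    h = bbox[:, 3] - bbox[:, 1] + 1
    w = bbox[:, 2] - bbox[:, 0] + 1
    max_side = np.maximum(h, w)
    square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
    square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
    square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
    square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
    return square_bbox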
total_boxes is then fed into R-Net:
    #############################################
    # second stage
    #############################################

    num_box = total_boxes.shape[0]

    # pad the bbox
    [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)

    # (3, 24, 24) is the input shape for RNet
    input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)

    for i in range(num_box):
        tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
        tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
        input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))

    output = self.RNet.predict(input_buf)

    # filter the total_boxes with threshold
    passed = np.where(output[1][:, 1] > self.threshold[1])
    total_boxes = total_boxes[passed]

    if total_boxes.size == 0:
        return None

    total_boxes[:, 4] = output[1][passed, 1].reshape((-1,))
    reg = output[0][passed]

    # nms
    pick = nms(total_boxes, 0.7, 'Union')
    total_boxes = total_boxes[pick]
    total_boxes = self.calibrate_box(total_boxes, reg[pick])
    total_boxes = self.convert_to_square(total_boxes)
    total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
self.pad clips the box coordinates, since some boxes extend beyond the bounds of the original image (a sketch of this helper follows below). Each candidate is cropped into a zero-padded buffer, resized to 24×24, and fed to R-Net, which returns output:
output[0], shape N×4: the proportional offsets of the four coordinates for each candidate box
output[1], shape N×2: the face probability for each candidate box
Candidates whose probability falls below the threshold are filtered out, NMS is applied, and the surviving boxes are refined with output[0].
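A sketch of what self.pad computes, mirroring the repo's helper: for boxes that stick out of the image it returns both the clipped source window (x, y, ex, ey) in the image and the destination window (dx, dy, edx, edy) inside a zero-filled tmph×tmpw buffer, so the crop loop above can copy pixels without going out of bounds:

import numpy as np

def pad(bboxes, w, h):
    """Clip boxes to the w x h image and locate the clipped crop
    inside a zero-padded (tmph, tmpw) buffer."""
    tmpw = (bboxes[:, 2] - bboxes[:, 0] + 1).astype(np.int32)   # box widths
    tmph = (bboxes[:, 3] - bboxes[:, 1] + 1).astype(np.int32)   # box heights
    num_box = bboxes.shape[0]

    dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
    edx, edy = tmpw.copy() - 1, tmph.copy() - 1
    x, y = bboxes[:, 0].copy(), bboxes[:, 1].copy()
    ex, ey = bboxes[:, 2].copy(), bboxes[:, 3].copy()

    # right/bottom overflow: shorten the destination span
    tmp_index = np.where(ex > w - 1)
    edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
    ex[tmp_index] = w - 1

    tmp_index = np.where(ey > h - 1)
    edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
    ey[tmp_index] = h - 1

    # left/top overflow: shift the destination start
    tmp_index = np.where(x < 0)
    dx[tmp_index] = 0 - x[tmp_index]
    x[tmp_index] = 0

    tmp_index = np.where(y < 0)
    dy[tmp_index] = 0 - y[tmp_index]
    y[tmp_index] = 0

    return [item.astype(np.int32) for item in [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]]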
In the last stage, O-Net takes the candidate boxes produced by the second stage:
    #############################################
    # third stage
    #############################################

    num_box = total_boxes.shape[0]

    # pad the bbox
    [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)

    # (3, 48, 48) is the input shape for ONet
    input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)

    for i in range(num_box):
        tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)
        tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
        input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))

    output = self.ONet.predict(input_buf)

    # filter the total_boxes with threshold
    passed = np.where(output[2][:, 1] > self.threshold[2])
    total_boxes = total_boxes[passed]

    if total_boxes.size == 0:
        return None

    total_boxes[:, 4] = output[2][passed, 1].reshape((-1,))
    reg = output[1][passed]
    points = output[0][passed]

    # compute landmark points
    bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
    bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
    points[:, 0:5] = np.expand_dims(total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]
    points[:, 5:10] = np.expand_dims(total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]

    # nms
    total_boxes = self.calibrate_box(total_boxes, reg)
    pick = nms(total_boxes, 0.7, 'Min')
    total_boxes = total_boxes[pick]
    points = points[pick]

    if not self.accurate_landmark:
        return total_boxes, points
As in the R-Net stage, the boxes are padded and resized, this time to 48×48, then fed into O-Net, which returns output:
output[0], shape N×10: the five landmark points of each candidate box, expressed as fractions of the box width/height (e.g. a normalized x of 0.3 in a box starting at x1=100 with bbw=40 maps to 100 + 40×0.3 = 112, which is exactly what the expand_dims lines above compute)
output[1], shape N×4: the proportional offsets of the four coordinates
output[2], shape N×2: the face/non-face probability of each candidate box
The steps are: filter the candidates by output[2]; compute the landmark coordinates; refine the bounding boxes with calibrate_box; run NMS, this time in 'Min' mode; and output the final boxes and points.
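For reference, a sketch of the nms helper used throughout, close to the repo's version. 'Union' divides the intersection by the union of the two areas (standard IoU), while 'Min' divides by the smaller area, which suppresses boxes nested inside larger ones more aggressively, hence its use in this last stage:

import numpy as np

def nms(boxes, overlap_threshold, mode='Union'):
    """Greedy non-maximum suppression over (N, 5) boxes [x1, y1, x2, y2, score]."""
    if len(boxes) == 0:
        return []

    x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(score)            # ascending; best box is last

    pick = []
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # intersection of the best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        inter = w * h

        if mode == 'Min':
            overlap = inter / np.minimum(area[i], area[idxs[:last]])
        else:  # 'Union' = IoU
            overlap = inter / (area[i] + area[idxs[:last]] - inter)

        # drop the best box and everything overlapping it too much
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlap_threshold)[0])))

    return pick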
After this there is one more extended stage, the accurate-landmark refinement, which only runs when accurate_landmark=True (main.py above disables it).