For a while now I have been busy with my internship and with studying the InsightFace face recognition model. Last time I promised an update on MobileNet-SSD face detection but kept putting it off; my apologies. I will also write up my own experience running InsightFace, and upload both past and future projects to my personal GitHub repository. Here is the link: https://github.com/Danbinabo
The VOC dataset contains 20 object classes plus 1 background class: aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor. Sometimes we want to train on VOC but do not need all of these classes, while the data-processing scripts create_list.sh and create_data.sh that ship with caffe-ssd handle all 20 classes by default. If we do not want to rewrite those scripts, we can attack the problem at its source: edit the dataset's annotation files directly and delete the entries for the classes we do not need, as in the sketch below.
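A minimal sketch of that idea (the VOC_ROOT path and the KEEP class list are hypothetical placeholders; back up the dataset before running anything like this): walk the Annotations folder and delete every object node whose class is not in the keep list.

# Hypothetical sketch: strip unwanted classes from VOC annotation XMLs.
# VOC_ROOT and KEEP are assumptions -- adjust them to your own setup.
import os
import xml.etree.ElementTree as ET

VOC_ROOT = "E:/public_data/VOCdevkit/VOC2007"  # hypothetical path
KEEP = {"person", "car"}                       # classes you want to keep

ann_dir = os.path.join(VOC_ROOT, "Annotations")
for fname in os.listdir(ann_dir):
    path = os.path.join(ann_dir, fname)
    tree = ET.parse(path)
    root = tree.getroot()
    # findall() returns a list, so removing nodes while looping is safe
    for obj in root.findall("object"):
        if obj.find("name").text not in KEEP:
            root.remove(obj)  # drop the whole <object> entry
    tree.write(path)

Note that images whose annotations end up with no object left should also be dropped from the corresponding ImageSets/Main lists.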
In fact, training an (N+1)-class detector and a (1+1) face detector rest on exactly the same principle; the only difference is generating the matching dataset. Here is the code that converts the WIDER FACE dataset into a VOC-format dataset; adjust the training-data paths to your own setup:
# -*- coding: utf-8 -*-
import os, cv2, sys, shutil
from xml.dom.minidom import Document


def writexml(filename, saveimg, bboxes, xmlpath):
    """Write one PASCAL VOC style annotation XML for a single image."""
    doc = Document()
    annotation = doc.createElement('annotation')
    doc.appendChild(annotation)

    folder = doc.createElement('folder')
    folder_name = doc.createTextNode('widerface')
    folder.appendChild(folder_name)
    annotation.appendChild(folder)

    filenamenode = doc.createElement('filename')
    filename_name = doc.createTextNode(filename)
    filenamenode.appendChild(filename_name)
    annotation.appendChild(filenamenode)

    source = doc.createElement('source')
    annotation.appendChild(source)
    database = doc.createElement('database')
    database.appendChild(doc.createTextNode('wider face Database'))
    source.appendChild(database)
    annotation_s = doc.createElement('annotation')
    annotation_s.appendChild(doc.createTextNode('PASCAL VOC2007'))
    source.appendChild(annotation_s)
    image = doc.createElement('image')
    image.appendChild(doc.createTextNode('flickr'))
    source.appendChild(image)
    flickrid = doc.createElement('flickrid')
    flickrid.appendChild(doc.createTextNode('-1'))
    source.appendChild(flickrid)

    owner = doc.createElement('owner')
    annotation.appendChild(owner)
    flickrid_o = doc.createElement('flickrid')
    flickrid_o.appendChild(doc.createTextNode('yanyu'))
    owner.appendChild(flickrid_o)
    name_o = doc.createElement('name')
    name_o.appendChild(doc.createTextNode('yanyu'))
    owner.appendChild(name_o)

    # Image size: OpenCV arrays are (height, width, channels).
    size = doc.createElement('size')
    annotation.appendChild(size)
    width = doc.createElement('width')
    width.appendChild(doc.createTextNode(str(saveimg.shape[1])))
    height = doc.createElement('height')
    height.appendChild(doc.createTextNode(str(saveimg.shape[0])))
    depth = doc.createElement('depth')
    depth.appendChild(doc.createTextNode(str(saveimg.shape[2])))
    size.appendChild(width)
    size.appendChild(height)
    size.appendChild(depth)

    segmented = doc.createElement('segmented')
    segmented.appendChild(doc.createTextNode('0'))
    annotation.appendChild(segmented)

    # One <object> node per face; WIDER boxes are (x, y, w, h),
    # while VOC expects (xmin, ymin, xmax, ymax).
    for i in range(len(bboxes)):
        bbox = bboxes[i]
        objects = doc.createElement('object')
        annotation.appendChild(objects)
        object_name = doc.createElement('name')
        object_name.appendChild(doc.createTextNode('face'))
        objects.appendChild(object_name)
        pose = doc.createElement('pose')
        pose.appendChild(doc.createTextNode('Unspecified'))
        objects.appendChild(pose)
        truncated = doc.createElement('truncated')
        truncated.appendChild(doc.createTextNode('1'))
        objects.appendChild(truncated)
        difficult = doc.createElement('difficult')
        difficult.appendChild(doc.createTextNode('0'))
        objects.appendChild(difficult)
        bndbox = doc.createElement('bndbox')
        objects.appendChild(bndbox)
        xmin = doc.createElement('xmin')
        xmin.appendChild(doc.createTextNode(str(bbox[0])))
        bndbox.appendChild(xmin)
        ymin = doc.createElement('ymin')
        ymin.appendChild(doc.createTextNode(str(bbox[1])))
        bndbox.appendChild(ymin)
        xmax = doc.createElement('xmax')
        xmax.appendChild(doc.createTextNode(str(bbox[0] + bbox[2])))
        bndbox.appendChild(xmax)
        ymax = doc.createElement('ymax')
        ymax.appendChild(doc.createTextNode(str(bbox[1] + bbox[3])))
        bndbox.appendChild(ymax)

    f = open(xmlpath, "w")
    f.write(doc.toprettyxml(indent=''))
    f.close()


rootdir = "E:/public_data/wideface"


def convertimgset(img_set):
    imgdir = rootdir + "/WIDER_" + img_set + "/images"
    gtfilepath = rootdir + "/wider_face_split/wider_face_" + img_set + "_bbx_gt.txt"
    fwrite = open(rootdir + "/ImageSets/Main/" + img_set + ".txt", 'w')
    index = 0
    with open(gtfilepath, 'r') as gtfiles:
        while True:
            filename = gtfiles.readline()[:-1]
            if not filename:  # EOF
                break
            imgpath = imgdir + "/" + filename
            print(imgpath)
            img = cv2.imread(imgpath)
            if img is None:  # cv2.imread returns None on failure
                print(imgpath)
                break
            numbbox = int(gtfiles.readline())
            bboxes = []
            print(numbbox)
            if numbbox == 0:
                # Known quirk of the WIDER gt files: zero-face entries are
                # followed by a dummy all-zero line that must be consumed.
                gtfiles.readline()
            for i in range(numbbox):
                line = gtfiles.readline()
                lines = line.split(" ")
                lines = lines[0:4]
                bbox = (int(lines[0]), int(lines[1]), int(lines[2]), int(lines[3]))
                # Skip faces smaller than 40x40 px; tiny boxes hurt SSD training.
                if int(lines[2]) < 40 or int(lines[3]) < 40:
                    continue
                bboxes.append(bbox)
                # cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), color=(255, 255, 0), thickness=1)
            # Flatten "event/image.jpg" into a single VOC-style file name.
            filename = filename.replace("/", "_")
            if len(bboxes) == 0:
                print("no face")
                continue
            # cv2.imshow("img", img)
            # cv2.waitKey(0)
            cv2.imwrite("{}/JPEGImages/{}".format(rootdir, filename), img)
            fwrite.write(filename.split(".")[0] + "\n")
            xmlpath = "{}/Annotations/{}.xml".format(rootdir, filename.split(".")[0])
            writexml(filename, img, bboxes, xmlpath)
            print("success number is ", index)
            index += 1
    fwrite.close()


if __name__ == "__main__":
    img_sets = ["train", "val"]
    for img_set in img_sets:
        convertimgset(img_set)
    # Rename the list files to the names the VOC tooling expects.
    shutil.move(rootdir + "/ImageSets/Main/" + "train.txt", rootdir + "/ImageSets/Main/" + "trainval.txt")
    shutil.move(rootdir + "/ImageSets/Main/" + "val.txt", rootdir + "/ImageSets/Main/" + "test.txt")
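Before training, it is worth spot-checking a few converted samples. Here is a minimal sketch (my own addition, not part of the conversion script; the sample stem is a made-up placeholder) that re-reads one generated annotation and draws its boxes, mirroring the commented-out cv2.rectangle/cv2.imshow lines above:

# Optional sanity check: draw the boxes from one generated VOC XML.
import cv2
import xml.etree.ElementTree as ET

rootdir = "E:/public_data/wideface"
stem = "0--Parade_0_Parade_marchingband_1_5"  # hypothetical sample stem

img = cv2.imread("{}/JPEGImages/{}.jpg".format(rootdir, stem))
tree = ET.parse("{}/Annotations/{}.xml".format(rootdir, stem))
for obj in tree.findall("object"):
    bb = obj.find("bndbox")
    x1, y1, x2, y2 = (int(bb.find(t).text) for t in ("xmin", "ymin", "xmax", "ymax"))
    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 0), 1)
cv2.imshow("voc check", img)
cv2.waitKey(0)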
This turns the WIDER FACE dataset into the same VOC-style format we used in the previous post to train the 20-class detector. The rest of the training procedure is almost identical: follow the usual workflow to create the data symlinks, generate the network, and train the model.
To test the trained model, run demo.py, the test script that ships with mobilenet-ssd, and point it at your own model path and test images. I trained for roughly one night on a 1080 Ti and ran the test the next day; the detection results were quite good. The main point here is that the model trained with mobilenet-ssd is small, which makes it well suited for deployment on mobile devices.
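If you prefer not to run demo.py, a minimal standalone inference sketch with OpenCV's dnn module looks like the following. The file paths and the 0.5 confidence threshold are placeholders, and the preprocessing (300x300 input, scale 1/127.5, mean 127.5) is the usual MobileNet-SSD convention, so double-check it against your own deploy prototxt:

# Minimal MobileNet-SSD face-detection inference via OpenCV's dnn module.
# Model paths, test image, and threshold below are placeholders.
import cv2

net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "mobilenet_ssd_face.caffemodel")
img = cv2.imread("test.jpg")
h, w = img.shape[:2]

# Standard MobileNet-SSD preprocessing: 300x300, scale 1/127.5, mean 127.5.
blob = cv2.dnn.blobFromImage(img, 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()  # shape (1, 1, N, 7): id, class, conf, x1, y1, x2, y2

for i in range(detections.shape[2]):
    conf = detections[0, 0, i, 2]
    if conf < 0.5:
        continue
    # Box coordinates are normalized to [0, 1]; scale back to pixels.
    box = detections[0, 0, i, 3:7] * [w, h, w, h]
    x1, y1, x2, y2 = box.astype("int")
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imshow("result", img)
cv2.waitKey(0)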