Object Detection Based on SSD_MobileNet

This project is an experiment built on the SSD_MobileNet network model from tensorflow/models.
GitHub link: https://github.com/tensorflow/models/tree/master/research/object_detection

The work is divided into three main parts: data preprocessing, training, and testing (the evaluation part simply follows the tutorial on GitHub).

1. Data preprocessing
The GitHub tutorial already provides code for converting the Pets and PASCAL VOC datasets into .record files, but an ordinary dataset usually comes only as .jpg (.jpeg) images plus .xml annotation files, so the ready-made create_record scripts on GitHub cannot be used directly. The official code is reused here with a few modifications.
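For reference, the converter below assumes each .xml annotation follows the PASCAL VOC layout; a minimal example with a hypothetical filename and class looks like this:

<annotation>
    <filename>img_0001.jpg</filename>
    <size>
        <width>640</width>
        <height>480</height>
        <depth>3</depth>
    </size>
    <object>
        <name>cat</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>48</xmin>
            <ymin>240</ymin>
            <xmax>195</xmax>
            <ymax>371</ymax>
        </bndbox>
    </object>
</annotation>

These are exactly the fields (filename, size, name, pose, truncated, difficult, bndbox) that the script reads.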

r"""Convert raw PASCAL dataset to TFRecord for object_detection.

Example usage:
    ./create_pascal_tf_record --data_dir=/home/user/VOCdevkit \
        --year=VOC2012 \
        --output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import hashlib
import io
import logging
import os

from lxml import etree
import PIL.Image
import tensorflow as tf

from object_detection.utils import dataset_util
from object_detection.utils import label_map_util





def dict_to_tf_example(data,
                       dataset_directory,
                       label_map_dict,
                       ignore_difficult_instances=False,
                       image_subdirectory='image'):
  """Convert XML derived dict to tf.Example proto.

  Notice that this function normalizes the bounding box coordinates provided
  by the raw data.

  Args:
    data: dict holding PASCAL XML fields for a single image (obtained by
      running dataset_util.recursive_parse_xml_to_dict)
    dataset_directory: Path to root directory holding PASCAL dataset
    label_map_dict: A map from string label names to integers ids.
    ignore_difficult_instances: Whether to skip difficult instances in the
      dataset  (default: False).
    image_subdirectory: String specifying subdirectory within the
      PASCAL dataset directory holding the actual image data.

  Returns:
    example: The converted tf.Example.

  Raises:
    ValueError: if the image pointed to by data['filename'] is not a valid JPEG
  """
  img_path = os.path.join(image_subdirectory, data['filename'])
  full_path = os.path.join(dataset_directory, img_path)
  with tf.gfile.GFile(full_path, 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = PIL.Image.open(encoded_jpg_io)
  if image.format != 'JPEG':
    raise ValueError('Image format not JPEG')
  key = hashlib.sha256(encoded_jpg).hexdigest()

  width = int(data['size']['width'])
  height = int(data['size']['height'])

  xmin = []
  ymin = []
  xmax = []
  ymax = []
  classes = []
  classes_text = []
  truncated = []
  poses = []
  difficult_obj = []
  for obj in data['object']:
    difficult = bool(int(obj['difficult']))
    if ignore_difficult_instances and difficult:
      continue
    difficult_obj.append(int(difficult))

    xmin.append(float(obj['bndbox']['xmin']) / width)
    ymin.append(float(obj['bndbox']['ymin']) / height)
    xmax.append(float(obj['bndbox']['xmax']) / width)
    ymax.append(float(obj['bndbox']['ymax']) / height)
    classes_text.append(obj['name'].encode('utf8'))
    classes.append(label_map_dict[obj['name']])
    truncated.append(int(obj['truncated']))
    poses.append(obj['pose'].encode('utf8'))

  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
      'image/object/truncated': dataset_util.int64_list_feature(truncated),
      'image/object/view': dataset_util.bytes_list_feature(poses),
  }))
  return example



def main(_):
  data_dir = '<insert path to the data directory>'
  output_path = '<insert output path and filename for the .record file>'
  writer = tf.python_io.TFRecordWriter(output_path)
  label_map_path = '<insert path and filename of the label map .pbtxt>'
  label_map_dict = label_map_util.get_label_map_dict(label_map_path)

  examples_path = '<insert path to trainval.txt>'
  annotations_dir = '<insert path to the xml files>/annotations/'
  examples_list = dataset_util.read_examples_list(examples_path)
  for idx, example in enumerate(examples_list):
    if idx % 100 == 0:
      logging.info('On image %d of %d', idx, len(examples_list))
    path = os.path.join(annotations_dir, example + '.xml')
    print(path)
    with tf.gfile.GFile(path, 'r') as fid:
      xml_str = fid.read()
    xml = etree.fromstring(xml_str)
    data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
    # print(data)

    tf_example = dict_to_tf_example(data, data_dir, label_map_dict)
    writer.write(tf_example.SerializeToString())

  writer.close()


if __name__ == '__main__':
  tf.app.run()

Change the paths and folder names inside to your own and the .record file can be generated!
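One of those paths is the label map: a plain-text .pbtxt file in the standard TF Object Detection API format, mapping class names to integer ids starting at 1 (0 is reserved for the background class). A minimal example with hypothetical class names:

item {
  id: 1
  name: 'cat'
}
item {
  id: 2
  name: 'dog'
}
item {
  id: 3
  name: 'bird'
}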
Before that, however, the trainval.txt file has to be prepared: it is simply a list of the names of all the images. The code is as follows:

#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import os

if __name__ == '__main__':
    source_folder = 'models/research/object_detection/data/image/'           # folder containing all the images
    dest = 'models/research/object_detection/data/annotations/trainval.txt'  # where trainval.txt is saved
    file_list = os.listdir(source_folder)     # list of files in the image folder
    train_file = open(dest, 'a')              # open the output file (append mode)
    for file_obj in file_list:                # visit every file in the list
        file_name, file_extend = os.path.splitext(file_obj)
        # file_name is the base name, file_extend the extension
        train_file.write(file_name + '\n')
    train_file.close()                        # close the file

Running the script above produces the required trainval.txt file.
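The resulting trainval.txt simply lists one image base name (without extension) per line, which is what dataset_util.read_examples_list expects, for example (hypothetical names):

img_0001
img_0002
img_0003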

2. Training
For training, the files that need to be modified are the .config configuration file and the pre-trained model.
There are six main places to change in the config file (a sketch of where these fields sit in the full pipeline config follows the list):

num_classes: 3
fine_tune_checkpoint: "models/research/object_detection/models/model/train/model.ckpt-200000"
Training:
input_path: "/models/research/object_detection/data/train_gold.record"
label_map_path: "/models/research/object_detection/data/gold_label_map.pbtxt"
Evaluation:
input_path: "models/research/object_detection/data/train_gold.record"
label_map_path: "models/research/object_detection/data/gold_label_map.pbtxt"

The pre-trained model itself has to be downloaded from the web; make sure to download the pre-trained checkpoint for the same network architecture (SSD_MobileNet here).
Then enter the four commands from the tutorial to start training. (Apparently because of its distributed setup, the program occupies every GPU on the server and crowds out other jobs.)
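For reference, the training command from the tutorial at the time looked roughly like the line below (legacy object_detection/train.py; check the tutorial for the exact, current invocation). Prefixing it with CUDA_VISIBLE_DEVICES is a common way to stop it from grabbing every GPU:

# Run from models/research/; the two paths are placeholders.
CUDA_VISIBLE_DEVICES=0 python object_detection/train.py \
    --logtostderr \
    --pipeline_config_path=path/to/your_ssd_mobilenet.config \
    --train_dir=path/to/train_dir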
During training, the five most recent checkpoints are kept.
3. Testing
The testing part follows the corresponding notebook in the GitHub repo:
https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
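Before that notebook can be run on your own model, the latest checkpoint has to be exported to a frozen graph with object_detection/export_inference_graph.py from the same repo. The heart of the notebook's inference step, condensed into a minimal TF 1.x sketch (paths are placeholders; the tensor names are the standard ones produced by the export script), looks roughly like this:

# A condensed sketch of the detection notebook's inference loop (TF 1.x).
# Assumes the trained model was exported to frozen_inference_graph.pb;
# both paths below are placeholders.
from __future__ import print_function

import numpy as np
import PIL.Image
import tensorflow as tf

PATH_TO_FROZEN_GRAPH = 'path/to/frozen_inference_graph.pb'
PATH_TO_TEST_IMAGE = 'path/to/test.jpg'

# Load the frozen graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
  graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
    graph_def.ParseFromString(fid.read())
  tf.import_graph_def(graph_def, name='')

with detection_graph.as_default(), tf.Session() as sess:
  # Shape [1, height, width, 3]; the exported graph takes uint8 images.
  image = np.expand_dims(np.array(PIL.Image.open(PATH_TO_TEST_IMAGE)), axis=0)
  # Standard tensor names produced by export_inference_graph.py.
  boxes, scores, classes, num = sess.run(
      [detection_graph.get_tensor_by_name('detection_boxes:0'),
       detection_graph.get_tensor_by_name('detection_scores:0'),
       detection_graph.get_tensor_by_name('detection_classes:0'),
       detection_graph.get_tensor_by_name('num_detections:0')],
      feed_dict={detection_graph.get_tensor_by_name('image_tensor:0'): image})
  # Boxes are [ymin, xmin, ymax, xmax], normalized to [0, 1].
  print('top scores:', scores[0][:5])
  print('top classes:', classes[0][:5])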
