【深度学习】【caffe实用工具6】笔记28 windows下SSD网络中的convert_annoset工具的使用

/**************************************************************************************************************************
文件说明:
        【1】这个程序用于将图像数据集合(a set of images)和与这个图像相关的注释(annotations)转换为caffe专用的数据库LMDB/LEVELDB
		     格式
		【2】使用方法:convert_annoset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
		             [1]ROOTFOLDER/----------------图像数据所在的文件夹
					 [2]LISTFILE-------------------输入图像数据的文件列表,其每一行为subfolder1/file1.JPEG 7
					 [3][FLAGS]--------------------可选参数,是否使用shuffle,颜色空间,编码等
	    【3】对于【分类任务】,文件的格式如下所示:imgfolder1/img1.JPEG 7
		【4】对于【检测任务】,文件的格式如下所示:imgfolder1/img1.JPEG annofolder1/anno1.xml
检测任务的使用举例:
        代码需要进行三处修改:
		   【1】DEFINE_string(anno_type,   "detection","The type of annotation {classification, detection}.");
		   【2】DEFINE_string(label_map_file,"E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//labelmap_voc.prototxt",
		                                     "A file with LabelMap protobuf message.");
           【3】主函数中代码修改:
				     argv[1] = "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//";
				     argv[2] = "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//test.txt";
				     argv[3] = "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//examples//ssd//ssd_test_lmdb";
开发环境:
         Win10+cuda7.5+cuDnnV5.0+caffe_windows_ssd+OpenCv+VS2013
时间地点:
         陕西师范大学 文津楼 2017.8.10
作    者:
         九 月
***************************************************************************************************************************/
#include <algorithm>
#include <cstdlib>   // std::system
#include <fstream>   // NOLINT(readability/streams)
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "boost/scoped_ptr.hpp"
#include "boost/variant.hpp"
#include "gflags/gflags.h"
#include "glog/logging.h"

#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/rng.hpp"

using namespace caffe;  // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;

// Input interpretation flags: color handling and ordering of the list file.
DEFINE_bool(  gray,        false,     "When this option is on, treat images as grayscale ones");
DEFINE_bool(  shuffle,     false,     "Randomly shuffle the order of images and their labels");
// Output database backend: "lmdb" or "leveldb".
DEFINE_string(backend,     "lmdb",     "The backend {lmdb, leveldb} for storing the result");
// Annotation handling. NOTE: the defaults below were edited for this tutorial
// (anno_type="detection" and a hard-coded Windows path to labelmap_voc.prototxt),
// so the tool runs for SSD detection without passing flags on the command line.
DEFINE_string(anno_type,   "detection","The type of annotation {classification, detection}.");
DEFINE_string(label_type,  "xml",      "The type of annotation file format.");
DEFINE_string(label_map_file,"E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//labelmap_voc.prototxt",      
	                                   "A file with LabelMap protobuf message.");
DEFINE_bool(  check_label,  false,    "When this option is on, check that there is no duplicated name/label.");
// Optional resizing constraints; 0 means "leave unchanged".
DEFINE_int32( min_dim,      0,         "Minimum dimension images are resized to (keep same aspect ratio)");
DEFINE_int32( max_dim,      0,         "Maximum dimension images are resized to (keep same aspect ratio)");
DEFINE_int32( resize_width, 0,         "Width images are resized to");
DEFINE_int32( resize_height,0,         "Height images are resized to");
// Datum integrity / encoding options.
DEFINE_bool( check_size,    false,    "When this option is on, check that all the datum have the same size");
DEFINE_bool( encoded,       false,    "When this option is on, the encoded image will be save in datum");
DEFINE_string(encode_type,  "",        "Optional: What type should we encode the image as ('png','jpg',...).");

// Converts a list of images (plus classification labels or detection
// annotation files) into an LMDB/LEVELDB database for Caffe-SSD.
// Usage: convert_annoset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
// This tutorial build hard-codes the three positional arguments below.
int main(int argc, char** argv) 
{
#ifdef USE_OPENCV
  ::google::InitGoogleLogging(argv[0]);
  // Print output to stderr (while still logging).
  FLAGS_alsologtostderr = 1;

#ifndef GFLAGS_GFLAGS_H_
  namespace gflags = google;
#endif

  gflags::SetUsageMessage("Convert a set of images and annotations to the "
                          "leveldb/lmdb format used as input for Caffe.\n"
                          "Usage:\n"
                          "    convert_annoset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n");
  // Parse -gray/-shuffle/... so the DEFINE_* flags above are actually usable.
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  // Hard-coded arguments for this tutorial. Local variables are used instead
  // of writing into argv[1..3]: argv only holds argc+1 entries, so assigning
  // argv[3] when the tool is launched without arguments is out-of-bounds UB.
  const char* const root_folder_arg =
      "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//";
  const char* const list_file_arg =
      "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//data//ssd//test.txt";
  const char* const db_name_arg =
      "E://caffeInstall2013SSDCUDA//caffe-ssd-microsoft//examples//ssd//ssd_test_lmdb";

  const bool  is_color   = !FLAGS_gray;
  const bool  check_size = FLAGS_check_size;
  const bool  encoded    = FLAGS_encoded;

  const string encode_type = FLAGS_encode_type;
  const string anno_type   = FLAGS_anno_type;

  // Only meaningful in detection mode; SSD uses bounding-box annotations.
  AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX;

  const string label_type     = FLAGS_label_type;
  const string label_map_file = FLAGS_label_map_file;

  const bool check_label = FLAGS_check_label;

  // Maps class names from the LabelMap prototxt (e.g. "dog") to int labels.
  std::map<std::string, int> name_to_label;

  std::ifstream infile(list_file_arg);
  CHECK(infile.good()) << "Failed to open list file " << list_file_arg;

  // Each entry pairs an image path with either an integer class label
  // (classification) or an annotation file path (detection).
  std::vector<std::pair<std::string, boost::variant<int, std::string> > > lines;
  std::string filename;
  int         label = 0;
  std::string labelname;

  if (anno_type == "classification") 
  {
    // List-file format: "imgfolder1/img1.JPEG 7"
    while (infile >> filename >> label) 
	{
      lines.push_back(std::make_pair(filename, label));
    }
  } 
  else if (anno_type == "detection") 
  {
    type = AnnotatedDatum_AnnotationType_BBOX;
    LabelMap label_map;
    CHECK(ReadProtoFromTextFile(label_map_file, &label_map))
        << "Failed to read label map file.";
    CHECK(MapNameToLabel(label_map, check_label, &name_to_label))
        << "Failed to convert name to label.";
    // List-file format: "imgfolder1/img1.JPEG annofolder1/anno1.xml"
    while (infile >> filename >> labelname) 
	{
      lines.push_back(std::make_pair(filename, labelname));
    }
  }
  else
  {
    // Original code fell through silently and wrote an empty database.
    LOG(FATAL) << "Unknown annotation type: " << anno_type;
  }
  if (FLAGS_shuffle) 
  {
    // Randomly shuffle data (uses Caffe's RNG from caffe/util/rng.hpp).
    LOG(INFO) << "Shuffling data";
    shuffle(lines.begin(), lines.end());
  }
  LOG(INFO) << "A total of " << lines.size() << " images.";

  if (encode_type.size() && !encoded)
    LOG(INFO) << "encode_type specified, assuming encoded=true.";

  // Clamp resize parameters to non-negative; 0 means "no constraint".
  const int min_dim       = std::max(0, FLAGS_min_dim);
  const int max_dim       = std::max(0, FLAGS_max_dim);
  const int resize_height = std::max(0, FLAGS_resize_height);
  const int resize_width  = std::max(0, FLAGS_resize_width);

  // Create the new output database.
  scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
  db->Open(db_name_arg, db::NEW);
  scoped_ptr<db::Transaction> txn(db->NewTransaction());

  // Storing to db. anno_datum is reused across iterations; datum points
  // into it and receives the pixel data.
  std::string root_folder(root_folder_arg);
  AnnotatedDatum anno_datum;
  Datum* datum = anno_datum.mutable_datum();

  int count     = 0;
  int data_size = 0;

  bool data_size_initialized = false;

  for (size_t line_id = 0; line_id < lines.size(); ++line_id) 
  {
    bool status = true;
    std::string enc = encode_type;
    if (encoded && !enc.size()) 
	{
      // Guess the encoding type from the file-name extension.
      const string& fn = lines[line_id].first;
      size_t p = fn.rfind('.');
      if (p == fn.npos)
	  {
        // Original code still called fn.substr(npos) here, which throws
        // std::out_of_range; leave enc empty instead.
        LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
      }
      else
	  {
        // Keep the leading '.' (e.g. ".jpg") and lower-case it for OpenCV.
        enc = fn.substr(p);
        std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
      }
    }
    filename = root_folder + lines[line_id].first;
    if (anno_type == "classification") 
	{
      label = boost::get<int>(lines[line_id].second);
      status = ReadImageToDatum(filename, label, resize_height, resize_width,
          min_dim, max_dim, is_color, enc, datum);
    } 
	else if (anno_type == "detection") 
	{
      labelname = root_folder + boost::get<std::string>(lines[line_id].second);
      status = ReadRichImageToAnnotatedDatum(filename, labelname, resize_height,
          resize_width, min_dim, max_dim, is_color, enc, type, label_type,
          name_to_label, &anno_datum);
      anno_datum.set_type(AnnotatedDatum_AnnotationType_BBOX);
    }
    if (status == false) 
	{
      // Skip unreadable images/annotations rather than aborting the run.
      LOG(WARNING) << "Failed to read " << lines[line_id].first;
      continue;
    }
    if (check_size) {
      if (!data_size_initialized) 
	  {
        // First successfully-read image fixes the expected datum size.
        data_size = datum->channels() * datum->height() * datum->width();
        data_size_initialized = true;
      } else 
	  {
        const std::string& data = datum->data();
        CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
            << data.size();
      }
    }
    // Sequential key: zero-padded line index + file name keeps DB ordering.
    string key_str =
        caffe::format_int(static_cast<int>(line_id), 8) + "_" + lines[line_id].first;

    // Serialize the (annotated) datum and put it in the database.
    string out;
    CHECK(anno_datum.SerializeToString(&out));
    txn->Put(key_str, out);

    // Commit every 1000 items to bound transaction size.
    if (++count % 1000 == 0) 
	{
      txn->Commit();
      txn.reset(db->NewTransaction());
      LOG(INFO) << "Processed " << count << " files.";
    }
  }
  // Write the last partial batch.
  if (count % 1000 != 0) 
  {
    txn->Commit();
    LOG(INFO) << "Processed " << count << " files.";
  }
  std::system("pause");  // Windows-only: keep the console window open.
#else
  LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  return 0;
}


【深度学习】【caffe实用工具6】笔记28 windows下SSD网络中的convert_annoset工具的使用_第1张图片

用了30分钟,终于生成了训练阶段所需要的LMDB数据库文件,如下所示:

【深度学习】【caffe实用工具6】笔记28 windows下SSD网络中的convert_annoset工具的使用_第2张图片

你可能感兴趣的:(CNN--ANN--Deep,Learning)