Simple OCR recognition with a local C++ recognition interface

This post records training a CRNN digit-string recognition model on the Caffe framework (with newly added layers) and wrapping a digit-string recognition interface with VS2013.

1. CRNN model training

Framework: crnn.caffe (its repository is on GitHub).
Data preparation, image/label naming: +2_XXX.jpg (the label string comes before the underscore in the file name).
The data was augmented to about 60,000 samples; only 0-9, +, -, . and so on are recognized (accuracy may still be limited because the data has not reached the 100,000 scale).
Model training:
During training the images are resized to 128*32; to avoid stretching distortion, padding is added around the original image first, so that the resize inside the algorithm does not deform it badly (a minimal padding sketch follows these notes).
The training steps can follow the GitHub guide step by step, and the network structure can be adjusted for your own task.
Take care to avoid overfitting the model.
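To make the padding idea concrete, here is a minimal sketch of aspect-ratio-preserving padding before the resize; the 128*32 target comes from the post, while the helper name `pad_and_resize` and the white border value are my own assumptions:

```cpp
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Sketch: pad the image to the 128:32 aspect ratio before resizing, so the
// digits are not stretched. The border value (white) is an assumption here.
cv::Mat pad_and_resize(const cv::Mat& src, int dst_w = 128, int dst_h = 32) {
  double target_ratio = (double)dst_w / dst_h;            // 4.0 for 128x32
  double ratio = (double)src.cols / src.rows;
  int pad_w = 0, pad_h = 0;
  if (ratio < target_ratio)
    pad_w = (int)(src.rows * target_ratio) - src.cols;    // too narrow: pad left/right
  else
    pad_h = (int)(src.cols / target_ratio) - src.rows;    // too flat: pad top/bottom
  cv::Mat padded;
  cv::copyMakeBorder(src, padded, pad_h / 2, pad_h - pad_h / 2,
                     pad_w / 2, pad_w - pad_w / 2,
                     cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255));
  cv::Mat resized;
  cv::resize(padded, resized, cv::Size(dst_w, dst_h));
  return resized;
}
```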
Model testing: cpp_recognition provides a test interface;
the test code was modified slightly:
```cpp
#include <caffe/caffe.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "boost/algorithm/string.hpp"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "glob.h"

#include <iostream>
#include <fstream>
#include <sstream>
using namespace caffe;
using namespace std;
using namespace cv;

const int BLANK_LABEL = 13;


class Classifier {
 public:
  Classifier(const string& model_file, const string& trained_file, int blank_label);
  std::vector<int> Classify(const cv::Mat& img, int N = 1);
 private:
  std::vector<int> Predict(const cv::Mat& img);
  void GetLabelseqs(const std::vector<int>& label_seq_with_blank,
                    std::vector<int>& label_seq);
  void WrapInputLayer(std::vector<cv::Mat>* input_channels);
  void Preprocess(const cv::Mat& img, std::vector<cv::Mat>* input_channels);
 private:
  int blank_label_;                     // index of the CTC blank label
  boost::shared_ptr<Net<float> > net_;  // the loaded caffe network
  cv::Size input_geometry_;             // network input width x height
  int num_channels_;                    // network input channels (1 or 3)
};
Classifier::Classifier(const string& model_file, const string& trained_file, int blank_label) {
  blank_label_ = blank_label;
  net_.reset(new Net<float>(model_file, caffe::TEST));
  net_->CopyTrainedLayersFrom(trained_file);

  CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
  CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";

  Blob<float>* input_layer = net_->input_blobs()[0];
  num_channels_ = input_layer->channels();
  CHECK(num_channels_ == 3 || num_channels_ == 1)
    << "Input layer should have 1 or 3 channels.";
  input_geometry_ = cv::Size(input_layer->width(), input_layer->height());

  //Blob<float>* output_layer = net_->output_blobs()[0];
}
/* Return the top N predictions. */
//std::vector<int> Classifier::Classify(const cv::Mat& img, int N) {
//  std::vector<int> output = Predict(img);
//  return output;
//}
void Classifier::GetLabelseqs(const std::vector<int>& label_seq_with_blank,
                              std::vector<int>& label_seq) {
  label_seq.clear();
  int prev = blank_label_;
  int length = label_seq_with_blank.size();
  for(int i = 0; i < length; ++i) {
    int cur = label_seq_with_blank[i];
    if(cur != prev && cur != blank_label_) {
      label_seq.push_back(cur);
    }
    prev = cur;
  }
}

std::vector<int> Classifier::Classify(const cv::Mat& img, int N) {
  std::vector<int> pred_label_seq_with_blank = Predict(img);
  std::vector<int> pred_label_seq;
  GetLabelseqs(pred_label_seq_with_blank, pred_label_seq);
  return pred_label_seq;
}

std::vector<int> Classifier::Predict(const cv::Mat& img) {
  Blob<float>* input_layer = net_->input_blobs()[0];
  input_layer->Reshape(1, num_channels_, input_geometry_.height, input_geometry_.width);
  /* Forward dimension change to all layers. */
  net_->Reshape();
  std::vector<cv::Mat> input_channels;
  WrapInputLayer(&input_channels);
  Preprocess(img, &input_channels);
  net_->Forward();
  /* Copy the output layer to a std::vector */
  Blob<float>* output_layer = net_->output_blobs()[0];
  int alphabet_size = output_layer->shape(2);  // number of classes, blank included
  int time_step = output_layer->shape(0);      // sequence length T
  vector<int> pred_label_seq_with_blank;
  const float* begin = output_layer->cpu_data();
  // Best-path (greedy) decoding: keep the argmax label of every time step.
  for (int t = 0; t < time_step; ++t) {
    const float* scores_t = begin + t * alphabet_size;
    int max_label = std::max_element(scores_t, scores_t + alphabet_size) - scores_t;
    pred_label_seq_with_blank.push_back(max_label);
  }
  return pred_label_seq_with_blank;
}

/* Wrap the input layer of the network in separate cv::Mat objects
   (one per channel) so Preprocess can write directly into the input blob. */
void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
  Blob<float>* input_layer = net_->input_blobs()[0];
  int width = input_layer->width();
  int height = input_layer->height();
  float* input_data = input_layer->mutable_cpu_data();
  for (int i = 0; i < input_layer->channels(); ++i) {
    cv::Mat channel(height, width, CV_32FC1, input_data);
    input_channels->push_back(channel);
    input_data += width * height;
  }
}

void Classifier::Preprocess(const cv::Mat& img, std::vector<cv::Mat>* input_channels) {
  /* Convert the input image to the input image format of the network. */
  cv::Mat sample;
  if (img.channels() == 3 && num_channels_ == 1)
    cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
  else if (img.channels() == 4 && num_channels_ == 1)
    cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
  else if (img.channels() == 4 && num_channels_ == 3)
    cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
  else if (img.channels() == 1 && num_channels_ == 3)
    cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
  else
    sample = img;
  cv::Mat sample_resized;
  if (sample.size() != input_geometry_)
    cv::resize(sample, sample_resized, input_geometry_);
  else
    sample_resized = sample;

  cv::Mat sample_float;
  // Scale pixel values to [0, 1].
  if (num_channels_ == 3)
    sample_resized.convertTo(sample_float, CV_32FC3, 1 / 255.0);
  else
    sample_resized.convertTo(sample_float, CV_32FC1, 1 / 255.0);
  cv::split(sample_float, *input_channels);
  CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
        == net_->input_blobs()[0]->cpu_data())
    << "Input channels are not wrapping the input layer of the network.";
}


void trim(string &s) 
{
  if( !s.empty() ) 
  {
    s.erase(0, s.find_first_not_of(" "));
    s.erase(s.find_last_not_of(" ") + 1);
  }
}
int main(int argc, char** argv){
  caffe::GlobalInit(&argc, &argv);
  // argv[1]: test image directory, argv[2]: deploy prototxt, argv[3]: caffemodel.
  string file_path = (string)argv[1];
  string proto_cnn_file = (string)argv[2];
  string model_cnn_file = (string)argv[3];
  Classifier classifier(proto_cnn_file, model_cnn_file, BLANK_LABEL);

  // Read all test images from the directory.
  vector<string> files;   // file list filled by glob() from glob.h
  int right_num = 0;
  glob(file_path, files);
  for (size_t i = 0; i < files.size(); i++) {
    Mat image = imread(files[i]);
    Mat resizeimg;
    resize(image, resizeimg, Size(128, 32), 0, 0, CV_INTER_LINEAR);
    vector<int> predictions = classifier.Classify(resizeimg);

    // Get the image's real label from the file name ("<label>_xxx.jpg").
    string str(files[i]);
    int st = str.find_last_of("/\\");   // handle both '/' and '\' path separators
    int ed = str.find_last_of('_');
    string real_str = str.substr(st + 1, ed - st - 1);
    trim(real_str);
  
  string pre_str = "";
  for (size_t i = 0; i < predictions.size(); ++i) {
    stringstream ss;
    if (predictions[i] == 10)pre_str += "+";
    else if (predictions[i] == 11)pre_str += "-";
    else if (predictions[i] == 12)pre_str += ".";
    else pre_str += std::to_string(predictions[i]);
  }
  std::cout << files[i] << std::endl;
  std::cout << "real label: " << real_str << "  pred label: " << pre_str << std::endl;
  if (real_str == pre_str) right_num += 1;  
}
std::cout << "the right nums: " << right_num << std::endl;
std::cout << "the precision is " << right_num/(double)files.size() << std::endl;

}
```
With this the recognition accuracy of the model can be tested; the program takes three arguments: the test image directory, the deploy prototxt, and the trained caffemodel.
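For reference, GetLabelseqs above performs the standard CTC best-path collapse: consecutive repeats are merged and the blank label (13 here) is dropped. A small self-contained illustration of that rule (the function name `ctc_collapse` and the sample sequence are only for demonstration):

```cpp
#include <iostream>
#include <vector>

// Collapse a CTC best-path sequence: drop repeats, then drop blanks.
// Mirrors Classifier::GetLabelseqs with blank label 13.
std::vector<int> ctc_collapse(const std::vector<int>& seq, int blank = 13) {
  std::vector<int> out;
  int prev = blank;
  for (size_t i = 0; i < seq.size(); ++i) {
    if (seq[i] != prev && seq[i] != blank)
      out.push_back(seq[i]);
    prev = seq[i];
  }
  return out;
}

int main() {
  // Per-frame predictions over 8 time steps (13 = blank).
  int raw[] = {1, 1, 13, 1, 10, 10, 13, 2};
  std::vector<int> seq(raw, raw + 8);
  std::vector<int> labels = ctc_collapse(seq);
  for (size_t i = 0; i < labels.size(); ++i)
    std::cout << labels[i] << " ";
  std::cout << std::endl;   // prints: 1 1 10 2
  return 0;
}
```

With the character mapping used in main() above (10 is '+'), the collapsed sequence 1 1 10 2 corresponds to the string "11+2".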
Local recognition interface: the local C++ recognition interface can be implemented along the lines of the test code above, but it depends on the OpenCV and Caffe libraries, both of which need to be built beforehand; a minimal wrapper sketch follows.
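Here is a minimal sketch of what such a wrapper could look like; the file names crnn_api.cpp / classifier.h, the exported function names, and the DLL-export macro are my own assumptions, not part of the original project:

```cpp
// crnn_api.cpp -- hypothetical wrapper. Assumes the Classifier class from the
// test code above has been moved into "classifier.h" and is linked into the project.
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include "classifier.h"   // assumption: declares class Classifier (see test code)

#ifdef _WIN32
#define CRNN_API __declspec(dllexport)   // assumption: built as a VS2013 DLL
#else
#define CRNN_API
#endif

typedef void* crnn_handle;   // opaque handle hiding the Classifier instance

// Load deploy prototxt + caffemodel; blank label 13 as in the test code.
CRNN_API crnn_handle crnn_create(const std::string& proto_file,
                                 const std::string& model_file) {
  return new Classifier(proto_file, model_file, 13 /* BLANK_LABEL */);
}

// Recognize the digit string in an image file and return it as text.
CRNN_API std::string crnn_recognize(crnn_handle handle, const std::string& image_path) {
  Classifier* classifier = static_cast<Classifier*>(handle);
  cv::Mat image = cv::imread(image_path);
  if (classifier == NULL || image.empty()) return "";
  cv::Mat resized;
  cv::resize(image, resized, cv::Size(128, 32));
  std::vector<int> labels = classifier->Classify(resized);
  std::string text;
  for (size_t i = 0; i < labels.size(); ++i) {
    if (labels[i] == 10) text += "+";
    else if (labels[i] == 11) text += "-";
    else if (labels[i] == 12) text += ".";
    else text += std::to_string(labels[i]);
  }
  return text;
}

// Release the model.
CRNN_API void crnn_release(crnn_handle handle) {
  delete static_cast<Classifier*>(handle);
}
```

With this kind of split, the caller only needs crnn_create / crnn_recognize / crnn_release, and the OpenCV and Caffe dependencies stay inside the compiled library.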
Experimental results:
With roughly 60,000 digit-string samples for training and 6,000-plus test samples, the best recognition accuracy reached 89%; further optimization could be looked for in the network structure.
