Caffe Source Code: The SoftmaxWithLoss Cross-Entropy Loss Function

In Caffe, the forward pass of SoftmaxWithLoss is essentially the same as that of Softmax; the only difference is that SoftmaxWithLoss also computes a loss value, which is printed to the terminal. SoftmaxWithLossLayer inherits from the LossLayer base class, which in turn inherits from the Layer base class, so SoftmaxWithLossLayer is effectively a grandchild of Layer. Let's start by looking at the LossLayer class.

The LossLayer base class

Header file

#ifndef CAFFE_LOSS_LAYER_HPP_
#define CAFFE_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

const float kLOG_THRESHOLD = 1e-20;

/**
 * @brief An interface for Layer%s that take two Blob%s as input -- usually
 *        (1) predictions and (2) ground-truth labels -- and output a
 *        singleton Blob representing the loss.
 *
 * LossLayers are typically only capable of backpropagating to their first input
 * -- the predictions.
 */
template <typename Dtype>
class LossLayer : public Layer<Dtype> {
 public:
  explicit LossLayer(const LayerParameter& param)
     : Layer<Dtype>(param) {}
  virtual void LayerSetUp(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
  virtual void Reshape(
      const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top);
  // Clearly, this layer takes two bottoms: the logits and the labels.
  virtual inline int ExactNumBottomBlobs() const { return 2; }

  /**
   * @brief For convenience and backwards compatibility, instruct the Net to
   *        automatically allocate a single top Blob for LossLayers, into which
   *        they output their singleton loss, (even if the user didn't specify
   *        one in the prototxt, etc.).
   */
   // Since this is a loss layer, a single top blob holding the loss value is
   // generated automatically.
  virtual inline bool AutoTopBlobs() const { return true; }
  // Exactly one top blob: the scalar loss.
  virtual inline int ExactNumTopBlobs() const { return 1; }
  /**
   * We usually cannot backpropagate to the labels; ignore force_backward for
   * these inputs.
   */
   // bottom[1] holds the labels, which obviously cannot receive backpropagated gradients.
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return bottom_index != 1;
  }
};

}  // namespace caffe

#endif  // CAFFE_LOSS_LAYER_HPP_

Source file

#include <vector>

#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
void LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // LossLayers have a non-zero (1) loss by default.
  // By default, give every loss layer a loss weight of 1.
  if (this->layer_param_.loss_weight_size() == 0) {
    this->layer_param_.add_loss_weight(Dtype(1));
  }
}

template <typename Dtype>
void LossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0))
      << "The data and label should have the same first dimension.";
  // The loss is a single scalar value; top[0] holds it.
  vector<int> loss_shape(0);  // Loss layers output a scalar; 0 axes.
  top[0]->Reshape(loss_shape);
}

INSTANTIATE_CLASS(LossLayer);

}  // namespace caffe
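
A quick aside on that zero-axis shape: in Caffe, a blob with zero axes is a scalar whose count() equals 1. The following minimal sketch illustrates this (my own illustration, assuming a working Caffe build; it is not part of the Caffe source):

#include <vector>
#include "caffe/blob.hpp"

int main() {
  caffe::Blob<float> loss_blob;
  loss_blob.Reshape(std::vector<int>());  // zero axes, as in LossLayer::Reshape
  // count() is the product over zero axes, i.e. 1: a single scalar slot.
  loss_blob.mutable_cpu_data()[0] = 0.5f;
  return loss_blob.count() == 1 ? 0 : 1;
}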

SoftmaxWithLoss header file

#ifndef CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_
#define CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/loss_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"

namespace caffe {

/**
 * @brief Computes the multinomial logistic loss for a one-of-many
 *        classification task, passing real-valued predictions through a
 *        softmax to get a probability distribution over classes.
 *
 * This layer should be preferred over separate
 * SoftmaxLayer + MultinomialLogisticLossLayer
 * as its gradient computation is more numerically stable.
 * At test time, this layer can be replaced simply by a SoftmaxLayer.
 *
 * @param bottom input Blob vector (length 2)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the predictions @f$ x @f$, a Blob with values in
 *      @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of
 *      the @f$ K = CHW @f$ classes. This layer maps these scores to a
 *      probability distribution over classes using the softmax function
 *      @f$ \hat{p}_{nk} = \exp(x_{nk}) /
 *      \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer).
 *   -# @f$ (N \times 1 \times 1 \times 1) @f$
 *      the labels @f$ l @f$, an integer-valued Blob with values
 *      @f$ l_n \in [0, 1, 2, ..., K - 1] @f$
 *      indicating the correct class label among the @f$ K @f$ classes
 * @param top output Blob vector (length 1)
 *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
 *      the computed cross-entropy classification loss: @f$ E =
 *        \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n})
 *      @f$, for softmax output class probabilities @f$ \hat{p} @f$
 */
template <typename Dtype>
class SoftmaxWithLossLayer : public LossLayer<Dtype> {
 public:
   /**
    * @param param provides LossParameter loss_param, with options:
    *  - ignore_label (optional)
    *    Specify a label value that should be ignored when computing the loss.
    *  - normalize (optional, default true)
    *    If true, the loss is normalized by the number of (nonignored) labels
    *    present; otherwise the loss is simply summed over spatial locations.
    */
  explicit SoftmaxWithLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "SoftmaxWithLoss"; }
  virtual inline int ExactNumTopBlobs() const { return -1; }
  virtual inline int MinTopBlobs() const { return 1; }
  // There can be at most two tops: top[0] is the loss value, top[1] is the softmax probability output.
  virtual inline int MaxTopBlobs() const { return 2; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Computes the softmax loss error gradient w.r.t. the predictions.
   *
   * Gradients cannot be computed with respect to the label inputs (bottom[1]),
   * so this method ignores bottom[1] and requires !propagate_down[1], crashing
   * if propagate_down[1] is set.
   *
   * @param top output Blob vector (length 1), providing the error gradient with
   *      respect to the outputs
   *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
   *      This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$,
   *      as @f$ \lambda @f$ is the coefficient of this layer's output
   *      @f$\ell_i@f$ in the overall Net loss
   *      @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence
   *      @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$.
   *      (*Assuming that this top Blob is not used as a bottom (input) by any
   *      other layer of the Net.)
   * @param propagate_down see Layer::Backward.
   *      propagate_down[1] must be false as we can't compute gradients with
   *      respect to the labels.
   * @param bottom input Blob vector (length 2)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the predictions @f$ x @f$; Backward computes diff
   *      @f$ \frac{\partial E}{\partial x} @f$
   *   -# @f$ (N \times 1 \times 1 \times 1) @f$
   *      the labels -- ignored as we can't compute their error gradients
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// Read the normalization mode parameter and compute the normalizer based
  /// on the blob size.  If normalization_mode is VALID, the count of valid
  /// outputs will be read from valid_count, unless it is -1 in which case
  /// all outputs are assumed to be valid.
  virtual Dtype get_normalizer(
      LossParameter_NormalizationMode normalization_mode, int valid_count);

  /// The internal SoftmaxLayer used to map predictions to a distribution.
  shared_ptr<Layer<Dtype> > softmax_layer_;  // SoftmaxWithLoss actually holds a pointer to a softmax layer, whose forward pass it clearly needs to invoke
  /// prob stores the output probability predictions from the SoftmaxLayer.
  Blob<Dtype> prob_;  // stores the softmax output
  /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_bottom_vec_;  // pointers to the softmax layer's input blobs
  /// top vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_top_vec_;  // pointers to the softmax layer's output blobs
  /// Whether to ignore instances with a certain label.
  bool has_ignore_label_;  // whether some label should be ignored; taking image
  // segmentation as an example, the background class can simply be ignored
  /// The label indicating that an instance should be ignored.
  int ignore_label_;
  /// How to normalize the output loss.
  LossParameter_NormalizationMode normalization_;

  int softmax_axis_, outer_num_, inner_num_;  // same meaning as in the softmax layer
};

}  // namespace caffe

#endif  // CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_

SoftmaxWithLoss source file

As we saw in the header, the SoftmaxWithLossLayer class holds a pointer to a softmax layer; clearly, that layer's forward pass needs to be invoked.

#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  LayerParameter softmax_param(this->layer_param_);
  softmax_param.set_type("Softmax");
  // Create the softmax layer.
  softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
  softmax_bottom_vec_.clear();
  // softmax_bottom_vec_ is the softmax layer's bottom
  softmax_bottom_vec_.push_back(bottom[0]);
  softmax_top_vec_.clear();
  // softmax_top_vec_ is the softmax layer's top
  softmax_top_vec_.push_back(&prob_);
  softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);  // and with that, the softmax layer is set up
  // The label to be ignored in the loss, if any.
  has_ignore_label_ =
    this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  // Normalization mode.
  // Taking image segmentation as the example again, there are several modes:
  // (1) FULL: average the loss over every pixel in the batch
  // (2) VALID: like (1), but only over the valid (non-ignored) pixels
  // (3) BATCH_SIZE: average the loss over the images in the batch
  // (plus NONE: no normalization at all -- the loss is a plain sum)
  if (!this->layer_param_.loss_param().has_normalization() &&
      this->layer_param_.loss_param().has_normalize()) {
    normalization_ = this->layer_param_.loss_param().normalize() ?
                     LossParameter_NormalizationMode_VALID :
                     LossParameter_NormalizationMode_BATCH_SIZE;
  } else {
    normalization_ = this->layer_param_.loss_param().normalization();
  }
}
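// To summarize the branch above: the legacy boolean normalize field maps onto the
// newer enum as normalize: true -> VALID and normalize: false -> BATCH_SIZE; when
// the normalization field is set explicitly it takes precedence (its default in
// caffe.proto is VALID).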

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
  softmax_axis_ =
      bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
  outer_num_ = bottom[0]->count(0, softmax_axis_);
  inner_num_ = bottom[0]->count(softmax_axis_ + 1);
  // This deserves some explanation. Taking image segmentation as the example: if
  // the logits blob has shape (32, 5, 240, 240), then the label blob has shape
  // (32, 240, 240), where each entry holds the class index of that point rather
  // than a one-hot encoding.
  CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
      << "Number of labels must match number of predictions; "
      << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
      << "label count (number of labels) must be N*H*W, "
      << "with integer values in {0, 1, ..., C-1}.";
  // When there are two tops, top[1] holds the probabilities.
  if (top.size() >= 2) {
    // softmax output
    top[1]->ReshapeLike(*bottom[0]);
  }
}
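// Worked example: for a logits blob of shape (32, 5, 240, 240) with softmax
// axis 1, outer_num_ = count(0, 1) = 32 and inner_num_ = count(2) = 240 * 240
// = 57600, so the label blob must hold 32 * 57600 = 1843200 entries.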
// Compute the scaling factor according to the normalization mode.
template <typename Dtype>
Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
    LossParameter_NormalizationMode normalization_mode, int valid_count) {
  Dtype normalizer;
  switch (normalization_mode) {
    case LossParameter_NormalizationMode_FULL:
      normalizer = Dtype(outer_num_ * inner_num_);
      break;
    case LossParameter_NormalizationMode_VALID:
      if (valid_count == -1) {
        normalizer = Dtype(outer_num_ * inner_num_);
      } else {
        normalizer = Dtype(valid_count);
      }
      break;
    case LossParameter_NormalizationMode_BATCH_SIZE:
      normalizer = Dtype(outer_num_);
      break;
    case LossParameter_NormalizationMode_NONE:
      normalizer = Dtype(1);
      break;
    default:
      LOG(FATAL) << "Unknown normalization mode: "
          << LossParameter_NormalizationMode_Name(normalization_mode);
  }
  // Some users will have no labels for some examples in order to 'turn off' a
  // particular loss in a multi-task setup. The max prevents NaNs in that case.
  return std::max(Dtype(1.0), normalizer);
}
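// Continuing the worked example above (illustrative numbers): with outer_num_ = 32
// and inner_num_ = 57600, the normalizer is
//   FULL       -> 32 * 57600 = 1843200
//   VALID      -> the number of non-ignored labels (or 1843200 if valid_count == -1)
//   BATCH_SIZE -> 32
//   NONE       -> 1 (the loss is left as a plain sum)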
// Forward pass
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);  // the first step is simply the softmax forward pass
  const Dtype* prob_data = prob_.cpu_data();   // get the probabilities
  const Dtype* label = bottom[1]->cpu_data();  // get the labels
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {  // outer loop
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      // If this is a label to be ignored, skip it.
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      // The label value must not exceed the number of classes.
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      // loss += -log(prob[label_value])
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  // Normalize.
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
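// A note on the index arithmetic above: prob_ is laid out as
// (outer_num_, C, inner_num_) flattened into one array, with dim = C * inner_num_,
// so the probability of class k at inner position j of sample i lives at
// prob_data[i * dim + k * inner_num_ + j].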
// Backward pass
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // The label data cannot receive backpropagated gradients.
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  // Before diving in, recall how SoftmaxWithLoss backpropagates:
  // 1. for the label_value class, the gradient is prob[label_value] - 1
  // 2. for any other class k (k != label_value), the gradient is prob[k]
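  // Why this holds: with p_k = exp(x_k) / sum_{k'} exp(x_{k'}) and
  // E = -log(p_l) for ground-truth class l, dE/dx_k = p_k - [k == l],
  // which is exactly the two cases listed above.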
  if (propagate_down[0]) {
    // Get the bottom_diff pointer.
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    // Get a pointer to the probability blob.
    const Dtype* prob_data = prob_.cpu_data();
    // Start with step 2 of the backward pass: for every class k != label_value, the gradient is prob[k].
    caffe_copy(prob_.count(), prob_data, bottom_diff);
    // Get the label pointer.
    const Dtype* label = bottom[1]->cpu_data();
    int dim = prob_.count() / outer_num_;
    int count = 0;
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);
        if (has_ignore_label_ && label_value == ignore_label_) {
          // If this label is ignored, its backpropagated gradient is 0.
          for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
        } else {
          // Step 1 of the backward pass: for the label_value class, the gradient
          // is prob[label_value] - 1.
          bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
          ++count;
        }
      }
    }
    // Scale gradient
    // Then scale the gradient by loss_weight / get_normalizer(normalization_, count).
    Dtype loss_weight = top[0]->cpu_diff()[0] /
                        get_normalizer(normalization_, count);
    caffe_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

#ifdef CPU_ONLY
STUB_GPU(SoftmaxWithLossLayer);
#endif

INSTANTIATE_CLASS(SoftmaxWithLossLayer);
REGISTER_LAYER_CLASS(SoftmaxWithLoss);

}  // namespace caffe
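
To make the forward and backward math concrete, here is a small standalone sketch (plain C++ with no Caffe dependency; all names are my own and purely illustrative) that computes the softmax cross-entropy loss and gradient for a single sample and checks the analytic rule p - one_hot(label) against a numerical estimate:

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>
#include <vector>

// Softmax cross-entropy for one sample: returns E = -log(p[label]) and fills
// grad with dE/dx_k = p_k - [k == label], the same rule Backward_cpu applies.
float softmax_xent(const std::vector<float>& x, int label,
                   std::vector<float>* grad) {
  float max_x = *std::max_element(x.begin(), x.end());  // for numerical stability
  grad->assign(x.size(), 0.f);
  float sum = 0.f;
  for (size_t k = 0; k < x.size(); ++k) {
    (*grad)[k] = std::exp(x[k] - max_x);
    sum += (*grad)[k];
  }
  for (size_t k = 0; k < x.size(); ++k) {
    (*grad)[k] /= sum;  // grad now holds the probabilities p
  }
  float loss = -std::log(std::max((*grad)[label], FLT_MIN));  // clamp as the layer does
  (*grad)[label] -= 1.f;  // p - one_hot(label)
  return loss;
}

int main() {
  std::vector<float> logits = {1.0f, 2.0f, 0.5f};
  std::vector<float> grad, scratch;
  float loss = softmax_xent(logits, 1, &grad);
  std::printf("loss = %f\n", loss);
  // Check dE/dx_0 numerically with central differences.
  const float eps = 1e-3f;
  std::vector<float> xp(logits), xm(logits);
  xp[0] += eps;
  xm[0] -= eps;
  float numeric = (softmax_xent(xp, 1, &scratch) -
                   softmax_xent(xm, 1, &scratch)) / (2.f * eps);
  std::printf("analytic dE/dx_0 = %f, numeric = %f\n", grad[0], numeric);
  return 0;
}

The two printed gradient values agree to within the finite-difference error, confirming the prob[label_value] - 1 / prob[k] rule implemented in Backward_cpu.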
