Defining a New Activation Function in MXNet (Custom activation function)

This post takes a fairly simple route: instead of writing a standalone operator, the new activation is added to the call path of an existing activation operator (LeakyReLU). The example adds an RBF unit, f(x) = exp(-x^2), whose derivative is f'(x) = -2x·exp(-x^2).

  • Preparation

Download the MXNet source code and make sure it compiles cleanly. Doing this on Linux is recommended:

https://mxnet.incubator.apache.org/get_started/install.html

  • Implement the forward and backward passes of the activation

In src/operator/mshadow_op.h, add the forward and backward functions for the new activation:

/*!
 * \brief RBF Unit
 * \author Yuzhong Liu
 */
struct rbf {
  template<typename DType>
  MSHADOW_XINLINE static DType Map(DType x) {
    // forward: f(x) = exp(-x^2)
    return DType(expf(-x * x));
  }
};

struct rbf_grad {
  template<typename DType>
  MSHADOW_XINLINE static DType Map(DType x, DType a) {
    // backward: f'(x) = -2 * x * exp(-x^2) = -2 * x * a, where a is the forward output
    return DType(-2 * x * a);
  }
};
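The pair above implements f(x) = exp(-x^2) and its derivative. As a quick sanity check outside of MXNet (a NumPy sketch, not part of the build), the rbf_grad formula can be compared against a numerical derivative:

import numpy as np

def rbf(x):
    # forward: f(x) = exp(-x^2)
    return np.exp(-x * x)

def rbf_grad(x, a):
    # backward: f'(x) = -2 * x * a, where a = rbf(x)
    return -2 * x * a

x = np.linspace(-2, 2, 9)
eps = 1e-5
numeric = (rbf(x + eps) - rbf(x - eps)) / (2 * eps)
analytic = rbf_grad(x, rbf(x))
print(np.max(np.abs(numeric - analytic)))  # should be close to 0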
  • Add the call path for the new activation

In src/operator/leaky_relu-inl.h, hook the new activation into the existing LeakyReLU operator:

namespace leakyrelu {
enum LeakyReLUOpInputs {kData, kGamma};
enum LeakyReLUOpOutputs {kOut, kMask};
// Add the new activation type to the enum
enum LeakyReLUOpType {kLeakyReLU, kPReLU, kRReLU, kELU, kRBF};
enum LeakyReLUOpResource {kRandom};
}  // namespace leakyrelu

struct LeakyReLUParam : public dmlc::Parameter<LeakyReLUParam> {
  // use int for enumeration
  int act_type;
  float slope;
  float lower_bound;
  float upper_bound;
  DMLC_DECLARE_PARAMETER(LeakyReLUParam) {
    DMLC_DECLARE_FIELD(act_type).set_default(leakyrelu::kLeakyReLU)
    .add_enum("rrelu", leakyrelu::kRReLU)
    .add_enum("leaky", leakyrelu::kLeakyReLU)
    .add_enum("prelu", leakyrelu::kPReLU)
    .add_enum("elu", leakyrelu::kELU)
    // Register the string name for the new activation
    .add_enum("rbf", leakyrelu::kRBF)
    .describe("Activation function to be applied.");
    DMLC_DECLARE_FIELD(slope).set_default(0.25f)
    .describe("Init slope for the activation. (For leaky and elu only)");
    DMLC_DECLARE_FIELD(lower_bound).set_default(0.125f)
    .describe("Lower bound of random slope. (For rrelu only)");
    DMLC_DECLARE_FIELD(upper_bound).set_default(0.334f)
    .describe("Upper bound of random slope. (For rrelu only)");
  }
};

template<typename xpu>
class LeakyReLUOp : public Operator {
 public:
  explicit LeakyReLUOp(LeakyReLUParam param) {
    param_ = param;
  }

  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_args) {
    using namespace mshadow;
    using namespace mshadow::expr;
    size_t expected = param_.act_type == leakyrelu::kPReLU ? 2 : 1;
    CHECK_EQ(in_data.size(), expected);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 3> data;
    Tensor<xpu, 3> out;
    Tensor<xpu, 3> mask;
    Tensor<xpu, 1> weight;
    int n = in_data[leakyrelu::kData].shape_[0];
    int k = in_data[leakyrelu::kData].shape_[1];
    Shape<3> dshape = Shape3(n, k, in_data[leakyrelu::kData].Size()/n/k);
    data = in_data[leakyrelu::kData].get_with_shape<xpu, 3, real_t>(dshape, s);
    out = out_data[leakyrelu::kOut].get_with_shape<xpu, 3, real_t>(dshape, s);
    if (param_.act_type == leakyrelu::kRReLU) {
      mask = out_data[leakyrelu::kMask].get_with_shape<xpu, 3, real_t>(dshape, s);
    }
    switch (param_.act_type) {
      case leakyrelu::kLeakyReLU: {
        Assign(out, req[leakyrelu::kOut], F<mshadow_op::xelu>(data, param_.slope));
        break;
      }
      case leakyrelu::kPReLU: {
        weight = in_data[leakyrelu::kGamma].get<xpu, 1, real_t>(s);
        Assign(out, req[leakyrelu::kOut],
               F<mshadow_op::xelu>(data, broadcast<1>(weight, out.shape_)));
        break;
      }
      case leakyrelu::kRReLU: {
        if (ctx.is_train) {
          Random<xpu>* prnd = ctx.requested[leakyrelu::kRandom].get_random<xpu, real_t>(s);
          mask = prnd->uniform(mask.shape_);
          mask = mask * (param_.upper_bound - param_.lower_bound) + param_.lower_bound;
          Assign(out, req[leakyrelu::kOut], F<mshadow_op::xelu>(data, mask));
        } else {
          const float slope = (param_.lower_bound + param_.upper_bound) / 2.0f;
          Assign(out, req[leakyrelu::kOut], F<mshadow_op::xelu>(data, slope));
        }
        break;
      }
      case leakyrelu::kELU: {
        Assign(out, req[leakyrelu::kOut], F<mshadow_op::elu>(data, param_.slope));
        break;
      }
      // RBF forward pass
      case leakyrelu::kRBF: {
        Assign(out, req[leakyrelu::kOut], F<mshadow_op::rbf>(data));
        break;
      }
      default:
        LOG(FATAL) << "Not implemented";
    }
  }

  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_args) {
    using namespace mshadow;
    using namespace mshadow::expr;
    size_t expected = param_.act_type == leakyrelu::kPReLU ? 2 : 1;
    CHECK_EQ(out_grad.size(), 1U);
    CHECK_EQ(req.size(), expected);
    CHECK_EQ(in_data.size(), expected);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 3> output;
    Tensor<xpu, 3> data;
    Tensor<xpu, 3> gdata;
    Tensor<xpu, 3> grad;
    Tensor<xpu, 3> mask;
    Tensor<xpu, 1> weight;
    Tensor<xpu, 1> grad_weight;
    int n = out_grad[leakyrelu::kOut].shape_[0];
    int k = out_grad[leakyrelu::kOut].shape_[1];
    Shape<3> dshape = Shape3(n, k, out_grad[leakyrelu::kOut].Size()/n/k);
    grad = out_grad[leakyrelu::kOut].get_with_shape<xpu, 3, real_t>(dshape, s);
    gdata = in_grad[leakyrelu::kData].get_with_shape<xpu, 3, real_t>(dshape, s);
    output = out_data[leakyrelu::kOut].get_with_shape<xpu, 3, real_t>(dshape, s);
    if (param_.act_type == leakyrelu::kRReLU) {
      mask = out_data[leakyrelu::kMask].get_with_shape<xpu, 3, real_t>(dshape, s);
    }
    if (param_.act_type == leakyrelu::kPReLU) {
      data = in_data[leakyrelu::kData].get_with_shape<xpu, 3, real_t>(dshape, s);
    }
    switch (param_.act_type) {
      case leakyrelu::kLeakyReLU: {
        Assign(gdata, req[leakyrelu::kData], F<mshadow_op::xelu_grad>(output, param_.slope) * grad);
        break;
      }
      case leakyrelu::kPReLU: {
        weight = in_data[leakyrelu::kGamma].get<xpu, 1, real_t>(s);
        grad_weight = in_grad[leakyrelu::kGamma].get<xpu, 1, real_t>(s);
        grad_weight = sumall_except_dim<1>(F<prelu_grad>(data) * grad);
        gdata = F<mshadow_op::xelu_grad>(data, broadcast<1>(weight, data.shape_)) * grad;
        break;
      }
      case leakyrelu::kRReLU: {
        Assign(gdata, req[leakyrelu::kData], F<mshadow_op::xelu_grad>(output, mask) * grad);
        break;
      }
      case leakyrelu::kELU: {
        Assign(gdata, req[leakyrelu::kData], F<mshadow_op::elu_grad>(output, param_.slope) * grad);
        break;
      }
      // RBF backward pass: rbf_grad(x, a) = -2 * x * a, with a = rbf(x)
      case leakyrelu::kRBF: {
        data = in_data[leakyrelu::kData].get_with_shape<xpu, 3, real_t>(dshape, s);
        Assign(gdata, req[leakyrelu::kData], F<mshadow_op::rbf_grad>(data, output) * grad);
        break;
      }
      default:
        LOG(FATAL) << "Not implemented";
    }
  }

 private:
  LeakyReLUParam param_;
};  // class LeakyReLUOp
  • Rebuild and test
import mxnet as mx
from mxnet import autograd

a = mx.nd.random_uniform(-1, 1, shape=[3, 3]) + 0.5
a.attach_grad()

with autograd.record():
    b = mx.nd.LeakyReLU(data=a, act_type='rbf')

print(a, b)
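To verify the backward pass as well, the autograd gradient can be compared against the analytic derivative -2x·exp(-x^2). A sketch, assuming the rebuilt Python package is the one being imported:

import mxnet as mx
from mxnet import autograd

x = mx.nd.random_uniform(-1, 1, shape=[3, 3])
x.attach_grad()
with autograd.record():
    y = mx.nd.LeakyReLU(data=x, act_type='rbf')
y.backward()

# analytic gradient of exp(-x^2)
expected = -2 * x * mx.nd.exp(-x * x)
print(mx.nd.max(mx.nd.abs(x.grad - expected)))  # should be close to 0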
  • References

https://mxnet.incubator.apache.org/how_to/new_op.html
http://blog.csdn.net/qq_20965753/article/details/66975622?utm_source=debugrun&utm_medium=referral
