LM、GN算法原理及实现

1、LM算法原理

LM、GN算法原理及实现_第1张图片

2、 高斯-牛顿算法

对 f(x) 进行一阶泰勒展开:$f(x+\Delta x) \approx f(x) + J(x)\Delta x$,其中 $J(x)$ 为 $f(x)$ 关于 $x$ 的雅可比矩阵

目标函数可转化为 $\min_{\Delta x} \frac{1}{2}\left\| f(x) + J(x)\Delta x \right\|^2$

上式对 $\Delta x$ 求导并令导数为零,得:$J(x)^T J(x)\,\Delta x = -J(x)^T f(x)$

记 $H = J(x)^T J(x)$、$g = -J(x)^T f(x)$,上式简化为 $H\Delta x = g$,即为高斯-牛顿算法的更新方程

3、GN与LM的区别

LM 在 GN 的基础上引入阻尼因子 $\lambda$(求解 $(H+\lambda I)\Delta x = g$),会拒绝使残差平方和增大的更新,因而更稳健但收敛可能稍慢;而 GN 直接求解 $H\Delta x = g$,必须保证 $H = J^TJ$ 可逆(正定)。

4、LM的c++简单实现

#include 
#include 

using namespace std;
using namespace Eigen;

vector obs_;
int max_iterations_ = 100;
int max_inner_iterations_ = 10;
int lambda_;
VectorXd errors_;
Matrix3d hessian_;
Vector3d hb_;
MatrixXd jacobian_;
double tau_ = 1e-5;
double epsilon_ = 1e-5;
double current_squared_error;
double a_,b_,c_;

void computeJacobian();
void computeHb();
void initLambda();

int main(){
    double a = 1, b = 2 , c= 3;
    a_ = 0, b_= 0, c_=0;
    std::default_random_engine generator;
    std::normal_distribution noise(0,0.1);
    for(int i=0;i<100;i++){
        Vector2d ob;
        ob(0) = i;
        ob(1) = a*i*i+b*i+c+noise(generator);
        obs_.push_back(ob);
    }
    computeJacobian();
    computeHb();
    initLambda();
    int iter = 0;
    double delta_norm; //每次优化增量的大小
    double v = 2.0;
    while (iter++0){
                a_= aa,b_ = bb,c_=cc;
                lambda_ = lambda_*max(1.0/3.0, 1-pow((2*rou-1),3));
                v = 2;
                break;
            }else{
                lambda_ = lambda_ *v;
                v *= 2;
            }
        }
        if(delta_norm< epsilon_){
            break;
        }
        computeJacobian();
        computeHb();
    }
    cout<< a_<<" "< jac;
        double yy = a_*xi*xi+b_*xi+c_;
        jac(0,0) = xi*xi;
        jac(0,1) = xi;
        jac(0,2) = 1;
        jacobian_.row(i) = jac;
        errors_(i) = yy-yi;
    }
}

void computeHb(){
    hessian_ = jacobian_.transpose()*jacobian_;
    hb_  = -jacobian_.transpose()*errors_;
}

 

5、Ceres示例

#include
#include
#include
using namespace std;
using namespace Eigen;

struct costFunc
{
    costFunc(double x, double y):x_(x),y_(y){}
    template
    bool operator()(const T* const a,const T* const b, const T* const c, T* residual) const{
        residual[0] = T(y_) - (a[0]*x_*x_+b[0]*x_+c[0]);
        return true;
    }
    const double x_;
    const double y_;
};


int main(){
    vector obs;
    std::default_random_engine generator;
    std::normal_distribution noise(0,0.1);
    double a = 1.,b = 2., c= 3.;
    double a0 = 0,b0= 0,c0 = 0;
    cout<< "original: " << a0<<" "<(
            new costFunc(obs[i][0],obs[i][1]));
        problem.AddResidualBlock(pCostFunction, nullptr, &a0,&b0,&c0);
    }

    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_QR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options,&problem,&summary);

    cout<< summary.BriefReport()<

解析求导形式:

#include
#include
#include
using namespace std;
using namespace Eigen;

class CostFunc: public ceres::SizedCostFunction<1,3>{
public:
    CostFunc(double x , double y):x_(x),y_(y){}
    virtual ~CostFunc(){}
    virtual bool Evaluate(double const* const* parameters, double* residuals, double** jacobians) const{
        const double a = parameters[0][0];
        const double b = parameters[0][1];
        const double c = parameters[0][2];

        residuals[0] = a*x_*x_+b*x_+c -y_;
        if(!jacobians) return true;
        double* jac = jacobians[0];
        if(!jac) return true;
        jac[0] = x_*x_;
        jac[1] = x_;
        jac[2] = 1;
        return true;
    }

private:
    const double x_;
    const double y_;
};


int main(){
    vector obs;
    std::default_random_engine generator;
    std::normal_distribution noise(0,0.1);
    double a = 1.,b = 2., c= 3.;
    double a0 = 0,b0= 0,c0 = 0;
    cout<< "original: " << a0<<" "<

 

你可能感兴趣的:(编程基础,LM算法)