梯度下降 计算实例 (Gradient Descent — Worked Examples)

Example1

The gradient descent algorithm is applied to find a local minimum of the function f(x) = x^4 - 3x^3 + 2, with derivative f'(x) = 4x^3 - 9x^2.

// From calculation, the local minimum is expected at x = 9/4 (the root of f'(x) = 4x^3 - 9x^2 besides x = 0).
double x_old = 0.0; // initial value is arbitrary, as long as abs(x_new - x_old) > precision so the loop runs at least once
double x_new = 6.0; // the algorithm starts at x = 6
double gamma = 0.01; // step size (learning rate)
double precision = 0.00001; // stop once successive iterates differ by less than this

/// Derivative of f(x) = x^4 - 3x^3 + 2, i.e. f'(x) = 4x^3 - 9x^2.
double df(double x)
{
    const double cubic_term = 4 * x * x * x;   // 4x^3
    const double quadratic_term = 9 * x * x;   // 9x^2
    return cubic_term - quadratic_term;
}

void gradientDescent()
{
    while (abs(x_old - x_new) > precision)
    {
        x_old = x_new;
        x_new -= gamma *df(x_old);
    }
}

int main()
{
    gradientDescent();

    cout<return 0;
}

Example2

梯度下降 计算实例_第1张图片

// NOTE(review): the original #include targets were lost in extraction;
// reconstructed from the code below (cout/endl, cos/exp/M_PI,
// Eigen::MatrixXd, INT_MAX) — verify against the original article.
#include <iostream>
#include <cmath>
#include <climits>
#include <Eigen/Dense>

using namespace std;


double gamma = 0.001;   // step size (learning rate) for the descent update
int max_iter = 1000;    // hard cap on iterations
float func_tol = 0.1;   // stop once f(x) drops to or below this tolerance
/// Objective f(x) = 0.5 * (r0^2 + r1^2 + r2^2), the half sum-of-squares of
/// the three residuals of the nonlinear system (same residuals as gx()).
double fx(const Eigen::MatrixXd& x)
{
    const double r0 = 3*x(0,0) - cos(x(1,0) * x(2,0)) - 3.0/2;
    const double r1 = 4* x(0,0) *x(0,0) - 625 * x(1,0) * x(1,0) + 2*x(1,0) - 1;
    const double r2 = exp(-x(0,0) * x(1,0)) + 20 *x(2,0) +  (10 * M_PI-3)/3;

    return 0.5*(r0 * r0 + r1 * r1 + r2 * r2);
}
/// i-th residual g_i(x) of the 3-equation nonlinear system.
/// Returns INT_MAX for any i outside [0, 2] (sentinel kept from the
/// original contract).
double gx(const Eigen::MatrixXd& x,int i)
{
    if (i == 0)
        return 3*x(0,0) - cos(x(1,0) * x(2,0)) - 3.0/2;
    if (i == 1)
        return 4*x(0,0) *x(0,0) - 625 * x(1,0) * x(1,0) + 2*x(1,0) - 1;
    if (i == 2)
        return exp(-x(0,0) * x(1,0)) + 20 *x(2,0) +  (10 * M_PI-3)/3;

    return INT_MAX;
}

/// Jacobian entry J(i,j) = d g_i / d x_j of the residual vector gx().
/// Dispatches on the flattened index 3*i + j; any index outside [0, 8]
/// yields the INT_MAX sentinel (kept from the original contract).
double JG(const Eigen::MatrixXd& x,int i,int j)
{
    const double x0 = x(0,0);
    const double x1 = x(1,0);
    const double x2 = x(2,0);

    switch(3*i+j)
    {
        case 0: return 3;                        // d g0 / d x0
        case 1: return sin(x1*x2)*x2;            // d g0 / d x1
        case 2: return sin(x1*x2)*x1;            // d g0 / d x2
        case 3: return 8 * x0;                   // d g1 / d x0
        case 4: return -1250 * x1 + 2;           // d g1 / d x1
        case 5: return 0;                        // d g1 / d x2
        case 6: return -x1 * exp(-x0 * x1);      // d g2 / d x0
        case 7: return -x0 * exp(-x0 * x1);      // d g2 / d x1
        case 8: return 20;                       // d g2 / d x2
    }

    return INT_MAX;
}


void gradientDescent1(Eigen::MatrixXd &x_data)
{

    int iter = 0;
    double fval = fx(x_data);
    cout<<"x=["<0,0)<<","<1,0)<<","<2,0)<<"]"<<" ";
    cout<<"f(x)="<while (iter < max_iter && fval > func_tol)
    {
        Eigen::MatrixXd J(3,3);
        for(int i = 0;i<3;++i)
            for(int j = 0;j<3;++j)
                J(i,j) = JG(x_data,i,j);
        cout<3,1);
        for(int i = 0;i<3;++i)
            G(i,0) = gx(x_data,i);
        cout<cout<<"Iter: "<" ";
        cout<<"x=["<0,0)<<","<1,0)<<","<2,0)<<"]"<<" ";
        cout<<"f(x)="<int main()
{
    Eigen::MatrixXd x_data(3,1);

    x_data(0,0) = 0,0;
    x_data(1,0) = 0,0;
    x_data(2,0) = 0,0;

    gradientDescent1(x_data);



    return 0;
}

你可能感兴趣的:(算法,C++,Math,PCL学习)