C++ Implementation of the Conjugate Gradient Optimization Algorithm

Header file:

/*
 * Copyright (c) 2008-2011 Zhang Ming (M. Zhang), [email protected]
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 or any later version.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. A copy of the GNU General Public License is available at:
 * http://www.fsf.org/licensing/licenses
 */


/*****************************************************************************
 *                               conjgrad.h
 *
 * Conjugate gradient optimization method.
 *
 * This class is designed for finding the minimum value of an objective
 * function in one or more dimensions. An inexact line search and the PRP
 * (Polak-Ribiere-Polyak) formula are used to compute the step size and the
 * conjugate coefficient "beta" in each iteration.
 *
 * Zhang Ming, 2010-03, Xi'an Jiaotong University.
 *****************************************************************************/


#ifndef CONJGRAD_H
#define CONJGRAD_H


#include <linesearch.h>
#include <utilities.h>


namespace splab
{

    template <typename Dtype, typename Ftype>
    class ConjGrad : public LineSearch<Dtype, Ftype>
    {

    public:

        ConjGrad();
        ~ConjGrad();

        void optimize( Ftype &func, Vector<Dtype> &x0, int reLen,
                       Dtype tol=Dtype(1.0e-6), int maxItr=100 );

        Vector<Dtype> getOptValue() const;
        Vector<Dtype> getGradNorm() const;
        Dtype getFuncMin() const;
        int getItrNum() const;

    private:

        // iteration number
        int itrNum;

        // minimum value of objective function
        Dtype fMin;

        // optimal solution
        Vector<Dtype> xOpt;

        // gradient norm for each iteration
        Vector<Dtype> gradNorm;

    };
    // class ConjGrad


    #include <conjgrad-impl.h>

}
// namespace splab


#endif
// CONJGRAD_H
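
Before reading the implementation, it helps to state the iteration the code performs. Writing g_k for the gradient of f at x_k, each step computes

\[
\beta_k^{\mathrm{PRP}} = \frac{g_k^{\top}(g_k - g_{k-1})}{g_{k-1}^{\top} g_{k-1}},
\qquad
d_k = -g_k + \beta_k^{\mathrm{PRP}}\, d_{k-1},
\qquad
x_{k+1} = x_k + \alpha_k d_k,
\]

where the step size alpha_k comes from the inexact line search in the LineSearch base class, and the direction is reset to d_k = -g_k (steepest descent) every reLen iterations or after a failed line search.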

Implementation file:

/*
 * Copyright (c) 2008-2011 Zhang Ming (M. Zhang), [email protected]
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 or any later version.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details. A copy of the GNU General Public License is available at:
 * http://www.fsf.org/licensing/licenses
 */


/*****************************************************************************
 *                               conjgrad-impl.h
 *
 * Implementation for ConjGrad class.
 *
 * Zhang Ming, 2010-03, Xi'an Jiaotong University.
 *****************************************************************************/


/**
 * constructors and destructor
 */
template <typename Dtype, typename Ftype>
ConjGrad<Dtype, Ftype>::ConjGrad() : LineSearch<Dtype, Ftype>()
{
}

template <typename Dtype, typename Ftype>
ConjGrad<Dtype, Ftype>::~ConjGrad()
{
}


/**
 * Finding the optimal solution. The default error tolerance and maximum
 * iteration number are "tol=1.0e-6" and "maxItr=100", respectively.
 */
template <typename Dtype, typename Ftype>
void ConjGrad<Dtype, Ftype>::optimize( Ftype &func, Vector<Dtype> &x0,
                                       int reLen, Dtype tol, int maxItr )
{
    // initialize parameters.
    int k = 0,
        cnt = 0;
    Vector<Dtype> x(x0);
    Dtype fx = func(x);
    this->funcNum++;
    Vector<Dtype> gnorm(maxItr);
    Vector<Dtype> g = func.grad(x);
    gnorm[k++] = norm(g);

    Dtype alpha,
          beta;
    Vector<Dtype> d(x0.dim()),
                  dPrev(x0.dim()),
                  gPrev(x0.dim());

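    // NOTE: in this library Vector::operator[] is zero-based while
    // Vector::operator() is one-based (cf. "x0(1) = 0.5" in the test code),
    // so gnorm(k) below reads the most recently stored norm, gnorm[k-1].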
    while( ( gnorm(k) > tol ) && ( k < maxItr ) )
    {
        // descent direction: restart with steepest descent every "reLen"
        // iterations, otherwise apply the PRP conjugate update
        if( mod(k,reLen) == 1 )
            d = -g;
        else
        {
            beta = dotProd(g,g-gPrev) / dotProd(gPrev,gPrev);
            d = beta*dPrev - g;
        }

        // one-dimensional line search to determine the step size
        alpha = this->getStep( func, x, d );

        if( !this->success )
        {
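            // the line search failed: zero the previous direction so the
            // next pass reduces to steepest descent (d = -g), and give up
            // after maxItr failed searches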
            dPrev = 0;
            cnt++;
            if( cnt == maxItr )
                break;
        }
        else
        {
            // accept the step: update the iterate, gradient, and direction
            x += alpha*d;
            fx = func(x);
            this->funcNum++;
            dPrev = d;
            gPrev = g;
            g = func.grad(x);
            gnorm[k++] = norm(g);
        }
    }

    xOpt = x;
    fMin = fx;
    gradNorm.resize(k);
    for( int i=0; i<k; ++i )
        gradNorm[i] = gnorm[i];

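    // gradNorm(k) is the one-based access to the last stored norm; if it
    // still exceeds the tolerance, the loop stopped at maxItr without
    // converging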
    if( gradNorm(k) > tol )
        this->success = false;
}


/**
 * Get the optimum point.
 */
template <typename Dtype, typename Ftype>
inline Vector<Dtype> ConjGrad<Dtype, Ftype>::getOptValue() const
{
    return xOpt;
}


/**
 * Get the norm of gradient in each iteration.
 */
template <typename Dtype, typename Ftype>
inline Vector<Dtype> ConjGrad<Dtype, Ftype>::getGradNorm() const
{
    return gradNorm;
}


/**
 * Get the minimum value of objective function.
 */
template <typename Dtype, typename Ftype>
inline Dtype ConjGrad<Dtype, Ftype>::getFuncMin() const
{
    return fMin;
}


/**
 * Get the iteration number.
 */
template <typename Dtype, typename Ftype>
inline int ConjGrad<Dtype, Ftype>::getItrNum() const
{
    return gradNorm.dim()-1;
}
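
linesearch.h is not reproduced here. The sketch below is a hypothetical minimal base-class interface, inferred only from the members this implementation touches (getStep, success, funcNum) and the accessors the test code calls (isSuccess, getFuncNum); the actual SP++ header may declare more, or differently:

// Hypothetical minimal interface, inferred from usage above; assumes
// splab's Vector<Dtype> is in scope.
template <typename Dtype, typename Ftype>
class LineSearch
{
public:
    LineSearch() : funcNum(0), success(true) {}
    virtual ~LineSearch() {}

    // inexact line search: step size alpha along direction d from point x
    // (defined in the library's implementation file, omitted in this sketch)
    Dtype getStep( Ftype &func, Vector<Dtype> &x, Vector<Dtype> &d );

    bool isSuccess() const { return success; }    // did the search succeed?
    int getFuncNum() const { return funcNum; }    // objective evaluations

protected:
    int funcNum;      // number of objective function evaluations
    bool success;     // status flag shared with derived optimizers
};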

Test code:

/*****************************************************************************
 *                               conjgrad_test.cpp
 *
 * Conjugate gradient optimization method test.
 *
 * Zhang Ming, 2010-03, Xi'an Jiaotong University.
 *****************************************************************************/


#define BOUNDS_CHECK

#include <iostream>
#include <iomanip>
#include <objfunc.h>
#include <conjgrad.h>


using namespace std;
using namespace splab;


typedef double  Type;


int main()
{
    Type a = 1.0,
         b = -1.0,
         c = -1.0;
    ObjFunc<Type> f( a, b, c );
    Vector<Type> x0(2);
    x0(1) = 0.5;
    x0(2) = 0.5;

    Type tolErr = 1.0e-6;
    ConjGrad< Type, ObjFunc<Type> > prp;
    prp.optimize( f, x0, x0.dim(), tolErr );
    if( prp.isSuccess() )
    {
        Vector<Type> xmin = prp.getOptValue();
        int N = prp.getItrNum();
        cout << "The iterative number is:   " << N << endl << endl;
        cout << "The number of function calculation is:   "
             << prp.getFuncNum() << endl << endl;
        cout << setiosflags(ios::fixed) << setprecision(4);
        cout << "The optimal value of x is:   " << xmin << endl;
        cout << "The minimum value of f(x) is:   " << f(xmin) << endl << endl;
        cout << "The gradient's norm at x is:   "
             << prp.getGradNorm()[N] << endl << endl;
    }
    else
        cout << "The optimal solution can't be found!" << endl;

    return 0;
}
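
objfunc.h is likewise not shown. Judging from the printed results below (minimum of about -0.4289 at x = (-0.7071, 0)), an objective consistent with the test and the parameters a=1, b=-1, c=-1 would be f(x) = a*x1*exp(b*x1^2 + c*x2^2); the functor below is an assumption reconstructed from that output, not necessarily the library's actual objfunc.h:

#include <cmath>

// Hypothetical objective functor exposing the interface ConjGrad expects:
// operator() for the value and grad() for the gradient. It assumes splab's
// Vector<Type> with one-based operator(). With a=1, b=-1, c=-1 this is
// f(x) = x1*exp(-x1^2 - x2^2), whose minimum -1/sqrt(2e) = -0.4289 lies
// at (-1/sqrt(2), 0) = (-0.7071, 0), matching the output below.
template <typename Type>
class ObjFunc
{
public:
    ObjFunc( Type aa, Type bb, Type cc ) : a(aa), b(bb), c(cc) {}

    // objective value f(x) = a * x1 * exp( b*x1^2 + c*x2^2 )
    Type operator()( Vector<Type> &x )
    {
        return a * x(1) * std::exp( b*x(1)*x(1) + c*x(2)*x(2) );
    }

    // analytic gradient of f
    Vector<Type> grad( Vector<Type> &x )
    {
        Type e = std::exp( b*x(1)*x(1) + c*x(2)*x(2) );
        Vector<Type> df( x.dim() );
        df(1) = a * e * ( 1 + 2*b*x(1)*x(1) );
        df(2) = a * x(1) * e * 2*c*x(2);
        return df;
    }

private:
    Type a, b, c;
};

Note that the test passes reLen = x0.dim() = 2, so the descent direction is reset to -g whenever mod(k,2) == 1, i.e. on every other iteration.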

Run results:

The iterative number is:   29

The number of function calculation is:   207

The optimal value of x is:   size: 2 by 1
-0.7071
-0.0000

The minimum value of f(x) is:   -0.4289

The gradient's norm at x is:   0.0000


Process returned 0 (0x0)   execution time : 0.078 s
Press any key to continue.
