[optimization]Dogleg Method狗腿算法

实现信赖域框架下的狗腿算法,MATLAB 代码如下:

function [x_opt, y] = dogLeg()
% DOGLEG  Minimize the Rosenbrock function with a trust-region dogleg method.
%   x_opt : final iterate (2x1 vector)
%   y     : objective value at each iteration (zeros past convergence)

% init: random start in [0,10]^2
x = [10 * rand(1), 10 * rand(1)]';
nIter = 100;
delta = 2;          % current trust-region radius
deltaMax = 10000;   % upper bound on the trust-region radius
x_iter = zeros(nIter, 2);
h = zeros(nIter, 1);
y = zeros(nIter, 1);
% iteration
for i = 1 : nIter
    % cache gradient and Hessian: one evaluation per iteration
    gk = g(x);
    Bk = B(x);
    % stop when the gradient is (near) zero
    if sum(abs(gk)) < 0.0001
        break
    end

    % Cauchy point: unconstrained minimizer of the model along -gk
    pU = -(gk' * gk) / (gk' * Bk * gk) * gk;
    % full Newton step (backslash is more stable than explicit inverse)
    pB = -Bk \ gk;

    if pB' * pB <= delta * delta
        % Newton step lies inside the region: take it
        pk = pB;
    elseif pU' * pU >= delta * delta
        % even the Cauchy point leaves the region: scale it to the boundary
        pk = (delta / sqrt(pU' * pU)) * pU;
    else
        % dogleg segment: find s in (0,1] with ||pU + s*(pB - pU)|| = delta,
        % i.e. solve the quadratic a*s^2 + 2*b*s + c = 0 and take the
        % positive root (the original code dropped the cross term b)
        d = pB - pU;
        a = d' * d;
        b = pU' * d;
        c = pU' * pU - delta * delta;
        s = (-b + sqrt(b * b - a * c)) / a;
        pk = pU + s * d;
    end

    % update trust region
    % reduction ratio: actual decrease vs. model-predicted decrease
    r = ( f(x) - f(x + pk) ) / ( m(x,zeros(2,1)) - m(x,pk) );
    if r < 0.25
        delta = 0.25 * delta;
    elseif r > 0.75 && abs(sqrt(pk' * pk) - delta) < 1e-8
        % expand only when the step actually reached the boundary
        delta = min(2 * delta, deltaMax);
    end

    % accept the step only when the actual reduction is a sufficient
    % fraction of the predicted one
    if r > 0.25
        x = x + pk;
    end
    x_iter(i,:) = x';
    y(i) = f(x);
    h(i) = i;
end
    x_opt = x;
end

% Rosenbrock objective f(x) = 100*(x2 - x1^2)^2 + (1 - x1)^2
function y = f(x)
    quadTerm = x(2) - x(1)^2;   % valley term
    linTerm  = 1 - x(1);        % distance from the minimizer x1 = 1
    y = 100 * quadTerm^2 + linTerm^2;
end

% Gradient of the Rosenbrock objective, returned as a 2x1 column vector.
function y = g(x)
    dfdx1 = 400 * (x(1)^2 - x(2)) * x(1) + 2 * (x(1) - 1);
    dfdx2 = 200 * (x(2) - x(1)^2);
    y = [dfdx1; dfdx2];
end

% Hessian of the Rosenbrock objective (2x2 symmetric matrix).
function y = B(x)
    offDiag = -400 * x(1);   % shared symmetric off-diagonal entry
    y = [400 * (3*x(1)^2- x(2)) + 2, offDiag; offDiag, 200];
end

% Quadratic model of f around x evaluated at step p:
% m(p) = f(x) + g(x)'*p + 0.5*p'*B(x)*p
function y = m(x, p)
    linearPart = g(x)' * p;
    quadPart   = 0.5 * (p' * B(x) * p);
    y = f(x) + linearPart + quadPart;
end



[optimization]Dogleg Method狗腿算法_第1张图片



[optimization]Dogleg Method狗腿算法_第2张图片




[optimization]Dogleg Method狗腿算法_第3张图片





[optimization]Dogleg Method狗腿算法_第4张图片

你可能感兴趣的:([optimization]Dogleg Method狗腿算法)