Practical Optimization Algorithms: the helper file

While writing the lab code for this course, I noticed that the algorithms share a number of common routines, so I collected them in a single helper file. This cuts down on duplicated code and makes each lab easier to write.


import numpy as np
from sympy import *
import math

from scipy.optimize import fmin_bfgs  # not used directly in this file


# symbolic variables shared by all helpers (a and b are spare parameter symbols)
x1,a,b,x2,x3 = symbols('x1,a,b,x2,x3')

def get_grad(x,f):
    # gradient of f at the column-vector point x (2- or 3-dimensional),
    # returned as a column vector of the same shape
    if len(x) == 2:
        ans = [[],[]]
        ans[0].append(diff(f,x1).subs(x1,x[0][0]).subs(x2,x[1][0]))
        ans[1].append(diff(f,x2).subs(x1,x[0][0]).subs(x2,x[1][0]))
        ans = np.array(ans)
        return ans
    elif len(x) == 3:
        ans = [[],[],[]]
        ans[0].append(diff(f,x1).subs(x1,x[0][0]).subs(x2,x[1][0]).subs(x3,x[2][0]))
        ans[1].append(diff(f,x2).subs(x1,x[0][0]).subs(x2,x[1][0]).subs(x3,x[2][0]))
        ans[2].append(diff(f,x3).subs(x1,x[0][0]).subs(x2,x[1][0]).subs(x3,x[2][0]))
        ans = np.array(ans)
        return ans

def get_len_grad(x):
    # Euclidean norm of a column vector (typically a gradient)
    s = 0
    for i in x:
        s += pow(i[0],2)
    return math.sqrt(s)
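
A quick sanity check I like to run on these two helpers (my own example, not part of the lab hand-out): for f = x1**2 + 2*x2**2 at the point (1, 1) the gradient is (2, 4) and its length is sqrt(20) ≈ 4.47. Points are always passed as n×1 column vectors.

f_demo = x1**2 + 2*x2**2
p_demo = np.array([[1.0], [1.0]])
print(get_grad(p_demo, f_demo))                 # gradient (2, 4) as a 2x1 column vector
print(get_len_grad(get_grad(p_demo, f_demo)))   # ≈ 4.472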

# ----------------------------------------------------------------------------------------------------------------------

def get_value(t,xx,d,f):
    # value of f at the trial point xx + t*d (2- or 3-dimensional)
    test_x = xx + t*d
    if len(test_x) == 3:
        return f.subs(x1,test_x[0][0]).subs(x2,test_x[1][0]).subs(x3,test_x[2][0])
    elif len(test_x) == 2:
        return f.subs(x1, test_x[0][0]).subs(x2, test_x[1][0])


def golden_search(st,ed,xx,d,f):
    # golden-section (0.618) exact line search for the step length t in [st, ed]
    # that minimizes f(xx + t*d); the local x1/x2 below are the two interior
    # trial points and only shadow the sympy symbols inside this function
    a = st
    b = ed
    cnt = 1
    x1 =  a + 0.382*(b - a)
    x2 = a + 0.618*(b - a)
    while b - a > 1e-10:
        x1_value = get_value(x1,xx,d,f)
        x2_value = get_value(x2,xx,d,f)
        if x1_value < x2_value:
            b = x2
            x2 = x1
            x1 = a + 0.382*(b - a)
        elif x1_value > x2_value:
            a = x1
            x1 = x2
            x2 = a + 0.618*(b - a)
        elif x1_value == x2_value:
            a = x1
            b = x2
            x1 = a + 0.382 * (b - a)
            x2 = a + 0.618 * (b - a)
        cnt += 1
    ans = ( b + a )/ 2
    return ans
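
golden_search looks for the step length t in [st, ed] that minimizes f(xx + t*d) along the direction d. A minimal sketch with my own test case (not from the labs): starting at (1, 1) on f = x1**2 + x2**2 and moving along the negative gradient (-2, -2), the exact minimizer is t = 0.5, which lands on the origin.

f_demo = x1**2 + x2**2
xx_demo = np.array([[1.0], [1.0]])
d_demo = np.array([[-2.0], [-2.0]])
print(golden_search(0, 2, xx_demo, d_demo, f_demo))   # ≈ 0.5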

#----------------------------------------------------------------------------------------------------------------------------

def non_accuracy_search(x0,d,f):
    # inexact (Armijo-type backtracking) line search: keep halving alpha until
    # f(xk + alpha*d) <= f(xk) + p*alpha*grad(xk)^T*d (two-variable f only)
    alpha = 1
    beta = 0.5   # shrink factor
    p = 0.25     # Armijo parameter
    xk = x0
    tmp1 = f.subs(x1,(xk + alpha*d)[0][0]).subs(x2,(xk + alpha*d)[1][0])
    tmp2 = f.subs(x1,xk[0][0]).subs(x2,xk[1][0]) + p*alpha*(np.dot(get_grad(xk,f).T,d))
    while tmp1 > tmp2:
        alpha = beta*alpha
        tmp1 = f.subs(x1, (xk + alpha * d)[0][0]).subs(x2, (xk + alpha * d)[1][0])
        tmp2 = f.subs(x1, xk[0][0]).subs(x2, xk[1][0]) + p * alpha * np.dot((get_grad(xk,f).T),d)
    print('inexact line search step size', alpha)
    return alpha
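
On the same test case as above (again my own example, not a lab function), one halving of alpha is enough and the search returns 0.5:

print(non_accuracy_search(np.array([[1.0], [1.0]]), np.array([[-2.0], [-2.0]]), x1**2 + x2**2))   # 0.5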

#----------------------------------------------------------------------------------------------------------------------------

def get_grad_grad(x,f):
    # 2x2 Hessian of a two-variable f evaluated at the point x
    ans = [[],[]]
    t = diff(f,x1)
    tt = diff(t,x1)
    ans[0].append(tt.subs(x1,x[0][0]).subs(x2, x[1][0]))
    tt = diff(t,x2)
    ans[0].append(tt.subs(x1,x[0][0]).subs(x2, x[1][0]))
    t = diff(f,x2)
    tt = diff(t,x1)
    ans[1].append(tt.subs(x1,x[0][0]).subs(x2, x[1][0]))
    tt = diff(t,x2)
    ans[1].append(tt.subs(x1,x[0][0]).subs(x2, x[1][0]))
    ans = np.array(ans)
    return ans
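
A small check I use for the Hessian helper (my own example): for f = x1**2 + x1*x2 + 3*x2**2 the Hessian is constant, [[2, 1], [1, 6]], at any point.

print(get_grad_grad(np.array([[0.0], [0.0]]), x1**2 + x1*x2 + 3*x2**2))   # [[2, 1], [1, 6]]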

#----------------------------------------------------------------------------------------------------------------------------

def get_biet(g0,g1):
    # Fletcher-Reeves coefficient: beta = (g1^T g1) / (g0^T g0)
    return (np.dot(g1.T,g1) / np.dot(g0.T,g0))[0][0]

def get_biet_PRP(k,g0,g1):
    # Polak-Ribiere-Polyak coefficient: beta = g1^T (g1 - g0) / (g0^T g0)
    if k == 0:
        return 0
    return (np.dot(g1.T,(g1 - g0))/(np.dot(g0.T,g0)))[0][0]

def FR_Newton(x,f):
    # Fletcher-Reeves conjugate gradient method (despite the name), restarted
    # along the steepest-descent direction every n iterations
    g = get_grad(x,f)
    g1 = g
    k = 0
    n = 5
    next_x = x
    d = -1*g1
    while get_len_grad(g1) >= 0.0001:
        if k == 0 or k % n == 0:
            d = -1*g1
        else:
            bt = get_biet(g, g1)
            d = -1*g1 + bt * d
        step = golden_search(0,2,x,d,f)  # exact line search with the golden-section method from Lab 1
        # step = non_accuracy_search(x,d,f)
        next_x = x + step*d
        k += 1
        g = g1
        x = next_x
        g1 = get_grad(next_x,f)
        # print(x)
    return next_x
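
A minimal run of FR_Newton on my own test problem (not one of the lab functions): minimizing x1**2 + 2*x2**2 from (2, 1) reaches the minimizer (0, 0) in two iterations, since exact line searches make conjugate gradient finite on quadratics.

print(FR_Newton(np.array([[2.0], [1.0]]), x1**2 + 2*x2**2))   # ≈ [[0], [0]]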

# ------------------DFP--------------------------------------------------------------------------------------------------------
def DFP(x0,h,f):
    # DFP quasi-Newton method; h is the initial approximation of the inverse Hessian
    k = 1
    while get_len_grad(get_grad(x0,f)) > 0.00001:
        d = -1 * np.dot(h, get_grad(x0,f))
        step = golden_search(0, 2, x0, d, f)
        x1 = x0 + step * d  # local name; only shadows the sympy symbol x1 inside DFP
        s = x1 - x0
        y = get_grad(x1,f) - get_grad(x0,f)
        # DFP update: h <- h - (h y y^T h)/(y^T h y) + (s s^T)/(y^T s)
        t1 = np.dot(h,y)
        t2 = np.dot(t1,y.T)
        t3 = np.dot(t2,h)  # numerator of the first correction term
        t4 = np.dot(y.T,h)
        t5 = np.dot(t4,y)  # its denominator
        t6 = np.dot(s,s.T)  # numerator of the second correction term
        t7 = np.dot(y.T, s)  # its denominator
        h = h - t3*(1/t5[0][0]) + (1/t7[0][0]) * t6
        x0 = x1
        # print(k,' ',x0)
        k += 1
    return x0
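
DFP takes the starting point, an initial approximation h of the inverse Hessian (usually the identity), and the objective. A minimal sketch on the same test problem as above (my own example):

print(DFP(np.array([[2.0], [1.0]]), np.eye(2), x1**2 + 2*x2**2))   # ≈ [[0], [0]]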
