拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法的python实现[数值最优化2021]

本文的代码与课本算法的流程框架完全一致,不再重复说明理论,只是汇总一下代码实现。

文章目录

  • 拟牛顿法-BFGS
  • 高斯牛顿法-GN
  • 牛顿法-Newton
  • 共轭梯度法
  • 总结

拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法法的python实现[数值最优化2021]_第1张图片

拟牛顿法-BFGS

# -*- coding: utf-8 -*-#
# Author: xhc
# Date:    2021-05-28 16:01
# project:  1_zyh
# Name:    BFGS.py
import numpy as np

# Objective function: f(x) = 0.5*x1^2 + x2^2 - x1*x2 - 2*x1
# (textbook example 4.1.1; x is a (2,1) array/matrix, indexed as x[0], x[1])
fun = lambda x: 0.5*x[0]**2+x[1]**2-x[0]*x[1]-2*x[0]


# 定义函数 用于求梯度数值 数据放入mat矩阵
def gfun(x):
    """Gradient of f(x) = 0.5*x1^2 + x2^2 - x1*x2 - 2*x1, as a (1,2) matrix."""
    arr = np.array(x)
    x1, x2 = arr[0][0], arr[1][0]
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([x1 - x2 - 2, -1.0 * x1 + 2 * x2])


# 传入参数有三个 fun为函数 gfun为梯度 x0为初始值
def BGFS(fun, gfun, x0):
    """Minimize `fun` with the BFGS quasi-Newton method and Armijo line search.

    NOTE(review): the name "BGFS" is a typo for "BFGS"; kept unchanged so
    existing callers keep working.

    Args:
        fun:  objective function, callable on a (2,1) point.
        gfun: gradient of `fun`; returns a (1,2) matrix.
        x0:   starting point, shape (2,1).

    Returns:
        (x, k): the final iterate and the number of iterations performed.
    """
    maxk = 5000       # maximum number of iterations
    rho = 0.45        # backtracking contraction factor
    sigma = 0.3       # sufficient-decrease constant, in (0, 1/2)
    epsilon = 1e-6    # stop once the gradient norm drops below this
    k = 0             # iteration counter
    # Initial Hessian approximation: identity.
    # (np.matrix instead of np.mat: the np.mat alias was removed in NumPy 2.0.)
    Bk = np.matrix([[1.0, 0.0], [0.0, 1.0]])
    # FIX: initialize x so that an immediate convergence break (gradient
    # already small at x0) no longer raises NameError at `return x, k`.
    x = x0

    while k < maxk:
        gk = gfun(x0)  # gradient at the current point, shape (1,2)
        if np.linalg.norm(gk) < epsilon:  # converged
            break
        dk = -Bk.I.dot(gk.T)  # quasi-Newton direction: solve Bk*d = -g
        # Armijo backtracking: smallest m with sufficient decrease, step = rho**m.
        m = 0
        mk = 0
        while m < 30:
            if fun(x0 + (rho**m)*dk) < fun(x0) + sigma * (rho**m)*gk.dot(dk):
                mk = m
                break
            m = m + 1

        x = x0 + (rho**mk)*dk  # take the step, shape (2,1)
        # BFGS update of the Hessian approximation Bk.
        sk = x - x0            # step, shape (2,1)
        yk = gfun(x) - gk      # gradient change, shape (1,2)
        if yk * sk > 0:        # curvature condition y's > 0: only then update Bk
            Bk = Bk - (Bk * sk * sk.T * Bk)/(sk.T*Bk*sk) + (yk.T*yk)/(yk*sk)
        k = k + 1
        x0 = x
        print("--------------------------")
        print("当前点为%s" % x0.T)  # FIX: message typo 为为 -> 为
        print("当前点的值为%f" % fun(x0))
        print("--------------------------")

    return x, k
# Script entry point: run BFGS on the quadratic test problem
# (textbook example 4.1.1) from the starting point (1, 1).
if __name__ == "__main__":
    x0 = np.array([[1],[1]]) # starting point, shape (2,1)
    x,  k = BGFS(fun, gfun, x0)
    print("BFGS")
    print("本题使用的例子是课本p56例4.1.1,但是采用的非精确搜索,结果如下")
    print("迭代次数为%d次" %k)
    print("最优点为%s" %x.T)
    # FIX: %f instead of %d -- %d truncates the float minimum toward zero
    # (e.g. -3.99999 would print as -3); the sibling scripts all use %f.
    print("最小值为%f" %fun(x))
    
    

拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法法的python实现[数值最优化2021]_第2张图片

高斯牛顿法-GN

# -*- coding: utf-8 -*-#
# Author: xhc
# Date:    2021-05-29 14:56
# project:  1_zyh
# Name:    GN.py

import numpy as np
#fun =

def gfun(x):
    """Residual vector r(x) of the nonlinear system, as a (2,1) matrix."""
    arr = np.array(x)
    a, b = arr[0][0], arr[1][0]
    r1 = a - 0.7 * np.sin(a) - 0.2 * np.cos(b)
    r2 = b - 0.7 * np.cos(a) + 0.2 * np.sin(b)
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([[r1], [r2]])

def Hess(x):
    """Jacobian J(x) of the residual map, as a (2,2) matrix."""
    arr = np.array(x)
    a, b = arr[0][0], arr[1][0]
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([[1 - 0.7 * np.cos(a), 0.2 * np.sin(b)],
                      [0.7 * np.sin(a), 1 + 0.2 * np.cos(b)]])

def GN(gfun, Hess, x0):
    """Solve a nonlinear least-squares problem with the Gauss-Newton method.

    Despite the parameter names, `gfun` returns the residual vector r(x)
    (2,1) and `Hess` returns its Jacobian J(x) (2,2); the method minimizes
    phi(x) = 0.5*||r(x)||^2 with an Armijo backtracking line search.

    Args:
        gfun: residual function, returns a (2,1) matrix.
        Hess: Jacobian of the residual, returns a (2,2) matrix.
        x0:   starting point, shape (2,1).

    Returns:
        (x, k): the final iterate and the number of iterations performed.
    """
    maxk = 5000      # maximum number of iterations
    rho = 0.4        # backtracking contraction factor
    sigma = 0.4      # sufficient-decrease constant
    k = 0            # iteration counter
    epsilon = 1e-6   # stop once ||J^T r|| drops below this
    while k < maxk:
        fk = gfun(x0)      # residual r(x), (2,1)
        jac = Hess(x0)     # Jacobian J(x), (2,2)
        gk = jac.T * fk    # gradient of phi: J^T r, (2,1)
        # FIX: test convergence BEFORE forming and inverting J^T J, so the
        # final iteration does not pay for an unused linear solve.
        if np.linalg.norm(gk) < epsilon:
            break
        dk = -(jac.T * jac).I.dot(gk)  # Gauss-Newton direction: (J^T J) d = -J^T r
        # Armijo backtracking line search on phi.
        oldf = 0.5 * np.linalg.norm(fk)**2  # hoisted: invariant inside the loop
        m = 0
        mk = 0
        while m < 30:
            newf = 0.5 * np.linalg.norm(gfun(x0 + (rho**m) * dk))**2
            if newf < oldf + sigma * (rho**m)*gk.T*dk:
                mk = m
                break
            m = m + 1

        x0 = x0 + (rho**mk) * dk
        k = k + 1
        print("--------------------------")
        print("当前点为%s" % x0.T)  # FIX: message typo 为为 -> 为
        print("--------------------------")
    x = x0
    return x,  k
# Script entry point: solve the nonlinear system with Gauss-Newton,
# starting from the origin.
if __name__ == "__main__":
    x0 = np.array([[0], [0]])  # starting point, shape (2,1)
    x, k = GN(gfun, Hess, x0)
    print("GN")
    print("本题实现非独立完成,在课本找不到例题,最初使用1/2(cos^2(x1)+1/2(sin^2(x2)) 会产生奇异矩阵,无法求逆。更换函数 结果如下")
    print("迭代次数为%d次" % k)
    print("最优点为%s" % x.T)
    # print("最小值为%d" %fun(x))

        

拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法法的python实现[数值最优化2021]_第3张图片

牛顿法-Newton

# -*- coding: utf-8 -*-#
# Author: xhc
# Date:    2021-05-28 17:06
# project:  1_zyh
# Name:    Newton.py
import numpy as np

# Objective function: f(x) = 0.5*x1^2 + x2^2 - x1*x2 - x1
# (textbook example 3.2.1; x is a (2,1) array/matrix, indexed as x[0], x[1])
fun = lambda x:0.5*x[0]**2+x[1]**2-x[0]*x[1]-x[0]

def gfun(x):
    """Gradient of f(x) = 0.5*x1^2 + x2^2 - x1*x2 - x1, as a (1,2) matrix."""
    arr = np.array(x)
    x1, x2 = arr[0][0], arr[1][0]
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([x1 - x2 - 1, -x1 + 2 * x2])

def Hess(x):
    """Hessian of the quadratic objective: constant for every x, (2,2) matrix."""
    # The objective is quadratic, so the Hessian does not depend on x.
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([[1, -1],
                      [-1, 2]])

def DampNewtonMethod(fun, gfun, Hessian, x0):
    """Minimize `fun` with the damped Newton method (Armijo line search).

    Args:
        fun:     objective function, callable on a (2,1) point.
        gfun:    gradient of `fun`; returns a (1,2) matrix.
        Hessian: Hessian of `fun`; returns a (2,2) matrix.
        x0:      starting point, shape (2,1).

    Returns:
        (x, k): the final iterate and the number of iterations performed.
    """
    maxk = 5000      # maximum number of iterations
    rho = 0.4        # backtracking contraction factor
    sigma = 0.4      # sufficient-decrease constant
    k = 0            # iteration counter
    epsilon = 1e-6   # stop once the gradient norm drops below this
    while k < maxk:
        gk = gfun(x0)  # gradient, (1,2)
        # FIX: test convergence BEFORE computing the Hessian and solving for
        # dk, so the final iteration does not pay for an unused linear solve.
        if np.linalg.norm(gk) < epsilon:
            break
        Gk = Hessian(x0)      # Hessian, (2,2)
        dk = -Gk.I.dot(gk.T)  # Newton direction: solve G*d = -g
        # Armijo backtracking: smallest m with sufficient decrease, step = rho**m.
        m = 0
        mk = 0
        while m < 20:
            if fun(x0 + (rho**m) * dk) < fun(x0) + (sigma * (rho**m) * gk.dot(dk)):
                mk = m
                break
            m = m + 1
        x0 = x0 + (rho**mk) * dk
        k = k + 1
        print("--------------------------")
        print("当前点为%s" % x0.T)  # FIX: message typo 为为 -> 为
        print("当前点的值为%f" % fun(x0))
        print("--------------------------")
    x = x0
    return x, k
# Script entry point: run the damped Newton method on the quadratic test
# problem (textbook example 3.2.1) starting from the origin.
if __name__ == "__main__":
    x0 = np.array([[0],[0]])  # starting point, shape (2,1)
    x, k = DampNewtonMethod(fun, gfun, Hess, x0)
    print("牛顿法")
    print("本题使用的例子是课本p41例3.2.1,但是采用的非精确搜索,结果如下")
    print("迭代次数为%d次" %k)
    print("最优点为%s" %x.T)
    print("最小值为%f" %fun(x))

拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法法的python实现[数值最优化2021]_第4张图片

共轭梯度法

# -*- coding: utf-8 -*-#
# Author: xhc
# Date:    2021-05-29 15:40
# project:  1_zyh
# Name:    共轭梯度法法.py


import numpy as np

# Objective function: f(x) = 0.5*x1^2 + x2^2
# (textbook example 5.2.1; x is a (2,1) array/matrix, indexed as x[0], x[1])
fun = lambda x: 0.5*x[0]**2+x[1]**2

# 梯度
def gfun(x):
    """Gradient of f(x) = 0.5*x1^2 + x2^2, as a (1,2) matrix."""
    arr = np.array(x)
    # np.matrix(...) builds the same matrix object np.mat(...) did.
    return np.matrix([arr[0][0], 2 * arr[1][0]])

def FR_Gradient(fun, gfun, x0):
    """Minimize `fun` with the Fletcher-Reeves nonlinear conjugate gradient
    method, restarting every n+1 iterations, with an Armijo line search.

    Args:
        fun:  objective function, callable on a (2,1) point.
        gfun: gradient of `fun`; returns a (1,2) matrix.
        x0:   starting point, shape (2,1).

    Returns:
        (x, k): the final iterate and the number of iterations performed.
    """
    maxk = 5000      # maximum number of iterations
    rho = 0.4        # backtracking contraction factor
    sigma = 0.4      # sufficient-decrease constant
    k = 0            # iteration counter
    epsilon = 1e-6   # stop once the gradient norm drops below this
    n = len(x0)      # problem dimension; restart every n+1 iterations
    g0 = 0           # previous gradient (only read after the first iteration)
    d0 = 0           # previous search direction
    while k < maxk:
        g = gfun(x0)
        # FIX: exact integer restart test k % (n+1) == 0 replaces the
        # equivalent float arithmetic k - (n+1)*np.floor(k/(n+1)) + 1 == 1.
        if k % (n + 1) == 0:
            d = -g  # restart step: plain steepest descent
        else:
            beta = (g.dot(g.T))/(g0.dot(g0.T))  # FR coefficient ||g||^2 / ||g0||^2
            d = -g + beta*d0
            gd = g.dot(d.T)
            if gd >= 0.0:  # not a descent direction: fall back to steepest descent
                d = -g
        if np.linalg.norm(g) < epsilon:  # converged
            break
        # Armijo backtracking: smallest m with sufficient decrease, step = rho**m.
        fx0 = fun(x0)  # hoisted: invariant inside the loop
        m = 0
        mk = 0
        while m < 20:
            if fun(x0 + (rho**m)*d.T) < fx0 + sigma * (rho**m) * g.dot(d.T):
                mk = m
                break
            m = m + 1
        x0 = x0 + (rho**mk)*d.T
        x0 = np.array(x0)
        g0 = g
        d0 = d
        k = k + 1
        print("--------------------------")
        print("当前点为%s" % x0.T)  # FIX: message typo 为为 -> 为
        print("当前点的值为%f" % fun(x0))
        print("--------------------------")
    x = x0
    return x, k
# Script entry point: run FR conjugate gradient on the quadratic test
# problem (textbook example 5.2.1) from the starting point (2, 1).
if __name__ == "__main__":
    x0 = np.array([[2],[1]])  # starting point, shape (2,1)
    x, k = FR_Gradient(fun, gfun, x0)
    print("共轭梯度法--FR算法")
    print("本题使用的例子是课本p76例5.2.1")
    print("迭代次数为%d次" %k)
    print("最优点为%s" %x.T)
    print("最小值为%f"%fun(x))

拟牛顿法、高斯牛顿法、牛顿法、共轭梯度法法的python实现[数值最优化2021]_第5张图片

总结

如有错误,欢迎指出。

在这里插入图片描述

你可能感兴趣的:(课设汇总,拟牛顿法,高斯牛顿,共轭梯度法,数值最优化)