The particle swarm optimization (PSO) algorithm models the birds in a flock as massless particles. Each particle has only two attributes: a velocity, which describes how fast it moves, and a position, which describes where it currently is in the search space. Each particle searches for the optimum independently, records the best solution it has found so far as its personal best, and shares it with the rest of the swarm; the best of all personal bests becomes the current global best. Every particle then adjusts its velocity and position based on its own personal best and the shared global best. The multi-scale cooperative mutating adaptive PSO (MAEPSO) explores the solution space with adaptive Gaussian mutation operators whose variances differ in magnitude; this mix of large- and small-scale mutations drives the whole population to probe the solution space at widely dispersed scales, which ultimately gives the algorithm better convergence.
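For reference, both implementations below build on the standard PSO update rule. A minimal sketch (the function name `pso_step` is illustrative; the coefficients mirror the ones used later in the scripts):

```
import random

def pso_step(x, v, pbest, gbest, w=0.5, c1=1.4, c2=1.4):
    """One standard PSO update for a single particle (minimal sketch)."""
    for j in range(len(x)):
        r1, r2 = random.random(), random.random()
        # velocity = inertia + cognitive pull (personal best) + social pull (global best)
        v[j] = w * v[j] + c1 * r1 * (pbest[j] - x[j]) + c2 * r2 * (gbest[j] - x[j])
        x[j] += v[j]  # position follows the new velocity
```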
This post implements, in Python, the paper "一种多尺度协同变异的粒子群优化算法" ("A Particle Swarm Optimization Algorithm with Multi-Scale Cooperative Mutation", Tao Xinmin et al., Harbin Engineering University, Journal of Software, issue 7, 2012). Both the traditional PSO algorithm and the MAEPSO algorithm proposed in the paper are implemented, and the benchmark functions from the paper are tested.
Algorithm Design
Traditional PSO updates positions and velocities using only the current position, the personal bests, and the global best. MAEPSO additionally introduces a multi-scale Gaussian mutation operator into the position update: after each mutation, the variances of the multi-scale Gaussian operators are updated from the positions of the sub-swarms, and the escape threshold is then updated based on how many times each dimension has mutated. This keeps the mutations of the whole population effective, and ultimately gives the algorithm better convergence.
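Concretely, the variance update reconstructed from `update_sigma()` in the code below works as follows: the swarm is split into M sub-swarms, and each operator's standard deviation σ_m is rescaled by the relative mean fitness FitX_m of its sub-swarm,

$$\sigma_m \leftarrow \sigma_m \cdot \exp\!\left(\frac{M\,\mathrm{FitX}_m - \sum_{k=1}^{M}\mathrm{FitX}_k}{\max_k \mathrm{FitX}_k - \min_k \mathrm{FitX}_k}\right),$$

after which σ_m is folded back into (0, W/4] so the mutation scales stay bounded.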
Each particle starts from a random solution; its next move (the velocity) is then determined from its personal best and the global best, and this update is iterated. In MAEPSO the velocity update changes: to keep particles from stagnating in local optima, a per-dimension threshold is set. When a particle's velocity in some dimension falls below the threshold, the particle escapes, and the threshold is gradually reduced so that the algorithm explores the solution space effectively early on and performs a precise local search later. During an escape, the velocity is regenerated by the Gaussian mutation operators defined in the paper, and the influence of inertia is gradually reduced in the later stages of the run.
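The escape itself, condensed from `update_vp()` below into a self-contained sketch (the helper `escape` and its signature are illustrative; the full code additionally counts escapes per dimension in `G` to drive the threshold update):

```
import random

def escape(x, j, sigma, W, f, sample_normal):
    """Pick the best escape displacement for dimension j of position x.
    f is the fitness function; sigma holds the M Gaussian std-devs."""
    base = x[j]
    def trial(delta):
        # fitness of x with dimension j displaced by delta (then restored)
        x[j] = base + delta
        val = f(x)
        x[j] = base
        return val
    deltas = [sample_normal() * s for s in sigma]           # M Gaussian jumps
    deltas.append(random.uniform(-1, 1) * (W - abs(base)))  # uniform jump within the bound
    return min(deltas, key=trial)  # keep the fittest candidate displacement
```

The chosen displacement replaces the stalled velocity, so the particle keeps moving with a step that has already proven useful.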
Implementation steps:
① Initialize the population
② Update fitness values, personal bests, the global best, and velocities
③ Check whether each particle needs to escape
④ If the escape condition is met, perform the escape according to the formulas in the paper
⑤ Update particle positions, the multi-scale Gaussian mutation operators, and the thresholds
⑥ If the termination condition is not met, go back to ②
The full MAEPSO implementation follows.
```
import random
import math

PI = 3.1415927
INF = 1e10
Size = 20        # swarm size
M = 5            # number of mutation scales / sub-swarms
dim = 30         # problem dimension
W = 100          # search space bound: [-W, W]
c1 = 1.4
c2 = 1.4
w_max = 0.5
w_min = 0.4
w = w_max        # inertia weight
P = Size // M    # particles per sub-swarm
choice = "Tablet"  # change this to the desired benchmark function

# Initialize particle swarm parameters
X = [[0] * dim for _ in range(Size)]   # positions
pb = [[0] * dim for _ in range(Size)]  # personal bests
V = [[0] * dim for _ in range(Size)]   # velocities
T = [0.5] * dim                        # per-dimension escape thresholds
G = [0] * dim                          # per-dimension escape counters
gb = [0] * dim                         # global best
sigma = [2 * W] * M                    # std-devs of the M Gaussian mutation operators
# Fitness value for the selected benchmark function
def function(X):
    sum_val = 0
    if choice == "Tablet":
        sum_val = 1e6 * X[0] ** 2
        for i in range(1, len(X)):
            sum_val += X[i] ** 2
    elif choice == "Quadric":
        for i in range(len(X)):
            temp_sum = 0
            for j in range(i + 1):
                temp_sum += X[j]
            sum_val += temp_sum * temp_sum
    elif choice == "Rosenbrock":
        for i in range(len(X) - 1):
            sum_val += 100 * ((X[i + 1] - X[i] * X[i]) ** 2) + (X[i] - 1) ** 2
    # The remaining benchmarks are disabled; move them back into the
    # if/elif chain above to enable them:
    '''
    elif choice == "Griewank":
        sum1 = sum(X[i] ** 2 for i in range(len(X)))
        sum2 = 1
        for i in range(len(X)):
            sum2 *= math.cos(X[i] / math.sqrt(i + 1))
        sum_val = sum1 / 4000.0 - sum2 + 1
    elif choice == "Rastrigin":
        A = 10
        for i in range(len(X)):
            sum_val += X[i] ** 2 - A * math.cos(2 * PI * X[i]) + A
    elif choice == "SchafferF7":
        for i in range(len(X) - 1):
            sum_val += (X[i] ** 2 + X[i + 1] ** 2) ** 0.25 * \
                (math.sin(50 * (X[i] ** 2 + X[i + 1] ** 2) ** 0.1) + 1.0)
    '''
    return sum_val


def func(X):
    # Fitness of every particle in a list of positions
    return [function(x) for x in X]
def sample_normal():
    # Marsaglia polar method for a standard normal sample
    u = random.uniform(-1, 1)
    v = random.uniform(-1, 1)
    r = u * u + v * v
    if r == 0 or r >= 1:
        return sample_normal()
    c = math.sqrt(-2 * math.log(r) / r)
    return u * c
def init():
    # Random positions within the search bound, velocities in [-1, 1]
    for i in range(Size):
        for j in range(dim):
            X[i][j] = random.uniform(-W, W)
            pb[i][j] = X[i][j]
            V[i][j] = random.uniform(-1, 1)
    fit = func(X)
    min_idx = fit.index(min(fit))
    gb[:] = X[min_idx][:]
def update_vp():
    for i in range(Size):
        for j in range(dim):
            rand1 = random.random()
            rand2 = random.random()
            V[i][j] = w * V[i][j] + c1 * rand1 * (pb[i][j] - X[i][j]) + c2 * rand2 * (gb[j] - X[i][j])
            # Escape: velocity in this dimension has stalled below the threshold
            if abs(V[i][j]) < T[j]:
                G[j] += 1
                min_f = INF
                min_ind = 0
                temp_x = X[i][j]
                # Try M Gaussian mutations of different scales, keep the best
                randn_sigma = [sample_normal() * sigma[k] for k in range(M)]
                for m in range(M):
                    X[i][j] = temp_x + randn_sigma[m]
                    temp_f = function(X[i])
                    if temp_f < min_f:
                        min_f = temp_f
                        min_ind = m
                # Uniform escape within the remaining room to the bound
                Vmax = W - abs(temp_x)
                rand3 = random.uniform(-1, 1)
                X[i][j] = temp_x + rand3 * Vmax
                # Keep whichever escape (Gaussian or uniform) is fitter
                if min_f < function(X[i]):
                    V[i][j] = randn_sigma[min_ind]
                else:
                    V[i][j] = rand3 * Vmax
                X[i][j] = temp_x
            X[i][j] += V[i][j]
        if function(pb[i]) > function(X[i]):
            pb[i][:] = X[i][:]
        if function(gb) > function(pb[i]):
            gb[:] = pb[i][:]
def FitX(m):
    # Mean fitness of the m-th sub-swarm (particles (m-1)*P .. m*P-1)
    sub_group = [X[i][:] for i in range((m - 1) * P, m * P)]
    sub_f = func(sub_group)
    return sum(sub_f) / (P * 1.0)
def update_sigma():
    # Rescale each mutation operator's std-dev by its sub-swarm's relative fitness
    FitXs = []
    total_FitX = 0
    for i in range(M):
        fit = FitX(i + 1)
        total_FitX += fit
        FitXs.append(fit)
    max_FitX = max(FitXs)
    min_FitX = min(FitXs)
    for i in range(M):
        sigma[i] *= math.exp((M * FitXs[i] - total_FitX) / (max_FitX - min_FitX + 1e-10))
        # Fold the std-dev back into (0, W/4] to keep the mutation scale bounded
        while sigma[i] > (W / 4.0):
            sigma[i] -= W / 4.0
def update_t():
    # Shrink a dimension's escape threshold after k1 escapes
    k1 = 5
    k2 = 10
    for i in range(dim):
        if G[i] > k1:
            T[i] /= k2 * 1.0
            G[i] = 0
# Run the optimization process for the selected benchmark
def evolve(step, epoch):
    global w
    result = []
    for i in range(epoch):
        # Initialize the swarm and parameters
        init()
        # Run the optimization loop
        for j in range(step):
            update_vp()
            update_sigma()
            update_t()
            # Linearly decay the inertia weight
            w = w_max - (w_max - w_min) * j / step
            # Stop early if the global optimum has been reached
            if function(gb) == 0:
                print("finished")
                break
            temp_res = function(gb)
            print(i, j, temp_res)
        res = function(gb)
        result.append(res)
        print("===============================")
        print(i, "\t\t\t", res)
        print(gb)
if __name__ == "__main__":
    print(f"Optimizing for choice: {choice}")
    evolve(4000, 1)
    print("Best solution for choice", choice)
    print("Optimal Path:", gb)
    print("Best Fitness:", function(gb))
    print("===============================")
```
PSO result analysis
Tablet function
For Tablet, the PSO result here is actually better than the one reported in the paper.
Quadric function
For Quadric, the result is close to the original paper's.
Rosenbrock function
For Rosenbrock, the result is close to the paper's.
The remaining benchmark functions are left for the reader to verify.
```
import random
import math

PI = 3.1415927
INF = 1e10
Size = 20
dim = 30
W = 100
c1 = 1.4
c2 = 1.4
w_max = 0.5
w_min = 0.4
w = w_max

# Initialize particle swarm parameters
X = [[0] * dim for _ in range(Size)]
pb = [[0] * dim for _ in range(Size)]
V = [[0] * dim for _ in range(Size)]
gb = [0] * dim
def function(X):
    sum_val = 0
    if choice == "Tablet":
        sum_val = 1e6 * X[0] ** 2
        for i in range(1, len(X)):
            sum_val += X[i] ** 2
    elif choice == "Quadric":
        for i in range(len(X)):
            temp_sum = 0
            for j in range(i + 1):
                temp_sum += X[j]
            sum_val += temp_sum * temp_sum
    elif choice == "Rosenbrock":
        for i in range(len(X) - 1):
            sum_val += 100 * ((X[i + 1] - X[i] * X[i]) ** 2) + (X[i] - 1) ** 2
    # The remaining benchmarks are disabled; move them back into the
    # if/elif chain above to enable them:
    '''
    elif choice == "Griewank":
        sum1 = sum(X[i] ** 2 for i in range(len(X)))
        sum2 = 1
        for i in range(len(X)):
            sum2 *= math.cos(X[i] / math.sqrt(i + 1))
        sum_val = sum1 / 4000.0 - sum2 + 1
    elif choice == "Rastrigin":
        A = 10
        for i in range(len(X)):
            sum_val += X[i] ** 2 - A * math.cos(2 * PI * X[i]) + A
    elif choice == "SchafferF7":
        for i in range(len(X) - 1):
            sum_val += (X[i] ** 2 + X[i + 1] ** 2) ** 0.25 * \
                (math.sin(50 * (X[i] ** 2 + X[i + 1] ** 2) ** 0.1) + 1.0)
    '''
    return sum_val
# Initialize particle positions and velocities
def init():
    for i in range(Size):
        for j in range(dim):
            X[i][j] = random.uniform(-W, W)
            pb[i][j] = X[i][j]
            V[i][j] = random.uniform(-1, 1)
    fit = [function(X[i]) for i in range(Size)]
    min_val = min(fit)
    min_idx = fit.index(min_val)
    gb[:] = X[min_idx][:]
# Update particle velocities and positions
def update_vp():
    for i in range(Size):
        for j in range(dim):
            rand1 = random.random()
            rand2 = random.random()
            V[i][j] = w * V[i][j] + c1 * rand1 * (pb[i][j] - X[i][j]) + c2 * rand2 * (gb[j] - X[i][j])
            X[i][j] += V[i][j]
        if function(pb[i]) > function(X[i]):
            pb[i][:] = X[i][:]
        if function(gb) > function(pb[i]):
            gb[:] = pb[i][:]
# Run the particle swarm optimization
def pso_optimization(steps, epochs):
    global w
    result = []
    for _ in range(epochs):
        init()
        for t in range(steps):
            update_vp()
            # Linearly decay the inertia weight
            w = w_max - (t / steps) * (w_max - w_min)
            # Stop early if the global optimum has been found
            if function(gb) == 0:
                print("global optimum reached")
                break
            temp_res = function(gb)
            print(t, temp_res)
        res = function(gb)
        result.append(res)
        print("===============================")
        print("Best fitness:", res)
        print("Best position:", gb)
if __name__ == "__main__":
    choice = "Rosenbrock"  # change to the desired benchmark function
    print("Best solution for choice", choice)
    pso_optimization(5000, 1)
```
MAEPSO result analysis
Tablet function
For Tablet, the result is close to the paper's and better than PSO.
Quadric function
For Quadric, the result essentially matches the paper's and is better than PSO.
Rosenbrock function
For Rosenbrock, the result is about the same as the paper's and better than PSO.
MAEPSO is an improved particle swarm optimizer designed to address the weaknesses of traditional PSO, and several of its design choices account for the better results above. The multi-scale cooperative mutation lets the swarm mutate at several different scales at once, which strengthens exploration, helps particles jump out of local optima, and supports a more thorough global search. The algorithm is also adaptive: it adjusts its internal parameters (the mutation variances and the escape thresholds) as the search progresses, which helps it converge faster toward the optimum. By maintaining population diversity in this way, it resists premature convergence and covers the search space more broadly. Together, these mechanisms generally give MAEPSO an edge over standard PSO, but the size of the advantage depends on the specific problem and application, so the two should still be compared case by case in practice.
The implementation above is based on this reference blog post:
https://blog.csdn.net/breeze_blows/article/details/103088804