Intelligent Optimization Algorithms and Their MATLAB Implementations
Advantages of intelligent optimization algorithms
Classification of intelligent optimization algorithms
The main intelligent optimization algorithms
Advantages of intelligent optimization algorithms
Intelligent optimization algorithms are becoming increasingly popular in engineering because they can solve the optimal-value problems that arise in practical applications. They only need to search the feasible region for the solution with the lowest fitness (for a minimization problem); they require no gradient information about the objective function, and their stochastic, population-based search helps them escape local optima, although convergence to the global optimum is not guaranteed. In experiments, these algorithms achieve comparatively fast run times and good accuracy even on high-dimensional, highly complex functions.
Classification of intelligent optimization algorithms
Intelligent optimization algorithms can be roughly divided into three categories:
① Evolution-based: e.g. the genetic algorithm (GA) and differential evolution (DE)
② Physics-based: e.g. simulated annealing (SA) and the gravitational search algorithm (GSA)
③ Swarm-based: e.g. particle swarm optimization (PSO); most of the algorithms collected below (WOA, GWO, GOA, MFO, BA, BOA) also fall into this category
The main intelligent optimization algorithms:
I recently gathered the MATLAB implementations of several common intelligent optimization algorithms from around the web into one place, so that they are easy to use and so that readers can point out any mistakes. A minimal usage sketch showing the shared calling convention follows the list.
Whale Optimization Algorithm (WOA)
Particle Swarm Optimization (PSO)
Differential Evolution (DE)
Bat Algorithm (BA)
Grey Wolf Optimizer (GWO)
Grasshopper Optimization Algorithm (GOA)
Moth-Flame Optimization (MFO)
Genetic Algorithm (GA)
Butterfly Optimization Algorithm (BOA)
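All of the solvers collected here share the same calling convention, [best_score, best_pos, curve] = ALG(SearchAgents_no, Max_iteration, lb, ub, dim, fobj, Positions), so they can be swapped freely in the comparison script further down. As a minimal sketch (assuming WOA.m and initialization.m from later in this post are on the MATLAB path), a single solver can be run on the sphere function like this:
% Minimal usage sketch (not part of the original comparison script):
% minimize the 30-dimensional sphere function with WOA alone.
N = 30;                    % number of search agents
T = 500;                   % maximum number of iterations
dim = 30;                  % problem dimension
lb = -100; ub = 100;       % identical bounds for every variable
fobj = @(x) sum(x.^2);     % sphere function, global minimum 0 at the origin
Positions = initialization(N, dim, ub, lb);   % shared starting population
[best_score, best_pos, curve] = WOA(N, T, lb, ub, dim, fobj, Positions);
disp(['Best value found by WOA: ', num2str(best_score)]);
plot(curve); xlabel('Iteration'); ylabel('Best score obtained so far');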
Original post: https://blog.csdn.net/liuren666/article/details/116521346
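Main comparison script: the script below runs all nine solvers on the same benchmark function, starting each algorithm from the same initial population and timing every run with clock/etime.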
clear all
clc
SearchAgents_no = 30; % Number of search agents
Function_name='F1'; % Name of the test function that can be from F1 to F23 (Table 1,2,3 in the paper)
Max_iteration = 500; % Maximum number of iterations
% Load details of the selected benchmark function
[lb,ub,dim,fobj]=Get_Functions_details(Function_name);
Positions = initialization(SearchAgents_no,dim,ub,lb);
WOA1 = clock;
[WOA_score,WOA_pos,WOA_cg_curve]=WOA(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
WOA2 = clock;
WOA_time = etime(WOA2,WOA1)
PSO1 = clock;
[PSO_score,PSO_pos,PSO_cg_curve]=PSO(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
PSO2 = clock;
PSO_time = etime(PSO2,PSO1)
DE1 = clock;
[DE_score,DE_pos,DE_cg_curve]=DEA(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
DE2 = clock;
DE_time = etime(DE2,DE1)
BA1 = clock;
[BA_score,BA_pos,BA_cg_curve]=BA(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
BA2 = clock;
BA_time = etime(BA2,BA1)
GWO1 = clock;
[GWO_score,GWO_pos,GWO_cg_curve]=GWO(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
GWO2 = clock;
GWO_time = etime(GWO2,GWO1)
GOA1 = clock;
[GOA_score,GOA_pos,GOA_cg_curve]=GOA(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
GOA2 = clock;
GOA_time = etime(GOA2,GOA1)
MFO1 = clock;
[MFO_score,MFO_pos,MFO_cg_curve]=MFO(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
MFO2 = clock;
MFO_time = etime(MFO2,MFO1)
GA1 = clock;
[GA_score,GA_pos,GA_cg_curve]=GA(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
GA2 = clock;
GA_time = etime(GA2,GA1)
BOA1 = clock;
[BOA_score,BOA_pos,BOA_cg_curve]=BOAL(SearchAgents_no,Max_iteration,lb,ub,dim,fobj,Positions);
BOA2 = clock;
BOA_time = etime(BOA2,BOA1)
figure('Position',[269 240 660 290])
%Draw search space
subplot(1,2,1);
func_plot(Function_name);
title('Parameter space')
xlabel('x_1');
ylabel('x_2');
zlabel([Function_name,'( x_1 , x_2 )'])
%Draw objective space
subplot(1,2,2);
%semilogy(WOA_cg_curve,'Color','r',WOA_cg_curve1,'Color','g')
plot(1:Max_iteration,WOA_cg_curve,...
1:Max_iteration,PSO_cg_curve,1:Max_iteration,DE_cg_curve(1:Max_iteration),...
1:Max_iteration,BA_cg_curve,1:Max_iteration,GOA_cg_curve(1:Max_iteration),'k',...
1:Max_iteration,GWO_cg_curve,1:Max_iteration,MFO_cg_curve,...
1:Max_iteration,GA_cg_curve,1:Max_iteration,BOA_cg_curve)
legend('WOA','PSO','DE','BA','GOA','GWO','MFO','GA','BOA')
title('Objective space')
xlabel('Iteration');
ylabel('Best score obtained so far');
axis tight
grid on
box on
%legend('BOA')
display(['The best solution obtained by WOA is : ', num2str(WOA_pos)]);
display(['The best optimal value of the objective function found by WOA is : ', num2str(WOA_score)]);
display(['The best solution obtained by PSO is : ', num2str(PSO_pos)]);
display(['The best optimal value of the objective function found by PSO is : ', num2str(PSO_score)]);
display(['The best solution obtained by DE is : ', num2str(DE_pos')]);
display(['The best optimal value of the objective function found by DE is : ', num2str(DE_score)]);
display(['The best solution obtained by BA is : ', num2str(BA_pos)]);
display(['The best optimal value of the objective function found by BA is : ', num2str(BA_score)]);
display(['The best solution obtained by GOA is : ', num2str(GOA_pos)]);
display(['The best optimal value of the objective function found by GOA is : ', num2str(GOA_score)]);
display(['The best solution obtained by GWO is : ', num2str(GWO_pos)]);
display(['The best optimal value of the objective function found by GWO is : ', num2str(GWO_score)]);
display(['The best solution obtained by MFO is : ', num2str(MFO_pos)]);
display(['The best optimal value of the objective function found by MFO is : ', num2str(MFO_score)]);
display(['The best solution obtained by GA is : ', num2str(GA_pos)]);
display(['The best optimal value of the objective function found by GA is : ', num2str(GA_score)]);
display(['The best solution obtained by BOA is : ', num2str(BOA_pos)]);
display(['The best optimal value of the objective function found by BOA is : ', num2str(BOA_score)]);
Benchmark test functions
function [lb,ub,dim,fobj] = Get_Functions_details(F)
switch F
case 'F1'
fobj = @F1;
lb=-100;
ub=100;
dim=30;
case 'F2'
fobj = @F2;
lb=-10;
ub=10;
dim=30;
case 'F3'
fobj = @F3;
lb=-100;
ub=100;
dim=30;
case 'F4'
fobj = @F4;
lb=-100;
ub=100;
dim=30;
case 'F5'
fobj = @F5;
lb=-30;
ub=30;
dim=30;
case 'F6'
fobj = @F6;
lb=-100;
ub=100;
dim=30;
case 'F7'
fobj = @F7;
lb=-1.28;
ub=1.28;
dim=30;
case 'F8'
fobj = @F8;
lb=-500;
ub=500;
dim=30;
case 'F9'
fobj = @F9;
lb=-5.12;
ub=5.12;
dim=30;
case 'F10'
fobj = @F10;
lb=-32;
ub=32;
dim=30;
case 'F11'
fobj = @F11;
lb=-600;
ub=600;
dim=30;
case 'F12'
fobj = @F12;
lb=-50;
ub=50;
dim=30;
case 'F13'
fobj = @F13;
lb=-50;
ub=50;
dim=30;
case 'F14'
fobj = @F14;
lb=-65.536;
ub=65.536;
dim=2;
case 'F15'
fobj = @F15;
lb=-5;
ub=5;
dim=4;
case 'F16'
fobj = @F16;
lb=-5;
ub=5;
dim=2;
case 'F17'
fobj = @F17;
lb=[-5,0];
ub=[10,15];
dim=2;
case 'F18'
fobj = @F18;
lb=-2;
ub=2;
dim=2;
case 'F19'
fobj = @F19;
lb=0;
ub=1;
dim=3;
case 'F20'
fobj = @F20;
lb=0;
ub=1;
dim=6;
case 'F21'
fobj = @F21;
lb=0;
ub=10;
dim=4;
case 'F22'
fobj = @F22;
lb=0;
ub=10;
dim=4;
case 'F23'
fobj = @F23;
lb=0;
ub=10;
dim=4;
end
end
% F1
function o = F1(x)
o=sum(x.^2);
end
% F2
function o = F2(x)
o=sum(abs(x))+prod(abs(x));
end
% F3
function o = F3(x)
dim=size(x,2);
o=0;
for i=1:dim
o=o+sum(x(1:i))^2;
end
end
% F4
function o = F4(x)
o=max(abs(x));
end
% F5
function o = F5(x)
dim=size(x,2);
o=sum(100*(x(2:dim)-(x(1:dim-1).^2)).^2+(x(1:dim-1)-1).^2);
end
% F6
function o = F6(x)
o=sum(abs((x+.5)).^2);
end
% F7
function o = F7(x)
dim=size(x,2);
o=sum([1:dim].*(x.^4))+rand;
end
% F8
function o = F8(x)
o=sum(-x.*sin(sqrt(abs(x))));
end
% F9
function o = F9(x)
dim=size(x,2);
o=sum(x.^2-10*cos(2*pi.*x))+10*dim;
end
% F10
function o = F10(x)
dim=size(x,2);
o=-20*exp(-.2*sqrt(sum(x.^2)/dim))-exp(sum(cos(2*pi.*x))/dim)+20+exp(1);
end
% F11
function o = F11(x)
dim=size(x,2);
o=sum(x.^2)/4000-prod(cos(x./sqrt([1:dim])))+1;
end
% F12
function o = F12(x)
dim=size(x,2);
o=(pi/dim)*(10*((sin(pi*(1+(x(1)+1)/4)))^2)+sum((((x(1:dim-1)+1)./4).^2).*...
(1+10.*((sin(pi.*(1+(x(2:dim)+1)./4)))).^2))+((x(dim)+1)/4)^2)+sum(Ufun(x,10,100,4));
end
% F13
function o = F13(x)
dim=size(x,2);
o=.1*((sin(3*pi*x(1)))^2+sum((x(1:dim-1)-1).^2.*(1+(sin(3.*pi.*x(2:dim))).^2))+...
((x(dim)-1)^2)*(1+(sin(2*pi*x(dim)))^2))+sum(Ufun(x,5,100,4));
end
% F14
function o = F14(x)
aS=[-32 -16 0 16 32 -32 -16 0 16 32 -32 -16 0 16 32 -32 -16 0 16 32 -32 -16 0 16 32;...
-32 -32 -32 -32 -32 -16 -16 -16 -16 -16 0 0 0 0 0 16 16 16 16 16 32 32 32 32 32];
for j=1:25
bS(j)=sum((x'-aS(:,j)).^6);
end
o=(1/500+sum(1./([1:25]+bS))).^(-1);
end
% F15
function o = F15(x)
aK=[.1957 .1947 .1735 .16 .0844 .0627 .0456 .0342 .0323 .0235 .0246];
bK=[.25 .5 1 2 4 6 8 10 12 14 16];bK=1./bK;
o=sum((aK-((x(1).*(bK.^2+x(2).*bK))./(bK.^2+x(3).*bK+x(4)))).^2);
end
% F16
function o = F16(x)
o=4*(x(1)^2)-2.1*(x(1)^4)+(x(1)^6)/3+x(1)*x(2)-4*(x(2)^2)+4*(x(2)^4);
end
% F17
function o = F17(x)
o=(x(2)-(x(1)^2)*5.1/(4*(pi^2))+5/pi*x(1)-6)^2+10*(1-1/(8*pi))*cos(x(1))+10;
end
% F18
function o = F18(x)
o=(1+(x(1)+x(2)+1)^2*(19-14*x(1)+3*(x(1)^2)-14*x(2)+6*x(1)*x(2)+3*x(2)^2))*...
(30+(2*x(1)-3*x(2))^2*(18-32*x(1)+12*(x(1)^2)+48*x(2)-36*x(1)*x(2)+27*(x(2)^2)));
end
% F19
function o = F19(x)
aH=[3 10 30;.1 10 35;3 10 30;.1 10 35];cH=[1 1.2 3 3.2];
pH=[.3689 .117 .2673;.4699 .4387 .747;.1091 .8732 .5547;.03815 .5743 .8828];
o=0;
for i=1:4
o=o-cH(i)*exp(-(sum(aH(i,:).*((x-pH(i,:)).^2))));
end
end
% F20
function o = F20(x)
aH=[10 3 17 3.5 1.7 8;.05 10 17 .1 8 14;3 3.5 1.7 10 17 8;17 8 .05 10 .1 14];
cH=[1 1.2 3 3.2];
pH=[.1312 .1696 .5569 .0124 .8283 .5886;.2329 .4135 .8307 .3736 .1004 .9991;...
.2348 .1415 .3522 .2883 .3047 .6650;.4047 .8828 .8732 .5743 .1091 .0381];
o=0;
for i=1:4
o=o-cH(i)*exp(-(sum(aH(i,:).*((x-pH(i,:)).^2))));
end
end
% F21
function o = F21(x)
aSH=[4 4 4 4;1 1 1 1;8 8 8 8;6 6 6 6;3 7 3 7;2 9 2 9;5 5 3 3;8 1 8 1;6 2 6 2;7 3.6 7 3.6];
cSH=[.1 .2 .2 .4 .4 .6 .3 .7 .5 .5];
o=0;
for i=1:5
o=o-((x-aSH(i,:))*(x-aSH(i,:))'+cSH(i))^(-1);
end
end
% F22
function o = F22(x)
aSH=[4 4 4 4;1 1 1 1;8 8 8 8;6 6 6 6;3 7 3 7;2 9 2 9;5 5 3 3;8 1 8 1;6 2 6 2;7 3.6 7 3.6];
cSH=[.1 .2 .2 .4 .4 .6 .3 .7 .5 .5];
o=0;
for i=1:7
o=o-((x-aSH(i,:))*(x-aSH(i,:))'+cSH(i))^(-1);
end
end
% F23
function o = F23(x)
aSH=[4 4 4 4;1 1 1 1;8 8 8 8;6 6 6 6;3 7 3 7;2 9 2 9;5 5 3 3;8 1 8 1;6 2 6 2;7 3.6 7 3.6];
cSH=[.1 .2 .2 .4 .4 .6 .3 .7 .5 .5];
o=0;
for i=1:10
o=o-((x-aSH(i,:))*(x-aSH(i,:))'+cSH(i))^(-1);
end
end
function o=Ufun(x,a,k,m)
o=k.*((x-a).^m).*(x>a)+k.*((-x-a).^m).*(x<(-a));
end
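As a quick sanity check (an illustrative sketch, not part of the original script), the helper can be queried and the returned function handle evaluated directly; for the sphere function F1 the global minimum is 0 at the origin:
% Usage sketch for Get_Functions_details (illustrative only).
[lb, ub, dim, fobj] = Get_Functions_details('F1');   % sphere function
disp(['F1 bounds: [', num2str(lb), ', ', num2str(ub), '], dim = ', num2str(dim)]);
disp(['F1 at the origin: ', num2str(fobj(zeros(1, dim)))]);   % expected 0
disp(['F1 at ones(1,dim): ', num2str(fobj(ones(1, dim)))]);   % expected dim = 30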
Population initialization
function Positions=initialization(SearchAgents_no,dim,ub,lb)
Boundary_no= size(ub,2); % number of boundaries
% If the boundaries of all variables are equal and the user enters a single
% number for both ub and lb
if Boundary_no==1
Positions=rand(SearchAgents_no,dim).*(ub-lb)+lb;
end
% If each variable has a different lb and ub
if Boundary_no>1
for i=1:dim
ub_i=ub(i);
lb_i=lb(i);
Positions(:,i)=rand(SearchAgents_no,1).*(ub_i-lb_i)+lb_i;
end
end
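A small check (illustrative, not from the original post) that the generated population respects the bounds, both for scalar bounds (as in F1) and for per-variable bounds (as in F17):
% Usage sketch for initialization (illustrative only).
P1 = initialization(30, 30, 100, -100);          % scalar ub/lb, as for F1
P2 = initialization(30, 2, [10, 15], [-5, 0]);   % vector ub/lb, as for F17
assert(all(P1(:) >= -100 & P1(:) <= 100));
assert(all(P2(:,1) >= -5 & P2(:,1) <= 10));
assert(all(P2(:,2) >= 0 & P2(:,2) <= 15));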
Plotting the 2-D surface of each benchmark function:
function func_plot(func_name)
[lb,ub,dim,fobj]=Get_Functions_details(func_name);
switch func_name
case 'F1'
x=-100:2:100; y=x; %[-100,100]
case 'F2'
x=-100:2:100; y=x; %[-10,10]
case 'F3'
x=-100:2:100; y=x; %[-100,100]
case 'F4'
x=-100:2:100; y=x; %[-100,100]
case 'F5'
x=-200:2:200; y=x; %[-5,5]
case 'F6'
x=-100:2:100; y=x; %[-100,100]
case 'F7'
x=-1:0.03:1; y=x; %[-1,1]
case 'F8'
x=-500:10:500;y=x; %[-500,500]
case 'F9'
x=-5:0.1:5; y=x; %[-5,5]
case 'F10'
x=-20:0.5:20; y=x;%[-500,500]
case 'F11'
x=-500:10:500; y=x;%[-0.5,0.5]
case 'F12'
x=-10:0.1:10; y=x;%[-pi,pi]
case 'F13'
x=-5:0.08:5; y=x;%[-3,1]
case 'F14'
x=-100:2:100; y=x;%[-100,100]
case 'F15'
x=-5:0.1:5; y=x;%[-5,5]
case 'F16'
x=-1:0.01:1; y=x;%[-5,5]
case 'F17'
x=-5:0.1:5; y=x;%[-5,5]
case 'F18'
x=-5:0.06:5; y=x;%[-5,5]
case 'F19'
x=-5:0.1:5; y=x;%[-5,5]
case 'F20'
x=-5:0.1:5; y=x;%[-5,5]
case 'F21'
x=-5:0.1:5; y=x;%[-5,5]
case 'F22'
x=-5:0.1:5; y=x;%[-5,5]
case 'F23'
x=-5:0.1:5; y=x;%[-5,5]
end
L=length(x);
f=[];
for i=1:L
for j=1:L
if strcmp(func_name,'F15')==0 && strcmp(func_name,'F19')==0 && strcmp(func_name,'F20')==0 && strcmp(func_name,'F21')==0 && strcmp(func_name,'F22')==0 && strcmp(func_name,'F23')==0
f(i,j)=fobj([x(i),y(j)]);
end
if strcmp(func_name,'F15')==1
f(i,j)=fobj([x(i),y(j),0,0]);
end
if strcmp(func_name,'F19')==1
f(i,j)=fobj([x(i),y(j),0]);
end
if strcmp(func_name,'F20')==1
f(i,j)=fobj([x(i),y(j),0,0,0,0]);
end
if strcmp(func_name,'F21')==1 || strcmp(func_name,'F22')==1 ||strcmp(func_name,'F23')==1
f(i,j)=fobj([x(i),y(j),0,0]);
end
end
end
surfc(x,y,f,'LineStyle','none');
end
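For example (an illustrative call, not part of the comparison script), the surface of a single benchmark can be drawn on its own figure:
% Usage sketch for func_plot (illustrative only).
figure;
func_plot('F10');   % Ackley-type surface over the grid defined above
title('F10 parameter space');
xlabel('x_1'); ylabel('x_2');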
Whale Optimization Algorithm (WOA):
function [Leader_score,Leader_pos,Convergence_curve]=WOA(SearchAgents_no,Max_iter,lb,ub,dim,fobj,Positions)
% initialize position vector and score for the leader
Leader_pos=zeros(1,dim);
Leader_score=inf; %change this to -inf for maximization problems
%Initialize the positions of search agents
%Positions=initialization(SearchAgents_no,dim,ub,lb);
Convergence_curve=zeros(1,Max_iter);
t=0;% Loop counter
% Main loop
while t<Max_iter
for i=1:size(Positions,1)
% Return back the search agents that go beyond the boundaries of the search space
Flag4ub=Positions(i,:)>ub;
Flag4lb=Positions(i,:)<lb;
Positions(i,:)=(Positions(i,:).*(~(Flag4ub+Flag4lb)))+ub.*Flag4ub+lb.*Flag4lb;
% Calculate objective function for each search agent
fitness=fobj(Positions(i,:));
% Update the leader
if fitness<Leader_score
Leader_score=fitness; % Update alpha
Leader_pos=Positions(i,:);
end
end
a=2-t*((2)/Max_iter); % a decreases linearly from 2 to 0 in Eq. (2.3)
% a2 linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
a2=-1+t*((-1)/Max_iter);
% Update the Position of search agents
for i=1:size(Positions,1)
r1=rand(); % r1 is a random number in [0,1]
r2=rand(); % r2 is a random number in [0,1]
A=2*a*r1-a; % Eq. (2.3) in the paper
C=2*r2; % Eq. (2.4) in the paper
b=1; % parameters in Eq. (2.5)
l=(a2-1)*rand+1; % parameters in Eq. (2.5)
p = rand(); % p in Eq. (2.6)
for j=1:size(Positions,2)
if p<0.5
if abs(A)>=1
rand_leader_index = floor(SearchAgents_no*rand()+1);
X_rand = Positions(rand_leader_index, :);
D_X_rand=abs(C*X_rand(j)-Positions(i,j)); % Eq. (2.7)
Positions(i,j)=X_rand(j)-A*D_X_rand; % Eq. (2.8)
elseif abs(A)<1
D_Leader=abs(C*Leader_pos(j)-Positions(i,j)); % Eq. (2.1)
Positions(i,j)=Leader_pos(j)-A*D_Leader; % Eq. (2.2)
end
elseif p>=0.5
distance2Leader=abs(Leader_pos(j)-Positions(i,j));
% Eq. (2.5)
Positions(i,j)=distance2Leader*exp(b.*l).*cos(l.*2*pi)+Leader_pos(j);
end
end
end
t=t+1;
Convergence_curve(t)=Leader_score;
end
Particle Swarm Optimization (PSO):
function [leader_score,g,gb] = PSO(Search_num,T_max,lb,ub,dim,fobj,Positions)
c1 = 2;
c2 = 2;
w = 0.8;
vmax = 0.5;
vmin = -0.5;
D = dim;
N = Search_num;
T = T_max;
%x = initialization(Search_num,dim,ub,lb);
x = Positions;
v = rand(N,D)*(vmax-vmin)+vmin;
p = x;
pbest = ones(N,1);
for i = 1:N
pbest(i) = fobj(x(i,:));
end
g = ones(1,D);
gbest = inf;
for i = 1:N
if(pbest(i)<gbest)
g = x(i,:);
gbest = pbest(i);
end
end
gb = ones(1,T);
for i = 1:T
for j = 1:N
if(fobj(x(j,:))<pbest(j))
p(j,:) = x(j,:);
pbest(j) = fobj(x(j,:));
end
if (pbest(j)<gbest)
g = p(j,:);
gbest = pbest(j);
end
v(j,:) = w*v(j,:)+c1*rand()*(p(j,:)-x(j,:))...
+c2*rand()*(g-x(j,:));
x(j,:) = x(j,:) + v(j,:);
for ii = 1:D
if(v(j,ii)>vmax)||(v(j,ii)<vmin)
v(j,ii) = rand()*(vmax-vmin)+vmin;
end
if(x(j,ii)>ub)||(x(j,ii)<lb)
x(j,ii) = rand()*(ub-lb)+lb;
end
end
end
gb(i) = gbest;
end
leader_score = gb(end);
Differential Evolution (DE):
function [Y,X,trace] = DEA(Search_num,T_max,lb,ub,dim,fobj,Positions)
NP = Search_num;
D =dim;
G = T_max;
F0 = 0.5;
CR = 0.9;
Xs = ub;
Xx = lb;
x = zeros(D,NP);
v = zeros(D,NP);
u = zeros(D,NP);
%x = initialization(Search_num,dim,ub,lb)';
x = Positions';
for m = 1:NP
Ob(m) = fobj(x(:,m));
end
trace(1) = min(Ob);
for gen = 1:G
lamda = exp(1-G/(G+1-gen));
F = F0*2^(lamda);
for m = 1:NP
r1 = randi(NP,1,1);
while (r1==m)
r1 = randi(NP,1,1);
end
r2 = randi(NP,1,1);
while(r2==m)||(r2==r1)
r2 = randi(NP,1,1);
end
r3 = randi(NP,1,1);
while(r3==m)||(r3==r1)||(r3==r2)
r3 = randi(NP,1,1);
end
v(:,m) = x(:,r1)+F*(x(:,r2)-x(:,r3));
end
r = randi(D,1,1);
for n = 1:D
cr = rand(1);
if (cr<=CR)||(n==r)
u(n,:) = v(n,:);
else
u(n,:) = x(n,:);
end
end
for n = 1:D
for m = 1:NP
if (u(n,m)<Xx)||(u(n,m)>Xs)
u(n,m) = rand()*(Xs-Xx)+Xx;
end
end
end
for m = 1:NP
Ob1(m) = fobj(u(:,m));
end
for m = 1:NP
if Ob1(m)<Ob(m)
x(:,m) = u(:,m);
end
end
for m = 1:NP
Ob(m) = fobj(x(:,m));
end
trace(gen+1) = min(Ob);
end
[SortOb,Index] = sort(Ob);
x = x(:,Index);
X = x(:,1);
Y = min(Ob);
end
Bat Algorithm (BA):
function [bestMin,bestS,bestArchive] = BA(Search_num,T_max,lb,ub,dim,fobj,Positions)
%A new modification approach on bat algorithm for solving optimization problems
%omegaxyz.com, 2019-02-12
%% BA parameter settings
t = 1;
maxT = T_max; % maximum number of iterations
% problem dimension
sizep = Search_num; % population size
xmin = lb;
xmax = ub; % range of the position vector
A = 0.6.*ones(sizep,1); % loudness (constant or decreasing)
r = zeros(sizep,1); % pulse rate (constant or increasing)
r0 = 0.7;
Af = 0.9;
Rf = 0.9;
Qmin = 0; % minimum frequency
Qmax = 1; % maximum frequency
%% Initialization
Lb = xmin*ones(1,dim);
Ub = xmax*ones(1,dim);
%pop = Lb+(Ub-Lb).*rand(sizep,dim); % population initialization
pop = Positions;
popv = zeros(sizep,dim); % velocities
Q = zeros(sizep,1); % frequencies
pfitness = zeros(sizep,1);
for i = 1:sizep
pfitness(i) = fobj(pop(i,:)); % evaluate
end
[bestMin, bestID]=min(pfitness);
bestS = pop(bestID, :);
bestArchive = zeros(maxT,1);
%% Main iteration loop
while t <= maxT
for i = 1:sizep
Q(i)=Qmin+(Qmax-Qmin)*rand();
popv(i,:)=popv(i,:)+(pop(i,:)-bestS)*Q(i);
Stemp = pop(i,:)+popv(i,:);
% pulse rate
if rand>r(i)
Stemp=bestS-1+2*rand(1,dim);
end
fitTemp = fobj(Stemp);
if (fitTemp<=pfitness(i))&&(rand()<A(i))
pop(i,:) = Stemp;
pfitness(i) = fitTemp;
A(i) = Af*A(i);
r(i) = r0*(1-exp(-Rf*t));
end
if fitTemp <= bestMin
bestMin = fitTemp;
bestS = Stemp;
end
end
bestArchive(t) = bestMin;
%fprintf('GEN: %d min: %.4f\n', t, bestMin);
t = t +1;
end
end
Grey Wolf Optimizer (GWO):
function [Alpha_score,Alpha_pos,Convergence_curve]=GWO(SearchAgents_no,Max_iter,lb,ub,dim,fobj,Positions)
% initialize alpha, beta, and delta_pos
Alpha_pos=zeros(1,dim);
Alpha_score=inf; %change this to -inf for maximization problems
Beta_pos=zeros(1,dim);
Beta_score=inf; %change this to -inf for maximization problems
Delta_pos=zeros(1,dim);
Delta_score=inf; %change this to -inf for maximization problems
%Initialize the positions of search agents
%Positions=initialization(SearchAgents_no,dim,ub,lb);
Convergence_curve=zeros(1,Max_iter);
l=0;% Loop counter
% Main loop
while l<Max_iter
for i=1:size(Positions,1)
% Return back the search agents that go beyond the boundaries of the search space
Flag4ub=Positions(i,:)>ub;
Flag4lb=Positions(i,:)<lb;
Positions(i,:)=(Positions(i,:).*(~(Flag4ub+Flag4lb)))+ub.*Flag4ub+lb.*Flag4lb;
% Calculate objective function for each search agent
fitness=fobj(Positions(i,:));
% Update Alpha, Beta, and Delta
if fitness<Alpha_score
Alpha_score=fitness; % Update alpha
Alpha_pos=Positions(i,:);
end
if fitness>Alpha_score && fitness<Beta_score
Beta_score=fitness; % Update beta
Beta_pos=Positions(i,:);
end
if fitness>Alpha_score && fitness>Beta_score && fitness<Delta_score
Delta_score=fitness; % Update delta
Delta_pos=Positions(i,:);
end
end
a=2-l*((2)/Max_iter); % a decreases linearly from 2 to 0
% Update the Position of search agents including omegas
for i=1:size(Positions,1)
for j=1:size(Positions,2)
r1=rand(); % r1 is a random number in [0,1]
r2=rand(); % r2 is a random number in [0,1]
A1=2*a*r1-a; % Equation (3.3)
C1=2*r2; % Equation (3.4)
D_alpha=abs(C1*Alpha_pos(j)-Positions(i,j)); % Equation (3.5)-part 1
X1=Alpha_pos(j)-A1*D_alpha; % Equation (3.6)-part 1
r1=rand();
r2=rand();
A2=2*a*r1-a; % Equation (3.3)
C2=2*r2; % Equation (3.4)
D_beta=abs(C2*Beta_pos(j)-Positions(i,j)); % Equation (3.5)-part 2
X2=Beta_pos(j)-A2*D_beta; % Equation (3.6)-part 2
r1=rand();
r2=rand();
A3=2*a*r1-a; % Equation (3.3)
C3=2*r2; % Equation (3.4)
D_delta=abs(C3*Delta_pos(j)-Positions(i,j)); % Equation (3.5)-part 3
X3=Delta_pos(j)-A3*D_delta; % Equation (3.5)-part 3
Positions(i,j)=(X1+X2+X3)/3;% Equation (3.7)
end
end
l=l+1;
Convergence_curve(l)=Alpha_score;
end
Grasshopper Optimization Algorithm (GOA):
function [TargetFitness,TargetPosition,Convergence_curve]=GOA(N,Max_iter,lb,ub,dim,fobj,Positions)
%
flag=0;
% if size(ub,1)==1
% ub=ones(dim,1)*ub;
% lb=ones(dim,1)*lb;
% end
%
% if (rem(dim,2)~=0) % this algorithm should be run with a even number of variables. This line is to handle odd number of variables
% dim = dim+1;
% ub = [ub; 100];
% lb = [lb; -100];
% flag=1;
% end
%Initialize the population of grasshoppers
%GrassHopperPositions=initialization(N,dim,ub,lb);
GrassHopperPositions = Positions;
GrassHopperFitness = zeros(1,N);
fitness_history=zeros(N,Max_iter);
position_history=zeros(N,Max_iter,dim);
Convergence_curve=zeros(1,Max_iter+1);
Trajectories=zeros(N,Max_iter);
cMax=1;
cMin=0.00004;
%Calculate the fitness of initial grasshoppers
for i=1:size(GrassHopperPositions,1)
% if flag == 1
% GrassHopperFitness(1,i)=fobj(GrassHopperPositions(i,1:end-1));
% else
GrassHopperFitness(1,i)=fobj(GrassHopperPositions(i,:));
%end
fitness_history(i,1)=GrassHopperFitness(1,i);
position_history(i,1,:)=GrassHopperPositions(i,:);
Trajectories(:,1)=GrassHopperPositions(:,1);
end
[sorted_fitness,sorted_indexes]=sort(GrassHopperFitness);
% m = 1;
% Convergence_curve(m) = sorted_fitness;
% Find the best grasshopper (target) in the first population
for newindex=1:N
Sorted_grasshopper(newindex,:)=GrassHopperPositions(sorted_indexes(newindex),:);
end
TargetPosition=Sorted_grasshopper(1,:);
TargetFitness=sorted_fitness(1);
% Main loop
l=1; % Start from the second iteration since the first iteration was dedicated to calculating the fitness of the initial grasshoppers
while l<Max_iter+1
c=cMax-l*((cMax-cMin)/Max_iter); % Eq. (2.8) in the paper
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for i=1:size(GrassHopperPositions,1)
temp= GrassHopperPositions';
% for k=1:2:dim
S_i=zeros(dim,1);
for j=1:N
if i~=j
Dist=distance(temp(:,j), temp(:,i)); % Calculate the distance between two grasshoppers
r_ij_vec=(temp(:,j)-temp(:,i))/(Dist+eps); % xj-xi/dij in Eq. (2.7)
xj_xi=2+rem(Dist,2); % |xjd - xid| in Eq. (2.7)
s_ij=((ub - lb)*c/2)*S_func(xj_xi).*r_ij_vec; % The first part inside the big bracket in Eq. (2.7)
S_i=S_i+s_ij;
end
end
S_i_total = S_i;
% end
X_new = c * S_i_total'+ (TargetPosition); % Eq. (2.7) in the paper
GrassHopperPositions_temp(i,:)=X_new';
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% GrassHopperPositions
GrassHopperPositions=GrassHopperPositions_temp;
for i=1:size(GrassHopperPositions,1)
% Relocate grasshoppers that go outside the search space
Tp=GrassHopperPositions(i,:)>ub';Tm=GrassHopperPositions(i,:)<lb';GrassHopperPositions(i,:)=(GrassHopperPositions(i,:).*(~(Tp+Tm)))+ub'.*Tp+lb'.*Tm;
% Calculating the objective values for all grasshoppers
if flag == 1
GrassHopperFitness(1,i)=fobj(GrassHopperPositions(i,1:end-1));
else
GrassHopperFitness(1,i)=fobj(GrassHopperPositions(i,:));
end
fitness_history(i,l)=GrassHopperFitness(1,i);
position_history(i,l,:)=GrassHopperPositions(i,:);
Trajectories(:,l)=GrassHopperPositions(:,1);
% Update the target
if GrassHopperFitness(1,i)<TargetFitness
TargetPosition=GrassHopperPositions(i,:);
TargetFitness=GrassHopperFitness(1,i);
end
end
Convergence_curve(l)=TargetFitness;
%disp(['In iteration #', num2str(l), ' , target''s objective = ', num2str(TargetFitness)])
l = l + 1;
end
if (flag==1)
TargetPosition = TargetPosition(1:dim-1);
end
%time=toc
end
function d = distance(a,b)
% DISTANCE - computes Euclidean distance matrix
%
% E = distance(A,B)
%
% A - (DxM) matrix
% B - (DxN) matrix
%
% Returns:
% E - (MxN) Euclidean distances between vectors in A and B
%
%
% Description :
% This fully vectorized (VERY FAST!) m-file computes the
% Euclidean distance between two vectors by:
%
% ||A-B|| = sqrt ( ||A||^2 + ||B||^2 - 2*A.B )
%
% Example :
% A = rand(400,100); B = rand(400,200);
% d = distance(A,B);
% Author : Roland Bunschoten
% University of Amsterdam
% Intelligent Autonomous Systems (IAS) group
% Kruislaan 403 1098 SJ Amsterdam
% tel.(+31)20-5257524
% [email protected]
% Last Rev : Oct 29 16:35:48 MET DST 1999
% Tested : PC Matlab v5.2 and Solaris Matlab v5.3
% Thanx : Nikos Vlassis
% Copyright notice: You are free to modify, extend and distribute
% this code granted that the author of the original code is
% mentioned as the original author of the code.
if (nargin ~= 2)
error('Not enough input arguments');
end
if (size(a,1) ~= size(b,1))
error('A and B should be of same dimensionality');
end
aa=sum(a.*a,1); bb=sum(b.*b,1); ab=a'*b;
d = sqrt(abs(repmat(aa',[1 size(bb,2)]) + repmat(bb,[size(aa,2) 1]) - 2*ab));
end
function o=S_func(r)
f=0.5;
l=1.5;
o=f*exp(-r/l)-exp(-r); % Eq. (2.3) in the paper
end
Moth-Flame Optimization (MFO):
function [Best_flame_score,Best_flame_pos,Convergence_curve,data]=MFO(N,Max_iteration,lb,ub,dim,fobj,Positions)
%display('MFO is optimizing your problem');
%Initialize the positions of moths
%Moth_pos=initialization(N,dim,ub,lb);
Moth_pos = Positions;
Convergence_curve=zeros(1,Max_iteration);
Iteration=1;
data = [];
% Main loop
while Iteration<Max_iteration+1
% Number of flames Eq. (3.14) in the paper
Flame_no=round(N-Iteration*((N-1)/Max_iteration));
for i=1:size(Moth_pos,1)
% Check if moths go out of the search space and bring them back
Flag4ub=Moth_pos(i,:)>ub;
Flag4lb=Moth_pos(i,:)<lb;
Moth_pos(i,:)=(Moth_pos(i,:).*(~(Flag4ub+Flag4lb)))+ub.*Flag4ub+lb.*Flag4lb;
% Calculate the fitness of moths
Moth_fitness(1,i)=fobj(Moth_pos(i,:));
end
if Iteration==1
% Sort the first population of moths
[fitness_sorted I]=sort(Moth_fitness);
sorted_population=Moth_pos(I,:);
% Update the flames
best_flames=sorted_population;
best_flame_fitness=fitness_sorted;
else
% Sort the moths
double_population=[previous_population;best_flames];
double_fitness=[previous_fitness best_flame_fitness];
[double_fitness_sorted,I]=sort(double_fitness);
double_sorted_population=double_population(I,:);
fitness_sorted=double_fitness_sorted(1:N);
sorted_population=double_sorted_population(1:N,:);
% Update the flames
best_flames=sorted_population;
best_flame_fitness=fitness_sorted;
end
% Update the position best flame obtained so far
Best_flame_score=fitness_sorted(1);
Best_flame_pos=sorted_population(1,:);
previous_population=Moth_pos;
previous_fitness=Moth_fitness;
% a linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
a=-1+Iteration*((-1)/Max_iteration);
for i=1:size(Moth_pos,1)
for j=1:size(Moth_pos,2)
if i<=Flame_no % Update the position of the moth with respect to its corresponding flame
% D in Eq. (3.13)
distance_to_flame=abs(sorted_population(i,j)-Moth_pos(i,j));
b=1;
t=(a-1)*rand+1;
% Eq. (3.12)
Moth_pos(i,j)=distance_to_flame*cos(t.*2*pi)+sorted_population(i,j);
end
if i>Flame_no % Update the position of the moth with respect to one flame
% Eq. (3.13)
distance_to_flame=abs(sorted_population(i,j)-Moth_pos(i,j));
b=1;
t=(a-1)*rand+1;
% Eq. (3.12)
Moth_pos(i,j)=distance_to_flame*cos(t.*2*pi)+sorted_population(Flame_no,j);
end
end
end
Convergence_curve(Iteration)=Best_flame_score;
% Display the iteration and best optimum obtained so far
% if mod(Iteration,50)==0
% display(['At iteration ', num2str(Iteration), ' the best fitness is ', num2str(Best_flame_score)]);
% end
Iteration=Iteration+1;
end
data = [data;Best_flame_score];
Genetic Algorithm (GA):
function [Best_flame_score,Best_flame_pos,Convergence_curve]=GA(N,Max_iteration,lb,ub,dim,fobj,Positions)
%display('GA is optimizing your problem');
%Initialize the positions of GA
%GA_pos=initialization(N,dim,ub,lb);
GA_pos = Positions;
Convergence_curve=zeros(1,Max_iteration);
data = [];
Best_flame_score =1;
fitness_function= fobj;
population_size=N;
parent_number=dim;
mutation_rate=0.1;
maximal_generation=Max_iteration;
minimal_cost=1.0e-40;
cumulative_probabilities = cumsum((parent_number:-1:1) / sum(parent_number:-1:1)); % a sequence of length parent_number (rank-based selection probabilities)
number_of_variables=dim;
% Best fitness
% The best fitness of each generation would be initialized to 1
% Best_flame_score = ones(maximal_generation, 1);
% Elite
% The parameter values of each generation's elite would be initialized to 0
% Best_flame_pos = zeros(1, number_of_variables);
% Number of children
% population size minus number of parents (parents are the individuals kept unchanged in each generation)
child_number = population_size - parent_number; % number of children per generation
% Initialize the population
% population_size corresponds to the rows of the matrix; each row is one individual, so rows = number of individuals (population size)
% number_of_variables corresponds to the columns; columns = number of parameters (the features describing an individual)
population = GA_pos;
last_generation = 0; % records the generation at which the loop exits
generation=1;
% Main loop
while generation <= maximal_generation
% Evaluate the population matrix with fitness_function
% cost = feval(fitness_function, population); % fitness of all individuals (population_size*1 matrix)
for j = 1:size(population,1)
cost(1,j)=feval(fitness_function,population(j,:));
end
% index records the original row of each value after sorting
[cost, index] = sort(cost); % sort the fitness values in ascending order
% index(1:parent_number)
% row numbers, in population, of the parent_number individuals with the smallest cost
% these (parent_number) individuals are selected as parents; parent_number effectively sets the crossover fraction
population = population(index(1:parent_number), :); % keep the better individuals first
% note that the population matrix keeps changing
% after the sort above, cost is in ascending order
% so cost(1) is the best fitness of this generation
Best_flame_score= cost(1); % record the best fitness of this generation
% the first row of population is the elite individual of this generation
Best_flame_pos = population(1, :); % record the best solution (elite) of this generation
% stop evolving if the best solution of this generation is already good enough
if Best_flame_score < minimal_cost
last_generation = generation;
break;
end
% Produce the new population by crossover and mutation
% Chromosome crossover starts here
for child = 1:2:child_number % step of 2 because each crossover produces 2 children
% cumulative_probabilities has length parent_number
% randomly pick 2 parents from it
mother = find(cumulative_probabilities > rand, 1); % pick a relatively good mother
father = find(cumulative_probabilities > rand, 1); % pick a relatively good father
% ceil rounds up
% rand generates a random number
% i.e. a crossover column is chosen at random
crossover_point = ceil(rand*number_of_variables); % randomly pick a chromosome crossover point
% e.g. if crossover_point=3 and number_of_variables=5
% mask1 = 1 1 1 0 0
% mask2 = 0 0 0 1 1
mask1 = [ones(1, crossover_point), zeros(1, number_of_variables - crossover_point)];
mask2 = not(mask1);
% obtain the 4 chromosome segments
% note the element-wise .*
mother_1 = mask1 .* population(mother, :); % first part of the mother's chromosome
mother_2 = mask2 .* population(mother, :); % second part of the mother's chromosome
father_1 = mask1 .* population(father, :); % first part of the father's chromosome
father_2 = mask2 .* population(father, :); % second part of the father's chromosome
% build the next generation
population(parent_number + child, :) = mother_1 + father_2; % one child
population(parent_number+child+1, :) = mother_2 + father_1; % the other child
end % chromosome crossover ends here
% Chromosome mutation starts here
% mutation population
mutation_population = population(2:population_size, :); % the elite does not mutate, so start from row 2
number_of_elements = (population_size - 1) * number_of_variables; % total number of genes
number_of_mutations = ceil(number_of_elements * mutation_rate); % number of genes to mutate (total genes * mutation rate)
% rand(1, number_of_mutations) generates number_of_mutations random numbers in [0,1] (a 1*number_of_mutations matrix)
% after scaling, each element gives the linear index of a gene that will change
mutation_points = ceil(number_of_elements * rand(1, number_of_mutations)); % pick the genes to mutate
% each selected gene is replaced by a random number, completing the mutation
mutation_population(mutation_points) = rand(1, number_of_mutations); % mutate the selected genes
population(2:population_size, :) = mutation_population; % population after mutation
Convergence_curve(generation)=Best_flame_score;
% Chromosome mutation ends here
% Display the iteration and best optimum obtained so far
% if mod(generation,50)==0
% % display(['At iteration ', num2str(generation), ' the best fitness is ', num2str(Best_flame_score)]);
% display(['At iteration ', num2str(generation)]);
% end
generation=generation+1;
end
data = [data;Best_flame_score];
Butterfly Optimization Algorithm (BOA):
function [fmin,best_pos,Convergence_curve]=BOAL(n,N_iter,Lb,Ub,dim,fobj,Positions)
% n is the population size
% N_iter represents the total number of iterations
p=0.8; % probability switch
power_exponent=0.1;
sensory_modality=0.01;
%Initialize the positions of search agents
%Sol=initialization(n,dim,Ub,Lb);
Sol = Positions;
for i=1:n
Fitness(i)=fobj(Sol(i,:));
end
% Find the current best_pos
[fmin,I]=min(Fitness);
best_pos=Sol(I,:);
S=Sol;
% Start the iterations -- Butterfly Optimization Algorithm
for t=1:N_iter
for i=1:n % Loop over all butterflies/solutions
%Calculate fragrance of each butterfly which is correlated with objective function
Fnew=fobj(S(i,:));
FP=(sensory_modality*(Fnew^power_exponent));
%Global or local search
if rand<p
dis = rand * rand * best_pos - Sol(i,:); %Eq. (2) in paper
S(i,:)=Sol(i,:)+dis*FP;
else
% Find random butterflies in the neighbourhood
epsilon=rand;
JK=randperm(n);
dis=epsilon*epsilon*Sol(JK(1),:)-Sol(JK(2),:);
S(i,:)=Sol(i,:)+dis*FP; %Eq. (3) in paper
end
% Check if the simple limits/bounds are OK
S(i,:)=simplebounds(S(i,:),Lb,Ub);
% Evaluate new solutions
Fnew=fobj(S(i,:)); %Fnew represents new fitness values
% If fitness improves (better solutions found), update then
if (Fnew<=Fitness(i))
Sol(i,:)=S(i,:);
Fitness(i)=Fnew;
end
% Update the current global best_pos
if Fnew<=fmin
best_pos=S(i,:);
fmin=Fnew;
end
end
Convergence_curve(t,1)=fmin;
%Update sensory_modality
sensory_modality=sensory_modality_NEW(sensory_modality, N_iter);
end
% Boundary constraints
function s=simplebounds(s,Lb,Ub)
% Apply the lower bound
ns_tmp=s;
I=ns_tmp<Lb;
ns_tmp(I)=Lb;
% Apply the upper bounds
J=ns_tmp>Ub;
ns_tmp(J)=Ub;
% Update this new move
s=ns_tmp;
function y=sensory_modality_NEW(x,Ngen)
y=x+(0.025/(x*Ngen));