Comparison of Two Prediction Methods: ELM (Extreme Learning Machine) vs. BP Neural Network
Table of Contents
- Comparison of Two Prediction Methods: ELM (Extreme Learning Machine) vs. BP Neural Network
  - **Extreme Learning Machine**
    - ELM
    - The elmtrain function
    - The elmpredict function
  - **BP Neural Network**
Extreme Learning Machine
ELM
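A brief note on what the code below actually does: in an ELM the input weights IW and the hidden biases B are drawn at random and never trained; only the output weights LW are obtained in closed form from the hidden-layer output matrix H by solving

$$ H\,\beta = T \;\Rightarrow\; \beta = H^{+}T, $$

where $H^{+}$ is the Moore-Penrose pseudoinverse (the `pinv` call inside `elmtrain`). The script loads the data, splits it into 800 training rows and the remaining test rows, normalizes inputs and outputs to [-1, 1], trains an ELM with 9 sigmoid hidden neurons, and reports MSE, R², RMSE and MAPE on the test set.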
%%
clc
clear all
%% Load and split the data
a = xlsread('BP_shiwaituishinei');   % load the data set
n = randperm(size(a,1));             % random permutation of the row indices
a1 = a(n(1:800),:);                  % training set (first 800 shuffled rows)
a2 = a(n(801:end),:);                % test set (remaining rows)
P_train = a1(:,1:5)';                % training inputs (5 features)
T_train = a1(:,6)';                  % training targets
P_test = a2(:,1:5)';                 % test inputs
T_test = a2(:,6)';                   % test targets
%% Normalization to [-1, 1]
% Inputs (fit on the training set, apply to the test set)
[Pn_train,inputps] = mapminmax(P_train,-1,1);
Pn_test = mapminmax('apply',P_test,inputps);
% Outputs
[Tn_train,outputps] = mapminmax(T_train,-1,1);
Tn_test = mapminmax('apply',T_test,outputps);
tic
%% Create and train the ELM (9 hidden neurons, sigmoid activation, regression)
[IW,B,LW,TF,TYPE] = elmtrain(Pn_train,Tn_train,9,'sig',0);
%% ELM simulation on the test set
Tn_sim = elmpredict(Pn_test,IW,B,LW,TF,TYPE);
% De-normalize the predictions
T_sim = mapminmax('reverse',Tn_sim,outputps);
toc
%% Compare results
result = [T_test' T_sim'];
% Mean squared error
E = mse(T_sim - T_test)
% Coefficient of determination (R^2)
N = length(T_test);
R2 = (N*sum(T_sim.*T_test)-sum(T_sim)*sum(T_test))^2/((N*sum((T_sim).^2)-(sum(T_sim))^2)*(N*sum((T_test).^2)-(sum(T_test))^2))
%% Plots
figure
subplot(2,1,1)
plot(1:length(T_test),T_test,'r-*')
hold on
plot(1:length(T_sim),T_sim,'b--+')
xlabel('Test sample index')
ylabel('Test set output')
title('ELM output on the test set')
legend('Expected output','Predicted output')
subplot(2,1,2)
hold on
plot(1:length(T_test),T_test-T_sim,'r-*')
xlabel('Test sample index')
ylabel('Prediction error')
title('ELM prediction error on the test set')
%% Error metrics
rmse = sqrt(mean((T_sim - T_test).^2));           % root mean squared error
mape = mean(abs((T_test - T_sim)./T_test))*100;   % mean absolute percentage error (%)
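The script fixes the hidden-layer size at 9. If you want to pick it empirically, a minimal sketch like the following sweeps a range of sizes and keeps the one with the lowest test RMSE. It assumes Pn_train, Tn_train, Pn_test, T_test and outputps from the script above are still in the workspace, and it overwrites IW/B/LW from the run above.

% A minimal sketch, not part of the original script: choose the hidden-layer
% size by test-set RMSE (ELM weights are random, so averaging several runs
% per size would be more robust).
bestRmse = inf;
bestN = NaN;
for nHidden = 4:2:30                                 % candidate hidden-layer sizes
    [IW,B,LW,TF,TYPE] = elmtrain(Pn_train,Tn_train,nHidden,'sig',0);
    Tn_tmp = elmpredict(Pn_test,IW,B,LW,TF,TYPE);
    T_tmp  = mapminmax('reverse',Tn_tmp,outputps);   % back to original units
    r = sqrt(mean((T_tmp - T_test).^2));             % test RMSE for this size
    if r < bestRmse
        bestRmse = r;
        bestN = nHidden;
    end
end
fprintf('Best hidden-layer size: %d (test RMSE = %.4f)\n',bestN,bestRmse);

Strictly speaking, a separate validation split (rather than the test set) should drive this choice; the test set is used here only to keep the sketch short.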
The elmtrain function
function [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% ELMTRAIN Create and Train an Extreme Learning Machine
% Syntax
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,N,TF,TYPE)
% Description
% Input
% P - Input Matrix of Training Set (R*Q)
% T - Output Matrix of Training Set (S*Q)
% N - Number of Hidden Neurons (default = Q)
% TF - Transfer Function:
% 'sig' for Sigmoidal function (default)
% 'sin' for Sine function
% 'hardlim' for Hardlim function
% TYPE - Regression (0,default) or Classification (1)
% Output
% IW - Input Weight Matrix (N*R)
% B - Bias Matrix (N*1)
% LW - Layer Weight Matrix (N*S)
% Example
% Regression:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',0)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% Classification:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',1)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% See also ELMPREDICT
% Yu Lei,11-7-2010
% Copyright www.matlabsky.com
% $Revision:1.0 $
if nargin < 2
error('ELM:Arguments','Not enough input arguments.');
end
if nargin < 3
N = size(P,2);
end
if nargin < 4
TF = 'sig';
end
if nargin < 5
TYPE = 0;
end
if size(P,2) ~= size(T,2)
error('ELM:Arguments','The columns of P and T must be the same.');
end
[R,Q] = size(P);
if TYPE == 1
T = ind2vec(T);
end
[S,Q] = size(T);
% Randomly Generate the Input Weight Matrix
IW = rand(N,R) * 2 - 1;
% Randomly Generate the Bias Matrix
B = rand(N,1);
BiasMatrix = repmat(B,1,Q);
% Calculate the Layer Output Matrix H
tempH = IW * P + BiasMatrix;
switch TF
case 'sig'
H = 1 ./ (1 + exp(-tempH));
case 'sin'
H = sin(tempH);
case 'hardlim'
H = hardlim(tempH);
end
% Calculate the Output Weight Matrix
LW = pinv(H') * T';
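As a quick sanity check of elmtrain together with elmpredict, the following self-contained snippet fits a one-dimensional sine curve. It uses synthetic data of my own rather than the blog's data set and only assumes the two functions above are on the MATLAB path.

% Illustrative example only (synthetic data): fit y = sin(x) with a
% 20-neuron sigmoid ELM and report the training RMSE.
x = linspace(-pi,pi,200);                 % 1*200 inputs
y = sin(x);                               % 1*200 targets
[IW,B,LW,TF,TYPE] = elmtrain(x,y,20,'sig',0);
yhat = elmpredict(x,IW,B,LW,TF,TYPE);
fprintf('Training RMSE: %.4f\n',sqrt(mean((yhat - y).^2)));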
The elmpredict function
function Y = elmpredict(P,IW,B,LW,TF,TYPE)
% ELMPREDICT Simulate an Extreme Learning Machine
% Syntax
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% Description
% Input
% P - Input Matrix (R*Q)
% IW - Input Weight Matrix (N*R)
% B - Bias Matrix (N*1)
% LW - Layer Weight Matrix (N*S)
% TF - Transfer Function:
% 'sig' for Sigmoidal function (default)
% 'sin' for Sine function
% 'hardlim' for Hardlim function
% TYPE - Regression (0,default) or Classification (1)
% Output
% Y - Simulated Output Matrix (S*Q)
% Example
% Regression:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',0)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% Classification:
% [IW,B,LW,TF,TYPE] = elmtrain(P,T,20,'sig',1)
% Y = elmpredict(P,IW,B,LW,TF,TYPE)
% See also ELMTRAIN
% Yu Lei,11-7-2010
% Copyright www.matlabsky.com
% $Revision:1.0 $
if nargin < 6
error('ELM:Arguments','Not enough input arguments.');
end
% Calculate the Layer Output Matrix H
Q = size(P,2);
BiasMatrix = repmat(B,1,Q);
tempH = IW * P + BiasMatrix;
switch TF
case 'sig'
H = 1 ./ (1 + exp(-tempH));
case 'sin'
H = sin(tempH);
case 'hardlim'
H = hardlim(tempH);
end
% Calculate the Simulate Output
Y = (H' * LW)';
if TYPE == 1
temp_Y = zeros(size(Y));
for i = 1:size(Y,2)
[~,index] = max(Y(:,i));
temp_Y(index,i) = 1;
end
Y = vec2ind(temp_Y);
end
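For the classification branch (TYPE = 1), labels are passed as positive integers: elmtrain converts them with ind2vec and elmpredict converts the winning output back with vec2ind (both from the Neural Network Toolbox, which the code already requires). A minimal sketch on synthetic two-class data of my own:

% Illustrative classification example (synthetic data): two Gaussian
% clusters with class labels 1 and 2.
X = [randn(2,50) - 1, randn(2,50) + 1];   % 2*100 feature matrix
y = [ones(1,50), 2*ones(1,50)];           % integer class labels
[IW,B,LW,TF,TYPE] = elmtrain(X,y,15,'sig',1);
yhat = elmpredict(X,IW,B,LW,TF,TYPE);
fprintf('Training accuracy: %.2f%%\n',100*mean(yhat == y));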
BP Neural Network
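Unlike the ELM, the BP network below trains all of its weights iteratively. With the 'trainlm' option each step is a Levenberg-Marquardt update

$$ \Delta w = -\left(J^{T}J + \mu I\right)^{-1} J^{T} e, $$

where $J$ is the Jacobian of the network errors with respect to the weights, $e$ is the error vector, and $\mu$ is an adaptive damping factor. (The lr setting in the script matters for gradient-descent trainers such as traingd; trainlm adapts $\mu$ instead.)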
clear
clc
%% Load and split the data
a = xlsread('BP_shiwaituishinei');   % load the data set
n = randperm(size(a,1));             % random permutation of the row indices
a1 = a(n(1:800),:);                  % training set (first 800 shuffled rows)
a2 = a(n(801:end),:);                % test set (remaining rows)
P = a1(:,1:5)';                      % training inputs (5 features)
T = a1(:,6)';                        % training targets
% Normalize the data
[p1,minp,maxp,t1,mint,maxt] = premnmx(P,T);
% Create the network (hidden-layer sizes in the range 4-13 were considered; 7 and 8 are used here)
net = newff(minmax(p1),[7,8,1],{'tansig','tansig','purelin'},'trainlm');
% Maximum number of training epochs
net.trainParam.epochs = 8000;
% Learning rate
net.trainParam.lr = 0.05;
% Target training error
net.trainParam.goal = 1e-3;
% Train the network
[net,tr] = train(net,p1,t1);
%% Test data
M1 = a2(:,1:5)';                     % test inputs
N1 = a2(:,6)';                       % test targets
M3 = tramnmx(M1,minp,maxp);          % apply the training normalization
M4 = sim(net,M3);                    % simulate the network
M5 = postmnmx(M4,mint,maxt);         % de-normalize the predictions
idx = 1:length(N1);                  % test-sample index (avoids the hard-coded 240 and reusing t1)
%% Plot predictions against actual data
figure
% Subplot 1: prediction vs. actual
subplot(2,1,1)
hold on
plot(idx,N1,'r-*',idx,M5,'b--+')
legend('Actual','Predicted')
title('BP network temperature prediction')
xlabel('Sample index')
ylabel('Temperature (°C)')
% Subplot 2: prediction error
subplot(2,1,2)
hold on
err = M5 - N1;                       % prediction error (avoid shadowing the built-in error function)
plot(idx,err,'-*')
title('BP network prediction error')
ylabel('Error')
xlabel('Sample')
%% Error metrics
rmse = sqrt(mean((M5 - N1).^2));             % root mean squared error
mape = mean(abs((N1 - M5)./N1))*100;         % mean absolute percentage error (%)
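Note that premnmx/tramnmx/postmnmx and this form of newff are deprecated in recent MATLAB releases. For reference only (not part of the original code), an equivalent model with the current API might look like the sketch below; it assumes a1 and a2 from the split above are in the workspace, and relies on the fact that fitnet applies mapminmax preprocessing to inputs and targets by itself.

% A minimal sketch with the newer Neural Network Toolbox API.
Ptr = a1(:,1:5)';  Ttr = a1(:,6)';           % training inputs/targets
Pte = a2(:,1:5)';  Tte = a2(:,6)';           % test inputs/targets
net2 = fitnet([7 8],'trainlm');              % two hidden layers, Levenberg-Marquardt
net2.trainParam.epochs = 8000;
net2.trainParam.goal   = 1e-3;
net2 = train(net2,Ptr,Ttr);                  % normalization is handled internally
Yhat = net2(Pte);                            % predictions in the original units
fprintf('BP (fitnet) test RMSE: %.4f\n',sqrt(mean((Yhat - Tte).^2)));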