以优化SVM算法的参数c和g为例,对SA(模拟退火)算法MATLAB源码进行了逐行中文注解。
完整程序和示例文件地址:http://download.csdn.net/detail/u013337691/9644107
链接:http://pan.baidu.com/s/1i5G0gPB 密码:4ge8
% Tune the SVM hyper-parameters c and g with simulated annealing (SA),
% using the Metropolis acceptance criterion.
%% Clean the environment
tic % start timing
clear
clc
close all
format compact
%% Load data
% wine.mat contains 3 classes: wine (178x13 feature matrix) and
% wine_labels (178x1 label column vector)
load wine.mat
% Build the train/test split by index vectors:
% training set = samples 1-30 (class 1), 60-95 (class 2), 131-153 (class 3)
idx_train = [1:30,60:95,131:153];
% test set = samples 31-59 (class 1), 96-130 (class 2), 154-178 (class 3)
idx_test = [31:59,96:130,154:178];
train_wine = wine(idx_train,:);
train_wine_labels = wine_labels(idx_train);
test_wine = wine(idx_test,:);
test_wine_labels = wine_labels(idx_test);
%% Preprocessing
% Normalize training and test features jointly into [0,1]
[mtrain,ntrain] = size(train_wine);
[mtest,ntest] = size(test_wine);
combined = [train_wine;test_wine];
% mapminmax is MATLAB's built-in min-max normalization (works column-wise,
% hence the transposes)
[combined_scaled,ps] = mapminmax(combined',0,1);
combined_scaled = combined_scaled';
train_wine = combined_scaled(1:mtrain,:);
test_wine = combined_scaled((mtrain+1):end,:);
%% SA algorithm main program
lb=[0.01,0.01]; % lower bounds for [c,g]
ub=[100,100]; % upper bounds for [c,g]
% Cooling-schedule parameters
MarkovLength=100; % Markov chain length (inner iterations per temperature)
DecayScale=0.85; % temperature decay factor
StepFactor=0.2; % Metropolis step-size factor
Temperature0=8; % initial temperature
Temperatureend=3; % final temperature
Boltzmann_con=1; % Boltzmann constant
AcceptPoints=0.0; % total points accepted during the Metropolis process
% Random initialization of the parameters
range=ub-lb;
Par_cur=rand(size(lb)).*range+lb; % Par_cur: current solution
Par_best_cur=Par_cur; % Par_best_cur: previous best solution
Par_best=rand(size(lb)).*range+lb; % Par_best: best solution found so far
% Cache objective values: each evaluation trains an SVM, so evaluating a
% point once (instead of repeatedly inside the loop) is a large speedup
% and is exact because the objective is deterministic for fixed data.
E_cur=objfun_svm(Par_cur,train_wine_labels,train_wine,test_wine_labels,test_wine);
E_best=objfun_svm(Par_best,train_wine_labels,train_wine,test_wine_labels,test_wine);
% Anneal (cool) once per outer iteration until the stop temperature
t=Temperature0;
itr_num=0; % iteration counter
while t>Temperatureend
    itr_num=itr_num+1;
    t=DecayScale*t; % temperature update (cooling)
    for i=1:MarkovLength
        % Draw a random neighbor of the current point
        p=0;
        while p==0
            Par_new=Par_cur+StepFactor.*range.*(rand(size(lb))-0.5);
            % BUGFIX: this bounds check was garbled in the original
            % ("sum(Par_new0"); accept the candidate only when every
            % component lies inside [lb,ub]
            if sum(Par_new>ub)+sum(Par_new<lb)==0
                p=1;
            end
        end
        E_new=objfun_svm(Par_new,train_wine_labels,train_wine,test_wine_labels,test_wine);
        % Track the global best solution
        if E_best>E_new
            % keep the previous best
            Par_best_cur=Par_best;
            % record the new best
            Par_best=Par_new;
            E_best=E_new;
        end
        % Metropolis acceptance
        if E_cur-E_new>0
            % downhill move: always accept
            Par_cur=Par_new;
            E_cur=E_new;
            AcceptPoints=AcceptPoints+1;
        else
            % BUGFIX: the original computed -dE/Boltzmann_con*Temperature0,
            % which (a) MULTIPLIED by the temperature due to operator
            % precedence and (b) used the fixed initial temperature, so
            % cooling never reduced the acceptance probability. Correct
            % Metropolis rule: accept with probability exp(-dE/(k*t)).
            changer=-(E_new-E_cur)/(Boltzmann_con*t);
            p1=exp(changer);
            if p1>rand
                Par_cur=Par_new;
                E_cur=E_new;
                AcceptPoints=AcceptPoints+1;
            end
        end
    end
end
%% Display results (disp strings are left as-is; they are runtime output)
disp(['最小值在点:',num2str(Par_best)]);
Objval_best= objfun_svm(Par_best,train_wine_labels,train_wine,test_wine_labels,test_wine);
disp(['最小值为:',num2str(Objval_best)]);
%% Show elapsed run time
toc
%% SVM objective function
function f=objfun_svm(cv,train_wine_labels,train_wine,test_wine_labels,test_wine)
% cv is a 1x2 row vector holding the SVM parameters c and g.
% Returns the test-set misclassification rate, the quantity SA minimizes.
libsvm_opts = [' -c ',num2str(cv(1)),' -g ',num2str(cv(2))];
% Train the SVM with the candidate parameters
model=svmtrain(train_wine_labels,train_wine,libsvm_opts);
% Predict on the test set; acc(1) is presumably the accuracy percentage
% reported by libsvm — consistent with the 1 - acc/100 formula below
[~,acc]=svmpredict(test_wine_labels,test_wine,model);
% Objective value: classification error rate
f=1-acc(1)/100;
(广告)欢迎扫描关注微信公众号:Genlovhyy的数据小站(Gnelovy212)