Installing the MATLAB DeepLearnToolbox, and rewriting nntrain to plot the loss and prediction accuracy for every epoch

I. Installation

It is recommended to upgrade to MATLAB R2022b, where the deep learning toolbox can be selected during installation. For earlier versions, it can be installed as follows.

1. Download the DeepLearnToolbox from GitHub:

https://github.com/rasmusbergpalm/DeepLearnToolbox

2. Place the extracted folder (automatically named DeepLearnToolbox-master) into the toolbox folder under the MATLAB installation root.
3. Add the path: in the Command Window, run addpath(genpath('D:\MATLAB\toolbox\DeepLearnToolbox-master')), adjusting the path to your own installation location. Then, on the Home tab, click Set Path and then Save, so the toolbox functions can be called directly every time MATLAB starts; a scripted alternative is sketched below.
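
Equivalently, the same steps can be scripted entirely from the Command Window, since savepath (a built-in MATLAB command) does what the Set Path > Save dialog does; the folder below is an example and must match your own installation:

% Add the toolbox and all its subfolders to the search path; adjust as needed.
addpath(genpath('D:\MATLAB\toolbox\DeepLearnToolbox-master'));
savepath; % persist the search path so it is restored on every MATLAB start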

II. Rewriting nntrain

The original file in the toolbox is as follows:

function [nn, L]  = nntrain(nn, train_x, train_y, opts, val_x, val_y)
%NNTRAIN trains a neural net
% [nn, L] = nntrain(nn, x, y, opts) trains the neural network nn with input x and
% output y for opts.numepochs epochs, with minibatches of size
% opts.batchsize. Returns a neural network nn with updated activations,
% errors, weights and biases, (nn.a, nn.e, nn.W, nn.b) and L, the sum
% squared error for each training minibatch.

assert(isfloat(train_x), 'train_x must be a float');
assert(nargin == 4 || nargin == 6, 'number of input arguments must be 4 or 6')

loss.train.e               = [];
loss.train.e_frac          = [];
loss.val.e                 = [];
loss.val.e_frac            = [];
opts.validation = 0;
if nargin == 6
    opts.validation = 1;
end

fhandle = [];
if isfield(opts,'plot') && opts.plot == 1
    fhandle = figure();
end

m = size(train_x, 1);

batchsize = opts.batchsize;
numepochs = opts.numepochs;

numbatches = m / batchsize;

assert(rem(numbatches, 1) == 0, 'numbatches must be an integer');

L = zeros(numepochs*numbatches,1);
n = 1;
for i = 1 : numepochs
    tic;
    
    kk = randperm(m);
    for l = 1 : numbatches
        batch_x = train_x(kk((l - 1) * batchsize + 1 : l * batchsize), :);
        
        %Add noise to input (for use in denoising autoencoder)
        if(nn.inputZeroMaskedFraction ~= 0)
            batch_x = batch_x.*(rand(size(batch_x))>nn.inputZeroMaskedFraction);
        end
        
        batch_y = train_y(kk((l - 1) * batchsize + 1 : l * batchsize), :);
        
        nn = nnff(nn, batch_x, batch_y);
        nn = nnbp(nn);
        nn = nnapplygrads(nn);
        
        L(n) = nn.L;
        
        n = n + 1;
    end
    
    t = toc;

    if opts.validation == 1
        loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
        str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
    else
        loss = nneval(nn, loss, train_x, train_y);
        str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
    end
    if ishandle(fhandle)
        nnupdatefigures(nn, fhandle, loss, opts, i);
    end
        
    disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
    nn.learningRate = nn.learningRate * nn.scaling_learningRate;
end
end

In the file above, L stores the training loss: in every epoch, the loss of each mini-batch is recorded. We now rewrite the function so that it also outputs the average loss over all training samples after each epoch, together with the prediction accuracy on the test set.

1. First, change the function's argument list.

function [nn, Loss, accuracy, L] = nntrain(nn, train_x, train_y, opts, test_x, test_y, val_x, val_y)

assert(nargin == 4 || nargin == 6 || nargin == 8, 'number of input arguments must be 4, 6 or 8')

if nargin == 8   
    opts.validation = 1;
end            

if nargin == 6   
    opts.test = 1;
end

The test-set inputs and labels are added to the parameter list, and opts.validation is set to 1 when there are 8 input arguments.

An opts.test flag is also added; example calls for both forms are shown below.
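
For illustration (the variable names are placeholders), the two extended call forms look like this:

% 6 arguments: track test-set accuracy after each epoch
[nn, Loss, accuracy, L] = nntrain(nn, train_x, train_y, opts, test_x, test_y);
% 8 arguments: additionally evaluate on a validation set via nneval
[nn, Loss, accuracy, L] = nntrain(nn, train_x, train_y, opts, test_x, test_y, val_x, val_y);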

Allocate space for the loss values and accuracies.

Loss = zeros(numepochs, 1);
accuracy = zeros(numepochs, 1);

loss_batch(l) = nn.L; % record the loss of each mini-batch within one epoch

Loss(i) = sum(loss_batch)/numbatches; % average loss over the whole training set for this epoch
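
Since L already stores every mini-batch loss in order, the same per-epoch average could also be read back from L directly (a sketch using the same indexing as the training loop):

Loss(i) = mean(L((i-1)*numbatches + 1 : i*numbatches)); % identical to sum(loss_batch)/numbatches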

Next, check whether the test-set arguments are empty ([]); when they are not, compute the prediction accuracy.

    if opts.test==1
        if isempty(test_x)||isempty(test_y)
            opts.test=0;
        else
            [er, bad] = nntest(nn, test_x, test_y);
            accuracy(i)=1-er;
        end
    end
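
For context, nntest returns er, the misclassification rate on (test_x, test_y), and bad, the indices of the misclassified samples, so accuracy(i) = 1 - er is the fraction classified correctly. Its body is roughly the following (paraphrased from the toolbox source; treat it as a sketch):

function [er, bad] = nntest(nn, x, y)
    labels = nnpredict(nn, x);       % predicted class index for each sample
    [~, expected] = max(y, [], 2);   % true class index from the one-hot labels
    bad = find(labels ~= expected);  % misclassified samples
    er = numel(bad) / size(x, 1);    % misclassification rate
end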

The complete modified nntrain function follows.

function [nn, Loss, accuracy, L] = nntrain(nn, train_x, train_y, opts, test_x, test_y, val_x, val_y)
%NNTRAIN trains a neural net
% [nn, L] = nntrain(nn, x, y, opts) trains the neural network nn with input x and
% output y for opts.numepochs epochs, with minibatches of size
% opts.batchsize. Returns a neural network nn with updated activations,
% errors, weights and biases, (nn.a, nn.e, nn.W, nn.b) and L, the sum
% squared error for each training minibatch.

assert(isfloat(train_x), 'train_x must be a float');
assert(nargin == 4 || nargin == 6 || nargin == 8, 'number of input arguments must be 4, 6 or 8')

loss.train.e               = [];
loss.train.e_frac          = [];
loss.val.e                 = [];
loss.val.e_frac            = [];
opts.validation = 0;
opts.test = 0;
if nargin == 8   
    opts.validation = 1;
end

if nargin == 6   
    opts.test = 1;
end
fhandle = [];
if isfield(opts,'plot') && opts.plot == 1
    fhandle = figure();
end

m = size(train_x, 1);

batchsize = opts.batchsize;
numepochs = opts.numepochs;

numbatches = m / batchsize;

assert(rem(numbatches, 1) == 0, 'numbatches must be an integer');

L = zeros(numepochs*numbatches,1);
n = 1;
Loss=zeros(numepochs,1);
accuracy=zeros(numepochs,1);
for i = 1 : numepochs
    tic;
    loss_batch=zeros(numbatches,1);
    kk = randperm(m);
    for l = 1 : numbatches
        batch_x = train_x(kk((l - 1) * batchsize + 1 : l * batchsize), :);
        
        %Add noise to input (for use in denoising autoencoder)
        if(nn.inputZeroMaskedFraction ~= 0)
            batch_x = batch_x.*(rand(size(batch_x))>nn.inputZeroMaskedFraction);
        end
        
        batch_y = train_y(kk((l - 1) * batchsize + 1 : l * batchsize), :);
        
        nn = nnff(nn, batch_x, batch_y);
        nn = nnbp(nn);
        nn = nnapplygrads(nn);
        
        L(n) = nn.L;
        n = n + 1;
        loss_batch(l)=nn.L;
    end
    t = toc;
    
    Loss(i)=sum(loss_batch)/numbatches;
    
    if opts.test==1
        if isempty(test_x)||isempty(test_y)
            opts.test=0;
        else
            [er, bad] = nntest(nn, test_x, test_y);
            accuracy(i)=1-er;
        end
    end
    
    if opts.validation == 1
        loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
        str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
    else
        loss = nneval(nn, loss, train_x, train_y);
        str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
    end
    if ishandle(fhandle)
        nnupdatefigures(nn, fhandle, loss, opts, i);
    end
    
        
    disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
    nn.learningRate = nn.learningRate * nn.scaling_learningRate;
end
end
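
Finally, a minimal usage sketch showing how to call the modified function and plot the per-epoch curves promised in the title. The layer sizes, epoch count, and batch size below are example values; train_x, train_y, test_x, and test_y are assumed to be loaded already (one sample per row, labels one-hot encoded), and opts.batchsize must divide the number of training samples exactly:

nn = nnsetup([784 100 10]);   % input, hidden and output layer sizes (example values)
opts.numepochs = 10;
opts.batchsize = 100;         % must divide size(train_x, 1) exactly
[nn, Loss, accuracy, L] = nntrain(nn, train_x, train_y, opts, test_x, test_y);

figure;
subplot(1, 2, 1);
plot(1:opts.numepochs, Loss);
xlabel('epoch'); ylabel('training loss');
subplot(1, 2, 2);
plot(1:opts.numepochs, accuracy);
xlabel('epoch'); ylabel('test accuracy');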
