No big deal, just go fix it: the problem is your dataset config. Copy the sequence list again and edit the paths line by line in configSeqs.m.
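For reference, a minimal sketch of what one entry in configSeqs.m typically looks like; the field names match how the script below uses them (s.path, s.nz, s.ext, ...), while the sequence name, frame range, and path are illustrative placeholders to replace with your local setup.

function seqs = configSeqs
% one illustrative entry; duplicate it and fix the path for every sequence
seqs = {struct('name','coke', ...
    'path','E:\data\OTB\Coke\img\', ... % point this at your local image folder
    'startFrame',1, 'endFrame',291, ... % frame range of this sequence (illustrative)
    'nz',4, ...                         % zero-padding width of filenames, e.g. 0001.jpg
    'ext','jpg', ...
    'init_rect',[0,0,0,0])};            % placeholder; the real box comes from ./anno/
end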
close all
clear
clc
warning off all;
addpath('./util');
addpath('E:\code\vlfeat-0.9.21-bin\vlfeat-0.9.21\toolbox'); % local VLFeat install
vl_setup
addpath('./rstEval');
addpath('./trackers/VIVID_Tracker');
seqs=configSeqs;
trackers=configTrackers;
shiftTypeSet = {'left','right','up','down','topLeft','topRight','bottomLeft','bottomRight','scale_8','scale_9','scale_11','scale_12'};
evalType='TRE'; %'OPE','SRE','TRE'
if ~exist('./tmp','dir'), mkdir('./tmp'); end % diary errors out if the folder is missing
diary(['./tmp/' evalType '.txt']);
numSeq=length(seqs);
numTrk=length(trackers);
finalPath = ['./results/results_' evalType '_CVPR13/'];
if ~exist(finalPath,'dir')
mkdir(finalPath);
end
tmpRes_path = ['./tmp/' evalType '/'];
bSaveImage=0;
if ~exist(tmpRes_path,'dir')
mkdir(tmpRes_path);
end
pathAnno = './anno/';
for idxSeq=1:length(seqs)
s = seqs{idxSeq};
% if ~strcmp(s.name, 'coke')
% continue;
% end
s.len = s.endFrame - s.startFrame + 1;
s.s_frames = cell(s.len,1);
nz = strcat('%0',num2str(s.nz),'d'); % zero-padded width of the frame index in the filename
for i=1:s.len
image_no = s.startFrame + (i-1);
id = sprintf(nz,image_no);
s.s_frames{i} = strcat(s.path,id,'.',s.ext);
end
img = imread(s.s_frames{1});
[imgH,imgW,ch]=size(img);
rect_anno = dlmread([pathAnno s.name '.txt']);
numSeg = 20;
[subSeqs, subAnno]=splitSeqTRE(s,numSeg,rect_anno);
switch evalType
case 'SRE'
subS = subSeqs{1};
subA = subAnno{1};
subSeqs=[];
subAnno=[];
r=subS.init_rect;
for i=1:length(shiftTypeSet)
subSeqs{i} = subS;
shiftType = shiftTypeSet{i};
subSeqs{i}.init_rect=shiftInitBB(subS.init_rect,shiftType,imgH,imgW);
subSeqs{i}.shiftType = shiftType;
subAnno{i} = subA;
end
case 'OPE'
subS = subSeqs{1};
subSeqs=[];
subSeqs{1} = subS;
subA = subAnno{1};
subAnno=[];
subAnno{1} = subA;
otherwise
end
for idxTrk=1:numTrk
t = trackers{idxTrk};
% if ~strcmp(t.name, 'LSK')
% continue;
% end
% validate the results
if exist([finalPath s.name '_' t.name '.mat'], 'file')
load([finalPath s.name '_' t.name '.mat']);
bfail=checkResult(results, subAnno);
if bfail
disp([s.name ' ' t.name]);
end
continue;
end
switch t.name
case {'VTD','VTS'}
continue;
end
results = [];
for idx=1:length(subSeqs)
disp([num2str(idxTrk) '_' t.name ', ' num2str(idxSeq) '_' s.name ': ' num2str(idx) '/' num2str(length(subSeqs))])
rp = [tmpRes_path s.name '_' t.name '_' num2str(idx) '/'];
if bSaveImage && ~exist(rp,'dir')
mkdir(rp);
end
subS = subSeqs{idx};
subS.name = [subS.name '_' num2str(idx)];
% subS.s_frames = subS.s_frames(1:20);
% subS.len=20;
% subS.endFrame=subS.startFrame+subS.len-1;
funcName = ['res=run_' t.name '(subS, rp, bSaveImage);'];
try
switch t.name
case {'VR','TM','RS','PD','MS'}
otherwise
cd(['./trackers/' t.name]);
addpath(genpath('./'))
end
eval(funcName);
switch t.name
case {'VR','TM','RS','PD','MS'}
otherwise
rmpath(genpath('./'))
cd('../../');
end
if isempty(res)
results = [];
break;
end
catch err
disp(['error: ' err.message]);
rmpath(genpath('./'))
cd('../../');
res=[];
continue;
end
res.len = subS.len;
res.annoBegin = subS.annoBegin;
res.startFrame = subS.startFrame;
switch evalType
case 'SRE'
res.shiftType = shiftTypeSet{idx};
end
results{idx} = res;
end
save([finalPath s.name '_' t.name '.mat'], 'results');
end
end
figure
t=clock;
t=uint8(t(2:end));
disp([num2str(t(1)) '/' num2str(t(2)) ' ' num2str(t(3)) ':' num2str(t(4)) ':' num2str(t(5))]);
The perfPlot script that finally ran through is as follows:
clear
close all;
clc
addpath('./util');
addpath(('./rstEval'));
attPath = '.\anno\att\'; % The folder that contains the annotation files for sequence attributes
attName={'illumination variation' 'out-of-plane rotation' 'scale variation' 'occlusion' 'deformation' 'motion blur' 'fast motion' 'in-plane rotation' 'out of view' 'background clutter' 'low resolution'};
attFigName={'illumination_variations' 'out-of-plane_rotation' 'scale_variations' 'occlusions' 'deformation' 'blur' 'abrupt_motion' 'in-plane_rotation' 'out-of-view' 'background_clutter' 'low_resolution'};
plotDrawStyleAll={ struct('color',[1,0,0],'lineStyle','-'),...
struct('color',[0,1,0],'lineStyle','-'),...
struct('color',[0,0,1],'lineStyle','-'),...
struct('color',[0,0,0],'lineStyle','-'),...% struct('color',[1,1,0],'lineStyle','-'),...%yellow
struct('color',[1,0,1],'lineStyle','-'),...%pink
struct('color',[0,1,1],'lineStyle','-'),...
struct('color',[0.5,0.5,0.5],'lineStyle','-'),...%gray-25%
struct('color',[136,0,21]/255,'lineStyle','-'),...%dark red
struct('color',[255,127,39]/255,'lineStyle','-'),...%orange
struct('color',[0,162,232]/255,'lineStyle','-'),...%Turquoise
struct('color',[163,73,164]/255,'lineStyle','-'),...%purple %%%%%%%%%%%%%%%%%%%%
struct('color',[1,0,0],'lineStyle','--'),...
struct('color',[0,1,0],'lineStyle','--'),...
struct('color',[0,0,1],'lineStyle','--'),...
struct('color',[0,0,0],'lineStyle','--'),...% struct('color',[1,1,0],'lineStyle','--'),...%yellow
struct('color',[1,0,1],'lineStyle','--'),...%pink
struct('color',[0,1,1],'lineStyle','--'),...
struct('color',[0.5,0.5,0.5],'lineStyle','--'),...%gray-25%
struct('color',[136,0,21]/255,'lineStyle','--'),...%dark red
struct('color',[255,127,39]/255,'lineStyle','--'),...%orange
struct('color',[0,162,232]/255,'lineStyle','--'),...%Turquoise
struct('color',[163,73,164]/255,'lineStyle','--'),...%purple %%%%%%%%%%%%%%%%%%%
struct('color',[1,0,0],'lineStyle','-.'),...
struct('color',[0,1,0],'lineStyle','-.'),...
struct('color',[0,0,1],'lineStyle','-.'),...
struct('color',[0,0,0],'lineStyle','-.'),...% struct('color',[1,1,0],'lineStyle',':'),...%yellow
struct('color',[1,0,1],'lineStyle','-.'),...%pink
struct('color',[0,1,1],'lineStyle','-.'),...
struct('color',[0.5,0.5,0.5],'lineStyle','-.'),...%gray-25%
struct('color',[136,0,21]/255,'lineStyle','-.'),...%dark red
struct('color',[255,127,39]/255,'lineStyle','-.'),...%orange
struct('color',[0,162,232]/255,'lineStyle','-.'),...%Turquoise
struct('color',[163,73,164]/255,'lineStyle','-.'),...%purple
};
plotDrawStyle10={ struct('color',[1,0,0],'lineStyle','-'),...
struct('color',[0,1,0],'lineStyle','--'),...
struct('color',[0,0,1],'lineStyle',':'),...
struct('color',[0,0,0],'lineStyle','-'),...% struct('color',[1,1,0],'lineStyle','-'),...%yellow
struct('color',[1,0,1],'lineStyle','--'),...%pink
struct('color',[0,1,1],'lineStyle',':'),...
struct('color',[0.5,0.5,0.5],'lineStyle','-'),...%gray-25%
struct('color',[136,0,21]/255,'lineStyle','--'),...%dark red
struct('color',[255,127,39]/255,'lineStyle',':'),...%orange
struct('color',[0,162,232]/255,'lineStyle','-'),...%Turquoise
};
seqs=configSeqs;
trackers=configTrackers;
% seqs = seqs(1:10);
% trackers = trackers(1:10);
numSeq=length(seqs);
numTrk=length(trackers);
nameTrkAll=cell(numTrk,1);
for idxTrk=1:numTrk
t = trackers{idxTrk};
nameTrkAll{idxTrk}=t.namePaper;
end
nameSeqAll=cell(numSeq,1);
numAllSeq=zeros(numSeq,1);
att=[];
for idxSeq=1:numSeq
s = seqs{idxSeq};
nameSeqAll{idxSeq}=s.name;
s.len = s.endFrame - s.startFrame + 1;
numAllSeq(idxSeq) = s.len;
att(idxSeq,:)=load([attPath s.name '.txt']);
end
attNum = size(att,2);
figPath = '.\figs\overall\';
perfMatPath = '.\perfMat\overall\';
if ~exist(figPath,'dir')
mkdir(figPath);
end
metricTypeSet = {'error', 'overlap'};
% evalTypeSet = {'SRE', 'TRE', 'OPE'};
evalTypeSet = 'TRE'; % I changed this line!!! (originally the full set {'SRE','TRE','OPE'})
% rankingType = 'AUC'; % overridden by the next line
rankingType = 'threshold'; % AUC, threshold
rankNum = 10;%number of plots to show
if rankNum == 10
plotDrawStyle=plotDrawStyle10;
else
plotDrawStyle=plotDrawStyleAll;
end
thresholdSetOverlap = 0:0.05:1;
thresholdSetError = 0:50;
for i=1:length(metricTypeSet)
metricType = metricTypeSet{i};%error,overlap
switch metricType
case 'overlap'
thresholdSet = thresholdSetOverlap;
rankIdx = 11;
xLabelName = 'Overlap threshold';
yLabelName = 'Success rate';
case 'error'
thresholdSet = thresholdSetError;
rankIdx = 21;
xLabelName = 'Location error threshold';
yLabelName = 'Precision';
end
if strcmp(metricType,'error') && strcmp(rankingType,'AUC')
continue;
end
tNum = length(thresholdSet);
% for j=1:length(evalTypeSet)
evalType = evalTypeSet;%SRE, TRE, OPE
plotType = [metricType '_' evalType];
switch metricType
case 'overlap'
titleName = ['Success plots of ' evalType];
case 'error'
titleName = ['Precision plots of ' evalType];
end
dataName = [perfMatPath 'aveSuccessRatePlot_' num2str(numTrk) 'alg_' plotType '.mat'];
% If the performance Mat file, dataName, does not exist, it will call
% genPerfMat to generate the file.
if ~exist(dataName,'file')
genPerfMat(seqs, trackers, evalType, nameTrkAll, perfMatPath);
end
load(dataName);
numTrk = size(aveSuccessRatePlot,1);
if rankNum > numTrk || rankNum < 0
rankNum = numTrk;
end
figName= [figPath 'quality_plot_' plotType '_' rankingType];
idxSeqSet = 1:length(seqs);
% draw and save the overall performance plot
plotDrawSave(numTrk,plotDrawStyle,aveSuccessRatePlot,idxSeqSet,rankNum,rankingType,rankIdx,nameTrkAll,thresholdSet,titleName, xLabelName,yLabelName,figName,metricType);
% draw and save the performance plot for each attribute
attTrld = 0;
for attIdx=1:attNum
idxSeqSet=find(att(:,attIdx)>attTrld);
if length(idxSeqSet) < 2
continue;
end
disp([attName{attIdx} ' ' num2str(length(idxSeqSet))])
figName=[figPath attFigName{attIdx} '_' plotType '_' rankingType];
titleName = ['Plots of ' evalType ': ' attName{attIdx} ' (' num2str(length(idxSeqSet)) ')'];
switch metricType
case 'overlap'
titleName = ['Success plots of ' evalType ' - ' attName{attIdx} ' (' num2str(length(idxSeqSet)) ')'];
case 'error'
titleName = ['Precision plots of ' evalType ' - ' attName{attIdx} ' (' num2str(length(idxSeqSet)) ')'];
end
plotDrawSave(numTrk,plotDrawStyle,aveSuccessRatePlot,idxSeqSet,rankNum,rankingType,rankIdx,nameTrkAll,thresholdSet,titleName, xLabelName,yLabelName,figName,metricType);
end
end
% end
The modified DSiam tracking code is as follows:
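One thing to watch (this entry is illustrative, not from the original post): the benchmark script above invokes each tracker via eval(['res=run_' t.name '(subS, rp, bSaveImage);']), so the tracker's entry in configTrackers.m has to match the runner's filename; either register the tracker under a matching name or rename run_main below accordingly. A minimal sketch, assuming the tracker is registered as DSiam:

function trackers = configTrackers
% illustrative entry: with this, the benchmark calls run_DSiam(subS, rp, bSaveImage),
% which it expects to find at ./trackers/DSiam/run_DSiam.m
trackers = {struct('name','DSiam','namePaper','DSiam')};
end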
function results = run_main(seq, res_path, bSaveImage)
%%
% This code is a demo for DSiam, an online-updated deep tracker for fast tracking.
% If you use this code, please cite:
% Qing Guo, Wei Feng, Ce Zhou, Rui Huang, Liang Wan, Song Wang.
% Learning Dynamic Siamese Network for Visual Object Tracking. In ICCV 2017.
%
% Qing Guo, 2017.
%%
%===================================%
% Path initial setting
%===================================%
netname = 'siamfc';
% '1res' denotes the multi-layer DSiam (DSiamM in paper) and uses two layers for tracking
% '0res' denotes the single-layer DSiam (DSiam in paper) and uses the last layer for tracking
nettype = '0res';
rootdir = '../DSIAM/';
addpath(genpath([rootdir,'/matconvnet/matlab/']));
vl_setupnn ;
addpath([rootdir,'utils']);
addpath([rootdir,'models']);
%===================================%
% Data initialization
%===================================%
s_frames = seq.s_frames;
initrect = seq.init_rect;
% read img
imgs = vl_imreadjpeg(s_frames,'numThreads', 12);
img = imgs{1};
if size(img,3)==1 % grayscale image; size(img,3)==3 means RGB
img = repmat(img,1,1,3); % replicate the single channel into three
end
nFrames = numel(s_frames); % number of frames in the sequence
state = obj_initialize(img, initrect); % see obj_initialize below; takes the first image and the initial rect
state.seq.frame = 1;
state.seq.nFrames = nFrames;
state.seq.seqname = seq.name;
state.seq.time = 0;
state.seq.isfail = 0;
state.seq.scores = zeros(nFrames,1); % zeros(nFrames) would allocate an nFrames-by-nFrames matrix
state.seq.scores(1) = 10;
% defaults: nargin can never exceed 3 with this signature, so this block
% always fires and overrides the settings at the top; remove it if
% 'siamfc'/'0res' should take effect
if nargin < 4
netname = 'vgg19';
nettype = '1res';
end
[state, opts] = fcnet_init(img, state);
% fcnet_init (in the utils folder) builds the Siamese network and sets the tracking parameters
res = initrect;
duration = 0;
for it = 2:nFrames % start from the second frame
state.seq.frame = it;
fprintf('Processing frame %d/%d...\n', state.seq.frame, nFrames);
% **********************************
% VOT: Get next frame
% **********************************
img = imgs{it};
if size(img,3)==1
img = repmat(img,1,1,3);
end
state = fcn_update(state, img,opts);
initstate = [state.obj.targetLoc];
duration = state.seq.time;
res = [res; initstate];
%==================================
%Display result
%==================================
% if bSaveImage
%------------------------------- show the tracking result
imshow(uint8(img));
rectangle('Position',initstate,'LineWidth',1,'EdgeColor','r');
hold on;
text(5, 18, strcat('#',num2str(it)), 'Color','y', 'FontWeight','bold', 'FontSize',20);
set(gca,'position',[0 0 1 1]);
pause(0.01);
hold off;
% saveas(gcf,[res_path num2str(it) '.jpg'])
% imwrite(frame2im(getframe(gcf)),[res_path num2str(it) '.jpg']); % these two lines save a per-frame screenshot
% end
end
results.res=res;
results.type='rect';
results.fps=(seq.len)/duration;
disp(['fps: ' num2str(results.fps)])
end
function [state] = obj_initialize(I, region, varargin)
gray = double(I(:,:,1));
[height, width] = size(gray);
% If the provided region is a polygon (more than 4 values), convert it to an axis-aligned box
if numel(region) > 4
x1 = round(min(region(1:2:end)));
x2 = round(max(region(1:2:end)));
y1 = round(min(region(2:2:end)));
y2 = round(max(region(2:2:end)));
region = round([x1, y1, x2 - x1, y2 - y1]);
else
region = round([round(region(1)), round(region(2)), ...
round(region(1) + region(3)) - round(region(1)), ...
round(region(2) + region(4)) - round(region(2))]);
end
% clamp the box to the image boundary
x1 = max(0, region(1));
y1 = max(0, region(2));
x2 = min(width-1, region(1) + region(3) - 1);
y2 = min(height-1, region(2) + region(4) - 1);
% derive the target center (1-based) and size from the clamped box
state.obj.pos = [y1 + y2 + 1, x1 + x2 + 1] / 2;
state.obj.targetsz = [y2-y1+1,x2-x1+1];
state.obj.base_targetsz = [y2-y1+1,x2-x1+1];
state.obj.targetLoc = [x1, y1, state.obj.targetsz([2,1])];
state.obj.change_alphaf = [];
state.obj.change_featf = [];
end
function state= fcn_update(state,img,opts)
%load state
targetsz = state.obj.targetsz.*opts.targetszrate;
pos = state.obj.pos;
s_x = state.obj.s_x;
corrfeat = state.obj.corrfeat;
isfail = state.seq.isfail;
net_conv = state.net.net_conv;
net_obj = state.net.net_obj;
%load params
instanceSize = opts.instanceSize;
window = opts.window;
min_s_x = opts.min_s_x;
max_s_x = opts.max_s_x;
avgChans = opts.avgChans;
scales = opts.scales;
scaledInstance = s_x .* scales;
scaledTarget = [targetsz(1) .* scales; targetsz(2) .* scales];
tic;
% extract scaled crops of the search region x at the previous target position
x_crops = make_scale_pyramid(img, pos, scaledInstance, instanceSize, avgChans, opts); % optionally also takes a saliency map
% This extracts target-centered patches at three scales; one forward pass of
% the tracking network then yields the new target position. Tracking a whole
% sequence is just this step iterated frame by frame.
% evaluate the offline-trained network on the exemplar features
[newTargetPosition, newScale, score,responseMap,scorePos] = tracker_eval(net_conv,round(s_x), ...
corrfeat, x_crops, pos, window, opts);
pos = gather(newTargetPosition);
if opts.isupdate
% score
if score >0
wc_z = targetsz(2) + opts.contextAmount*sum(targetsz);
hc_z = targetsz(1) + opts.contextAmount*sum(targetsz);
s_z = sqrt(wc_z*hc_z);
[z_crop, ~] = get_subwindow_tracking(img, pos, ...
[opts.exemplarSize opts.exemplarSize], [round(s_z) round(s_z)], opts.avgChans,opts.averageImage);
z_crop = gpuArray(single(z_crop));
net_obj.eval({opts.netobj_input, z_crop});
state.seq.scores(state.seq.frame) = score;
tcorrfeat{1} = net_obj.vars(opts.obj_feat_id(1)).value;
tcorrfeat{2} = net_obj.vars(opts.obj_feat_id(2)).value;
% updating the target variation transformation
if opts.vartransform
net_conv = update_v(net_conv,corrfeat,tcorrfeat,opts);
end
% updating the background suppression transformation
if opts.backsupression
[x_back(:,:,:,1), ~] = get_subwindow_tracking(gather(img), pos,...
[instanceSize instanceSize], [round(scaledInstance(newScale)) round(scaledInstance(newScale))], avgChans);
x_back(:,:,:,2) = x_back(:,:,:,1) .* opts.saliency_window; % alternative: state.obj.x_crop
net_obj.eval({opts.netobj_input, gpuArray(x_back)});
tcorrfeat{1} = net_obj.vars(opts.obj_feat_id(1)).value;
tcorrfeat{2} = net_obj.vars(opts.obj_feat_id(2)).value;
net_conv = update_w(net_conv,tcorrfeat,opts);
end
% scale damping and saturation
if isfail
wc_z = targetsz(2) + opts.contextAmount*sum(targetsz);
hc_z = targetsz(1) + opts.contextAmount*sum(targetsz);
s_z = sqrt(wc_z*hc_z);
scale_z = opts.exemplarSize / s_z;
d_search = (opts.instanceSize - opts.exemplarSize)/2;
pad = d_search/scale_z;
s_x = s_z + 2*pad;
isfail = 0;
else
s_x = max(min_s_x, min(max_s_x, (1-opts.scaleLR)*s_x + opts.scaleLR*scaledInstance(newScale)));
targetsz = (1-opts.scaleLR)*targetsz + opts.scaleLR*[scaledTarget(1,newScale) scaledTarget(2,newScale)];
end
else
isfail = 1;
s_x = max(min_s_x, min(max_s_x, s_x*1.1));
net_conv.layers(net_conv.getLayerIndex('circonv1_1')).block.enable = false;
net_conv.layers(net_conv.getLayerIndex('circonv1_2')).block.enable = false;
if strcmp(opts.nettype,'1res')
net_conv.layers(net_conv.getLayerIndex('circonv2_1')).block.enable = false;
net_conv.layers(net_conv.getLayerIndex('circonv2_2')).block.enable = false;
end
end
else
% scale damping and saturation
s_x = max(min_s_x, min(max_s_x, (1-opts.scaleLR)*s_x + opts.scaleLR*scaledInstance(newScale)));
targetsz = (1-opts.scaleLR)*targetsz + opts.scaleLR*[scaledTarget(1,newScale) scaledTarget(2,newScale)];
end
% validate
tmp = pos+targetsz./2;
if tmp(1)<0||tmp(2)<0||tmp(1)>opts.imgsz(1)||tmp(2)>opts.imgsz(2)
state.obj.failframes = state.obj.failframes+1;
if state.obj.failframes>=2
pos = [size(img,1),size(img,2)]./2;
state.obj.failframes =0;
end
isfail = 1;
net_conv.layers(net_conv.getLayerIndex('circonv1_1')).block.enable = false;
net_conv.layers(net_conv.getLayerIndex('circonv1_2')).block.enable = false;
if strcmp(opts.nettype,'1res')
net_conv.layers(net_conv.getLayerIndex('circonv2_1')).block.enable = false;
net_conv.layers(net_conv.getLayerIndex('circonv2_2')).block.enable = false;
end
end
targetsz = targetsz./opts.targetszrate;
state.obj.s_x = s_x;
state.obj.pos = pos;
state.obj.targetsz = targetsz;
state.obj.targetLoc = [pos([2,1]) - targetsz([2,1])/2, targetsz([2,1])];
state.seq.time = state.seq.time + toc;
state.seq.isfail = isfail;
end
function [change_alphaf,change_featf] = update_change(corrfeat,new_corrfeat,lambda,issum)
if nargin<4
issum = false;
end
% learn a filter mapping corrfeat to new_corrfeat (ridge regression in the Fourier domain)
cos_window = hann(size(corrfeat,1)) * hann(size(corrfeat,2))';
tcorrfeat = bsxfun(@times, corrfeat, cos_window);
corrfeatf = fft2(tcorrfeat);
numcorr = numel(corrfeatf(:,:,1));
if ~issum
kcorrfeatf = (corrfeatf .* conj(corrfeatf))./numcorr;
else
kcorrfeatf = sum(corrfeatf .* conj(corrfeatf),3)./numel(corrfeatf);
end
tnew_corrfeat = bsxfun(@times, new_corrfeat, cos_window);
tnew_corrfeatf = fft2(tnew_corrfeat);
alphaf = (tnew_corrfeatf./ (kcorrfeatf+ lambda));
m = 0.9;
change_alphaf = m*alphaf + (1-m)*tnew_corrfeat; % just change these two lines and it works!!! this one is V
change_featf = corrfeatf; % this one is W
end
% fast online learning for V
% V and W are the change coefficients of frame t-1, so from the start of
% tracking, the coefficients for the second frame are only computed at the
% third frame. Hence, after tracking the second frame, the exemplar features
% are stored in corrfeat and the frame-(t-1) features in tcorrfeat, and then
% the following code runs.
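% For reference, a sketch of the closed-form solve that update_change above
% implements (features are cosine-windowed first; F is the 2-D FFT, N the
% number of elements per channel):
%   V_{t-1} = F(f(O_{t-1})) ./ ( F(f(O_1)) .* conj(F(f(O_1)))/N + lambda )
% i.e. Fourier-domain ridge regression mapping the first-frame exemplar
% features f(O_1) onto the current features f(O_{t-1}).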
function net = update_v(net,feats_1,feats_t,p)
[alphaf,featf] = update_change(feats_1{1}(:,:,:,1),feats_t{1},p.v_lambda);
net.params(net.getParamIndex('cir11_alphaf')).value = alphaf;
net.params(net.getParamIndex('cir11_featf')).value = featf;
net.layers(net.getLayerIndex('circonv1_1')).block.enable = true;
if strcmp(p.nettype,'1res')
[alphaf,featf] = update_change(feats_1{2}(:,:,:,1),feats_t{2},p.v1_lambda);
net.params(net.getParamIndex('cir21_alphaf')).value = alphaf;
net.params(net.getParamIndex('cir21_featf')).value = featf;
net.layers(net.getLayerIndex('circonv2_1')).block.enable = true;
end
end
% fast online learning for W
% Specifically, after tracking frame t-1 we have the target position, so we can
% crop image I(t-1) to a region G(t-1) centered on it, with a search region
% Z(t-1) of the same size. G(t-1) is then multiplied by a Gaussian weight map
% to obtain Gbar(t-1), which properly highlights the foreground. We learn
% W(t-1) to encourage the deep features of G(t-1) to resemble those of Gbar(t-1).
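% Correspondingly (same notation as the V sketch above), update_change solves
%   W_{t-1} = F(f(Gbar_{t-1})) ./ ( F(f(G_{t-1})) .* conj(F(f(G_{t-1})))/N + lambda )
% so that applying W_{t-1} to the features of G(t-1) emphasizes the foreground
% and suppresses background responses.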
function net = update_w(net,feats,p)
[alphaf,featf] = update_change(feats{1}(:,:,:,1),feats{1}(:,:,:,2),p.w_lambda);
net.params(net.getParamIndex('cir12_alphaf')).value = alphaf;
net.params(net.getParamIndex('cir12_featf')).value = featf;
net.layers(net.getLayerIndex('circonv1_2')).block.enable = true;
if strcmp(p.nettype,'1res')
[alphaf,featf] = update_change(feats{2}(:,:,:,1),feats{2}(:,:,:,2),p.w1_lambda);
net.params(net.getParamIndex('cir22_alphaf')).value = alphaf;
net.params(net.getParamIndex('cir22_featf')).value = featf;
net.layers(net.getLayerIndex('circonv2_2')).block.enable = true;
end
end