1. MXNet symbolic graphs:
A symbolic graph built with MXNet is a static computation graph: both the graph structure and the memory plan are fixed before execution. Taking ResNet50_v2 as an example, the symbol for a Bottleneck unit is constructed as follows:
import mxnet as mx

def residual_unit(data, num_filter, stride, dim_match, name, bn_mom=0.9, workspace=256):
    """Pre-activation (v2) bottleneck unit, assembled as an MXnet symbol."""
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
    conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
                               no_bias=True, workspace=workspace, name=name + '_conv1')
    bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
    act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
    conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
                               no_bias=True, workspace=workspace, name=name + '_conv2')
    bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
    act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
    conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
                               workspace=workspace, name=name + '_conv3')
    if dim_match:
        shortcut = data
    else:
        # Projection shortcut branches off the pre-activated input (act1).
        shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                      workspace=workspace, name=name+'_sc')
    return conv3 + shortcut
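As a quick illustration of the static nature of the graph, the symbol can be composed and its output shape inferred before any memory is bound or any data flows through it. A minimal sketch using the residual_unit above:

# Compose the symbol; no computation happens yet -- this only builds the graph.
data = mx.sym.Variable('data')
unit = residual_unit(data, num_filter=256, stride=(1, 1), dim_match=False, name='stage1_unit1')

# Static shape inference over the whole graph, before binding any memory.
_, out_shapes, _ = unit.infer_shape(data=(1, 64, 56, 56))
print(out_shapes)   # [(1, 256, 56, 56)]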
2. Loading the symbolic graph and model parameters:
An MXNet pretrained model consists of a JSON graph definition and a .params weight file:
-- resnet-50-0000.params
-- resnet-50-symbol.json
Loading these two files yields the graph structure, the model weights, and the auxiliary parameters:
prefix, index, num_layer = 'resnet-50', args.epoch, 50
prefix = os.path.join(ROOT_PATH, "./mx_model/models/{}".format(prefix))
# load_checkpoint returns the symbol plus the weight (arg) and BN-statistics (aux) dicts
symbol, param_args, param_auxs = mx.model.load_checkpoint(prefix, index)
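Printing a few of the returned keys shows the MXNet naming convention that the parameter mapping in section 4 relies on: weights end in _weight/_gamma/_beta and live in the arg dictionary, while BN running statistics end in _moving_mean/_moving_var and live in the aux dictionary:

print(sorted(param_args.keys())[:4])
# e.g. ['bn0_beta', 'bn0_gamma', 'bn1_beta', 'bn1_gamma']
print(sorted(param_auxs.keys())[:2])
# e.g. ['bn0_moving_mean', 'bn0_moving_var']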
3. PyTorch dynamic graphs:
PyTorch is a define-by-run framework: the computation graph is built and memory is managed dynamically at run time, which suits research-oriented algorithm development. Thanks to the imperative programming model, the value of any tensor in the graph, and of its gradients, can be inspected at any point. The Bottleneck block of ResNet50_v2 looks like this:
import torch.nn as nn

class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=False):
        super(Bottleneck, self).__init__()
        # eps matches the MXnet BatchNorm layers; PyTorch's default momentum
        # (0.1) corresponds to MXnet's bn_mom=0.9.
        self.bn1 = nn.BatchNorm2d(inplanes, eps=2e-5)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, eps=2e-5)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, eps=2e-5)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if downsample:
            self.conv_sc = nn.Conv2d(inplanes, planes * 4, kernel_size=1, stride=stride, bias=False)
        self.stride = stride

    def forward(self, input):
        # Pre-activation ordering: BN + ReLU come before each convolution (ResNet v2).
        out = self.bn1(input)
        out1 = self.relu(out)
        residual = input
        out = self.conv1(out1)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)
        if self.downsample:
            # The shortcut branches off the pre-activated input, matching the MXnet symbol above.
            residual = self.conv_sc(out1)
        out += residual
        return out
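Because the graph is defined by running the code, the block can be sanity-checked on the spot with a random tensor; no separate bind or compile step is required. A minimal smoke test:

import torch

block = Bottleneck(inplanes=64, planes=64, stride=1, downsample=True)
block.eval()  # use running statistics in the BN layers
with torch.no_grad():
    out = block(torch.randn(1, 64, 56, 56))
print(out.shape)  # torch.Size([1, 256, 56, 56])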
4. Parsing MXNet parameters and initializing the PyTorch model:
The first step is to convert the MXNet parameters into a dictionary of NumPy arrays. BatchNorm, Conv2d, and fully connected layers are parsed as follows:
import numpy as np

eps = 2e-5  # must match the eps used by the MXnet BatchNorm layers

def bn_parse(args, auxs, name, args_dict, fix_gamma=False):
    """ name[0]: PyTorch layer name;
        name[1]: MXnet layer name."""
    args_dict[name[0]] = {}
    if not fix_gamma:
        args_dict[name[0]]['running_mean'] = auxs[name[1]+'_moving_mean'].asnumpy()
        args_dict[name[0]]['running_var'] = auxs[name[1]+'_moving_var'].asnumpy()
        args_dict[name[0]]['gamma'] = args[name[1]+'_gamma'].asnumpy()
        args_dict[name[0]]['beta'] = args[name[1]+'_beta'].asnumpy()
    else:
        # fix_gamma=True fixes gamma to 1 in MXnet; fold beta into the running
        # mean so that an affine-free PyTorch BN layer reproduces
        # (x - mean) / sqrt(var + eps) + beta.
        _mv = auxs[name[1]+'_moving_var'].asnumpy()
        _mm = auxs[name[1]+'_moving_mean'].asnumpy() - np.multiply(args[name[1]+'_beta'].asnumpy(), np.sqrt(_mv+eps))
        args_dict[name[0]]['running_mean'] = _mm
        args_dict[name[0]]['running_var'] = _mv
    return args_dict

def conv_parse(args, auxs, name, args_dict):
    """ name[0]: PyTorch layer name;
        name[1]: MXnet layer name."""
    args_dict[name[0]] = {}
    # MXnet and PyTorch share the OIHW weight layout, so no transpose is needed.
    args_dict[name[0]]['weight'] = args[name[1]+'_weight'].asnumpy()  # N, M, k1, k2
    return args_dict

def fc_parse(args, auxs, name, args_dict):
    """ name[0]: PyTorch layer name;
        name[1]: MXnet layer name."""
    args_dict[name[0]] = {}
    args_dict[name[0]]['weight'] = args[name[1]+'_weight'].asnumpy()
    args_dict[name[0]]['bias'] = args[name[1]+'_bias'].asnumpy()
    return args_dict
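The arg_parse helper used in the next step is assumed to walk the whole network and call these parsers with (PyTorch name, MXNet name) pairs. A hedged sketch for a few layers; the MXNet names follow the model-zoo convention, while the PyTorch names here are hypothetical and must match the actual module names of your model definition:

args_dict = {}
# Stem: MXnet model-zoo names are 'conv0'/'bn0'; 'module.conv1'/'module.bn1'
# are hypothetical PyTorch module names.
args_dict = conv_parse(param_args, param_auxs, ('module.conv1', 'conv0'), args_dict)
args_dict = bn_parse(param_args, param_auxs, ('module.bn1', 'bn0'), args_dict)
# First bottleneck of stage 1: MXnet names follow 'stage{i}_unit{j}_*'.
args_dict = bn_parse(param_args, param_auxs, ('module.layer1.0.bn1', 'stage1_unit1_bn1'), args_dict)
args_dict = conv_parse(param_args, param_auxs, ('module.layer1.0.conv1', 'stage1_unit1_conv1'), args_dict)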
Next, iterate over every module of the PyTorch model and copy the parameters in, which initializes the PyTorch model from the MXNet pretrained checkpoint:
# model initialization for PyTorch from MXnet params
import torch
import torch.nn as nn

class resnet(object):
    def __init__(self, name, num_layer, args, auxs, prefix='module.'):
        self.name = name
        num_stages = 4
        if num_layer == 50:
            units = [3, 4, 6, 3]
        elif num_layer == 101:
            units = [3, 4, 23, 3]
        else:
            raise ValueError('unsupported depth: {}'.format(num_layer))
        self.num_layer = str(num_layer)
        # arg_parse builds the {PyTorch name: numpy params} dictionary by
        # calling bn_parse/conv_parse/fc_parse for every layer.
        self.param_dict = arg_parse(args, auxs, num_stages, units, prefix=prefix)

    def bn_init(self, n, m):
        if m.weight is not None:  # affine BN layers carry gamma/beta
            m.weight.data.copy_(torch.FloatTensor(self.param_dict[n]['gamma']))
            m.bias.data.copy_(torch.FloatTensor(self.param_dict[n]['beta']))
        m.running_mean.copy_(torch.FloatTensor(self.param_dict[n]['running_mean']))
        m.running_var.copy_(torch.FloatTensor(self.param_dict[n]['running_var']))

    def conv_init(self, n, m):
        m.weight.data.copy_(torch.FloatTensor(self.param_dict[n]['weight']))

    def fc_init(self, n, m):
        m.weight.data.copy_(torch.FloatTensor(self.param_dict[n]['weight']))
        m.bias.data.copy_(torch.FloatTensor(self.param_dict[n]['bias']))

    def init_model(self, model):
        for n, m in model.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                self.bn_init(n, m)
            elif isinstance(m, nn.Conv2d):
                self.conv_init(n, m)
            elif isinstance(m, nn.Linear):
                self.fc_init(n, m)
        return model
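To validate the port end to end, the checkpoint loaded in section 2 can initialize the network, and its output can be compared with the original MXNet graph on the same input. A hedged sketch, where build_resnet50_v2 stands for whatever constructor assembles the Bottleneck blocks above, and prefix='' assumes the model is not wrapped in nn.DataParallel:

import mxnet as mx
import numpy as np
import torch

torch_model = build_resnet50_v2()   # hypothetical constructor for the v2 network
torch_model = resnet('resnet-50', 50, param_args, param_auxs, prefix='').init_model(torch_model)
torch_model.eval()

# Feed one fixed input through both frameworks and compare the class probabilities
# (the model-zoo symbol ends in SoftmaxOutput, so softmax the PyTorch logits too).
x = np.random.uniform(size=(1, 3, 224, 224)).astype(np.float32)
with torch.no_grad():
    pt_prob = torch.softmax(torch_model(torch.from_numpy(x)), dim=1).numpy()
mod = mx.mod.Module(symbol, label_names=None)
mod.bind(data_shapes=[('data', x.shape)], for_training=False)
mod.set_params(param_args, param_auxs)
mx_prob = mod.predict(mx.io.NDArrayIter(data=x, batch_size=1)).asnumpy()
print(np.abs(pt_prob - mx_prob).max())   # should be on the order of 1e-5 or below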
5. Using the MXNet data loader:
MXNet's RecordIO (.rec) files speed up data loading and preprocessing, which in turn improves the overall training throughput of the PyTorch model. Taking ImageNet as an example, the .rec files are built as follows, where the /data/rec directory stores train.rec and val.rec, and /data/train and /data/val are the ImageNet-2012 training and validation sets respectively:
# create ImageNet rec file
python /usr/local/lib/python2.7/dist-packages/mxnet/tools/im2rec.py /data/rec/train /data/train --list --recursive
python /usr/local/lib/python2.7/dist-packages/mxnet/tools/im2rec.py /data/rec/train.lst /data/train --num-thread 4
python /usr/local/lib/python2.7/dist-packages/mxnet/tools/im2rec.py /data/rec/val /data/val --list --recursive
python /usr/local/lib/python2.7/dist-packages/mxnet/tools/im2rec.py /data/rec/val.lst /data/val --num-thread 4
The data is then loaded through mx.io.ImageRecordIter, and the returned NDArray batches are converted into PyTorch tensors, ready for training, validation, and testing of the PyTorch model. The iterator wrapper looks like this:
# MXnet val-data loader
import os
import mxnet as mx
import torch

class mx_val_loader(object):
    def __init__(self, batch_size, rgb_mean='123.68,116.779,103.939', rgb_std='58.393,57.12,57.375',
                 image_shape='3,224,224', data_nthreads=4, size=5000, cuda=False):
        data_shape = tuple([int(i) for i in image_shape.split(',')])
        rgb_mean = [float(i) for i in rgb_mean.split(',')]
        rgb_std = [float(i) for i in rgb_std.split(',')]
        valid_data = os.path.join('/data/rec', 'val.rec')
        self.cuda = cuda
        self.size = size // batch_size  # number of batches per epoch
        self.data = mx.io.ImageRecordIter(
            path_imgrec = valid_data,
            label_width = 1,
            preprocess_threads = data_nthreads,
            batch_size = batch_size,
            data_shape = data_shape,
            data_name = 'data',
            label_name = 'softmax_label',
            resize = data_shape[1],  # resize the shorter edge, then center-crop to data_shape
            rand_crop = False,
            rand_mirror = False,
            mean_r = rgb_mean[0],
            mean_g = rgb_mean[1],
            mean_b = rgb_mean[2],
            std_r = rgb_std[0],
            std_g = rgb_std[1],
            std_b = rgb_std[2])
        self.data.reset()

    def __iter__(self):
        self.data.reset()  # rewind the MXnet iterator so every epoch starts fresh
        for batch in self.data:
            # NDArray -> numpy -> torch tensor; labels become int64 for CrossEntropyLoss
            nd_data = batch.data[0].asnumpy()
            nd_label = batch.label[0].asnumpy()
            input_data = torch.FloatTensor(nd_data)
            input_label = torch.LongTensor(nd_label)
            if self.cuda:
                yield input_data.cuda(non_blocking=True), input_label.cuda(non_blocking=True)
            else:
                yield input_data, input_label

    def __len__(self):
        return self.size

    def get_ds(self):
        return self.data
At training time, the wrapped MXNet iterator is used exactly like a PyTorch data loader:
val_loader = mx_val_loader(batch_size=args.batch_size,
                           data_nthreads=args.workers,
                           size=IMGNET_VAL_SIZE,
                           cuda=args.evaluate)
for i, (input, target) in enumerate(val_loader):
    target = target.cuda(non_blocking=True)
    # compute output
    output = model(input)
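From here the evaluation loop proceeds exactly as with a native DataLoader; a hedged sketch that accumulates top-1 accuracy over the wrapped iterator:

correct, total = 0, 0
model.eval()
with torch.no_grad():
    for input, target in val_loader:
        target = target.cuda(non_blocking=True)
        output = model(input)
        correct += (output.argmax(dim=1) == target).sum().item()
        total += target.size(0)
print('top-1 accuracy: %.4f' % (correct / float(total)))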