A Summary of Commonly Used MXNet Module Commands

NDArray

import mxnet as mx
import numpy as np

# Initialize a 2-D matrix as an NDArray
a = mx.nd.array([[1,2],[3,4]])
# Initialize an array with a specified numeric type
b = mx.nd.array([[1,2],[3,4]], dtype=np.int8)
# Convert between NDArray and NumPy
c = a.asnumpy() # NDArray -> NumPy
d = mx.nd.array(c) # NumPy -> NDArray
# Check which device an NDArray object lives on
print(d.context)
# Move a variable to a specified device (assumes a GPU is available)
d = d.as_in_context(mx.gpu(0))
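
NDArray also supports NumPy-style arithmetic directly on the device. A minimal sketch of a few common operations (the names e, f, g are new to this example):

# Element-wise arithmetic and matrix multiplication on NDArray
e = a + a           # element-wise addition
f = a * a           # element-wise (Hadamard) product
g = mx.nd.dot(a, a) # matrix product
print(g.asnumpy())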

Symbol

import mxnet as mx
#-------------------Network definition----------------------------------------------
data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data=data, num_filter=128, kernel=(3,3), pad=(1,1), name='conv1')
bn = mx.sym.BatchNorm(data=conv, name='bn1')
relu = mx.sym.Activation(data=bn, act_type='relu', name='relu1')
pool = mx.sym.Pooling(data=relu, kernel=(2,2), stride=(2,2), pool_type='max', name='pool1')
fc = mx.sym.FullyConnected(data=pool, num_hidden=2, name='fc1')
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
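
As a quick sanity check on the structure just defined, mx.viz.print_summary prints a layer-by-layer table; the (1, 3, 10, 10) input shape here is an assumption matching the infer_shape call below:

# Print a layer-by-layer summary (layer name, output shape, parameter count)
mx.viz.print_summary(sym, shape={'data': (1, 3, 10, 10)})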

#-------------------Common debugging code--------------------------------------------
# List the argument (parameter) names of a Symbol object
print(sym.list_arguments())
# Get the shapes of the layer parameters, the outputs, and the auxiliary states;
# calling this method requires the input data shape
arg_shape, out_shape, aux_shape = sym.infer_shape(data=(1,3,10,10))
# get_internals() exposes the output of every layer in the Symbol
sym_mini = sym.get_internals()['pool1_output'] # truncate sym from the input up to the pooling layer
print(sym_mini.list_arguments())
# New layers can be appended after the truncation point
fc_new = mx.sym.FullyConnected(data=sym_mini, num_hidden=5, name='fc_new')
sym_new = mx.sym.SoftmaxOutput(data=fc_new, name='softmax')
# Get an executor; simple_bind allocates space for all arguments automatically
# (plain bind would require an NDArray for every weight and bias as well)
e = sym_new.simple_bind(mx.cpu(), data=(1, 3, 10, 10))
# Run a forward pass; parameters are uninitialized, so this only verifies shapes
output = e.forward(is_train=False, data=mx.nd.arange(300).reshape((1, 3, 10, 10)))
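
forward() returns a list of output NDArrays, one per output of the Symbol. A quick shape check, given the (1, 3, 10, 10) input above (the 10x10 map halves to 5x5 after pooling, and fc_new has 5 hidden units):

# Sanity-check the output: 1 sample, 5 class probabilities
print(output[0].shape) # (1, 5)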

Module

import mxnet as mx
#--------Initialize the network executor------------
mod = mx.mod.Module(symbol=sym, context=mx.gpu(0))
mod.bind(data_shapes=[('data', (8, 3, 28, 28))], label_shapes=[('softmax_label', (8,))], for_training=False)
mod.init_params()
#--------Run a forward pass--------------------
data = mx.nd.random.uniform(0, 1, shape=(8, 3, 28, 28))
mod.forward(mx.io.DataBatch([data]))
print(mod.get_outputs()[0])
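
Since the softmax output is a batch of class probabilities, the predicted class per sample can be read off with an argmax; a minimal sketch:

# Take the argmax over the class axis to get per-sample predictions
prob = mod.get_outputs()[0]       # shape (8, 2): 8 samples, 2 classes
pred = mx.nd.argmax(prob, axis=1)
print(pred.asnumpy())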

#--------Train a model with the Module API-----------
import mxnet as mx
import logging
data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data=data, num_filter=128, kernel=(3,3), pad=(1,1), name='conv1')
bn = mx.sym.BatchNorm(data=conv, name='bn1')
relu = mx.sym.Activation(data=bn, act_type='relu', name='relu1')
pool = mx.sym.Pooling(data=relu, kernel=(2,2), stride=(2,2), pool_type='max', name='pool1')
fc = mx.sym.FullyConnected(data=pool, num_hidden=2, name='fc1')
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')

data = mx.nd.random.uniform(0, 1, shape=(1000, 3, 224, 224))
label = mx.nd.round(mx.nd.random.uniform(0, 1, shape=(1000,)))
train_data = mx.io.NDArrayIter(data={'data':data}, label={'softmax_label':label}, batch_size=8, shuffle=True)
print(train_data.provide_data)
print(train_data.provide_label)
mod = mx.mod.Module(symbol=sym, context=mx.gpu(0))
#--------Replaceable by fit(): START-------------------
mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
mod.init_params()
mod.init_optimizer()
eval_metric = mx.metric.create('acc')

for epoch in range(5):
    end_of_batch = False
    eval_metric.reset()
    data_iter = iter(train_data)
    next_data_batch = next(data_iter)
    while not end_of_batch:
        data_batch = next_data_batch
        mod.forward(data_batch)
        mod.backward()
        mod.update()
        mod.update_metric(eval_metric, labels=data_batch.label)
        try:
            next_data_batch = next(data_iter)
            mod.prepare(next_data_batch)
        except StopIteration:
            end_of_batch = True
    eval_name_vals = eval_metric.get_name_value()
    print("Epoch:{} Train_Acc:{:.4f}".format(epoch, eval_name_vals[0][1]))
    # Sync the parameters back from the devices (useful before saving a checkpoint)
    arg_params, aux_params = mod.get_params()
    mod.set_params(arg_params, aux_params)
    train_data.reset()
#---------Replaceable by fit(): END---------------------

# Calling fit() instead
logger = logging.getLogger()
logger.setLevel(logging.INFO)
mod.fit(train_data=train_data, num_epoch=5)
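
fit() wraps bind, parameter and optimizer initialization, the metric, and the epoch loop in one call; it also accepts those pieces as arguments. The optimizer settings below are illustrative assumptions, not values from the original:

# A fuller fit() call; learning rate and optimizer are illustrative choices
mod.fit(train_data=train_data,
        eval_metric='acc',
        optimizer='sgd',
        optimizer_params={'learning_rate': 0.01},
        num_epoch=5)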

 
