TensorFlow, Mxnet, PyTorch: Key Points and Implementations of Common Neural Networks

Contents

  • 1. Deep Learning Frameworks
        • TensorFlow 1.x
        • PyTorch
        • Mxnet
  • 2. Multilayer Perceptron (MLP) (Mxnet + TensorFlow implementations)
  • 3. Convolutional Neural Networks (LeNet/AlexNet/VGG/NiN/GoogLeNet/ResNet) (Mxnet implementation)
  • 4. Recurrent Neural Networks (RNN/GRU/LSTM) (Mxnet implementation)
  • 5. Generative Adversarial Networks (DCGAN) (Mxnet implementation)
  • 6. Region-based Convolutional Neural Networks (SSD) (Mxnet implementation)

1. Deep Learning Frameworks

TensorFlow 1.x

# Required modules
import numpy as np
import tensorflow as tf                                             # import from TF 1.x
import tensorflow.compat.v1 as tf                                   # import the 1.x API from TF 2.0

# Creation
tf.Variable(1, trainable=True)                                      # create a trainable variable
tf.placeholder(tf.float32, shape=(None,3))                          # create a placeholder (dtype comes first)
tf.range(10)                                                        # create a sequence
tf.one_hot(X, 10)                                                   # create a one-hot array
tf.zeros((2,3), dtype=tf.int32)                                     # create an array of zeros
tf.ones((2,3), dtype=tf.int32)                                      # create an array of ones
tf.fill((2,3), 7)                                                   # create a constant-filled array (dims, value)
tf.constant([1,2])                                                  # create a constant array
tf.random_uniform((2,3,5), seed=1)                                  # uniformly distributed random array
tf.random_normal((2,3,5), stddev=1.5)                               # normally distributed random array
tf.truncated_normal((2,3,5), stddev=1.5)                            # normally distributed random array (samples beyond 2 sigma are re-drawn)

# Math operations
tf.add(X1, X2)                                                      # addition
tf.add_n([X1, X2, X3])                                              # sum of a list of tensors
tf.subtract(X1, X2)                                                 # subtraction
tf.multiply(X1, X2)                                                 # element-wise multiplication
tf.square(X)                                                        # square
tf.matmul(X, W)                                                     # matrix multiplication
tf.transpose(X)                                                     # transpose
tf.clip_by_value(X, min_, max_)                                     # clip values
tf.assign(x, 10)                                                    # assignment

# Statistics
tf.reduce_sum(X)                                                    # sum
tf.reduce_mean(X)                                                   # mean
tf.argmax(X, 1)                                                     # index of the maximum

# Conditionals
tf.greater(X1, X2)                                                  # element-wise greater-than
tf.less(X1, X2)                                                     # element-wise less-than
tf.equal(X1, X2)                                                    # element-wise equality
tf.where(tf.greater(X1,X2), Y1, Y2)                                 # conditional selection
tf.cond(cond, func_1, func_2)                                       # conditional branch
tf.while_loop(cond, body, variables)                                # conditional loop

# Global information
tf.shape(X)                                                         # shape
tf.expand_dims(X, axis=0)                                           # add a dimension
tf.squeeze(X)                                                       # remove size-1 dimensions
tf.global_variables()                                               # list all global variables
tf.trainable_variables()                                            # list all trainable variables
tf.cast(X, tf.float32)                                              # change element type
tf.map_fn(func, elems=X)                                            # element-wise mapping
tf.gather(X, indices)                                               # gather elements by index
tf.split(X, 5, axis=0)                                              # split
tf.concat(outputs, axis=0)                                          # concatenate a list of tensors
tf.sequence_mask(mask_arr, max_len)                                 # build a mask matrix
tf.boolean_mask(X, mask)                                            # apply a mask matrix

# Exponential moving average
ema = tf.train.ExponentialMovingAverage(0.97)                       # moving-average helper class
ema.apply([w1, w2, w3])                                             # create/update shadow variables (returns an update op)
ema.average(w1)                                                     # fetch the moving average of a variable
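
# A minimal sketch (added for illustration): attach the moving-average update to a
# training step via control dependencies, assuming `loss` is already defined, so that
# each run of train_op also refreshes the shadow variables.
ema = tf.train.ExponentialMovingAverage(0.97)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
with tf.control_dependencies([train_step]):
    train_op = ema.apply(tf.trainable_variables())                  # running train_op updates the shadows too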

# Data iteration
dataset = tf.data.Dataset.from_tensor_slices(data)                  # build a dataset
dataset = dataset.shuffle(1000).batch(100)                          # shuffle and batch (returns a new dataset)
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)    # build an iterator
iterator.make_initializer(dataset)                                  # bind the iterator to the dataset (returns an init op)
iterator.get_next()                                                 # fetch the next batch
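
# A minimal sketch (added for illustration) wiring the pieces above together,
# assuming `data` is a NumPy array already in memory:
dataset = tf.data.Dataset.from_tensor_slices(data).shuffle(1000).batch(100)
iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
init_op = iterator.make_initializer(dataset)
next_batch = iterator.get_next()
with tf.Session() as sess:
    sess.run(init_op)
    while True:
        try:
            batch = sess.run(next_batch)                            # consume one batch per run call
        except tf.errors.OutOfRangeError:                           # raised once the dataset is exhausted
            break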

# Command-line interface
tf.app.flags.DEFINE_integer("t", 7, "")                             # define an integer flag
tf.app.flags.DEFINE_float("t", 7.4, "")                             # define a float flag
tf.app.flags.DEFINE_boolean("t", True, "")                          # define a boolean flag
tf.app.flags.DEFINE_string("t", "te", "")                           # define a string flag
tf.app.flags.FLAGS.t                                                # read one flag
tf.app.flags.FLAGS.__flags                                          # read all flags
tf.app.flags.mark_flag_as_required("t")                             # mark a flag as required (takes the flag name)
tf.app.run()                                                        # launch the program
$ python code.py --t=7 --h=True                                     # run with flags set on the command line
hparams = tf.contrib.training.HParams(lr=0.1)                       # define a hyper-parameter object
hparams.batch_size = 32                                             # extend with a new hyper-parameter
hparams.values()                                                    # convert to a dict
hparams.to_json()                                                   # convert to a JSON string
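
# A minimal sketch (added for illustration) of the flags-to-main flow, assuming the
# integer flag "t" defined above:
FLAGS = tf.app.flags.FLAGS
def main(argv=None):                                                # tf.app.run() parses flags, then calls main()
    print(FLAGS.t)
if __name__ == '__main__':
    tf.app.run()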

# TFRecord
_integers, _floats, _bytes = [1,1,1], [2,2,2], [b'3',b'3',b'3']
writer = tf.python_io.TFRecordWriter(r'/data.tfrecords')            # writer object
for i in range(len(_integers)):
    features = tf.train.Features(feature={
    '_integers': tf.train.Feature(int64_list=tf.train.Int64List(value=[_integers[i]])),
    '_floats': tf.train.Feature(float_list=tf.train.FloatList(value=[_floats[i]])),
    '_bytes': tf.train.Feature(bytes_list=tf.train.BytesList(value=[_bytes[i]]))})
    example = tf.train.Example(features=features)
    writer.write(example.SerializeToString())                       # write one serialized record
writer.close()
ex = next(tf.python_io.tf_record_iterator(r'/data.tfrecords'))      # inspect one stored record
print(tf.train.Example.FromString(ex))
reader = tf.TFRecordReader()                                        # reader object
queue = tf.train.string_input_producer([r'/data.tfrecords'])        # input-file queue
_, serialized_example = reader.read(queue)                          # read from the queue
features = tf.parse_single_example(serialized_example, features={
                    '_integers': tf.FixedLenFeature([], tf.int64),
                    '_floats': tf.FixedLenFeature([], tf.float32)})
_integers = tf.cast(features['_integers'], tf.int64)
_floats = tf.cast(features['_floats'], tf.float32)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(3):
        print(_integers.eval())
    coord.request_stop()
    coord.join(threads=threads)

# TensorBoard
tf.summary.histogram(name, variable)                                # monitor a variable (run inside a session)
tf.summary.scalar(name, scalar)                                     # monitor a scalar (run inside a session)
tf.summary.image(name, image)                                       # monitor an image (run inside a session)
tf.summary.text(name, text)                                         # monitor text (executes immediately)
tf.summary.audio(name, audio)                                       # monitor audio (executes immediately)
summary_op = tf.summary.merge_all()                                 # merge all monitored targets
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)       # options that control runtime tracing (run inside a session)
run_metadata = tf.RunMetadata()                                     # proto object that receives the runtime traces
summary, _ = sess.run((summary_op, train_op), feed_dict=feed_dict, options=options, run_metadata=run_metadata)
writer = tf.summary.FileWriter('/logs', tf.get_default_graph())     # create the log file and write the graph
writer.add_run_metadata(run_metadata, 'step %03d' % i)              # append runtime traces to the log
writer.add_summary(summary, i)                                      # append the merged summaries to the log
writer.close()                                                      # close the log file
$ tensorboard --logdir='D:/logs'                                    # launch TensorBoard (open localhost:6006 in a browser)

# Sessions
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)    # session configuration
sess = tf.Session(config=config)                                    # open a regular session
sess = tf.InteractiveSession(config=config)                         # open an interactive session
sess.run(tf.global_variables_initializer())                         # initialize all variables
print(x.eval())                                                     # shortcut evaluation in an interactive session
sess.close()                                                        # close the session

# 命名空间
with tf.name_scope('A'):
	with tf.variable_scope('B',reuse=tf.AUTO_REUSE):
		a = tf.Variable(1,name='a')									#在命名空间和变量空间下生成变量
		b = tf.get_variable(name='b',shape=(1),initializer=tf.zeros_initializer)    #在变量空间下生成变量
tf.zeros_initializer												#零初始器
tf.ones_initializer													#一初始器
tf.constant_initializer(2)											#常数初始器				
tf.random_uniform_initializer										#均匀分布随机数初始器
tf.random_normal_initializer										#正态分布随机数初始器
tf.truncated_normal_initializer										#正态分布裁剪随机数初始器(剪裁范围在±2σ之外的随机数)
with tf.variable_scope('',reuse=True):
	x2 = tf.get_variable(name='W/X')								#调用变量

# Computation graphs
graph = tf.Graph()                                                  # a graph (variables and ops are not shared across graphs)
graph.device('/gpu:0')                                              # pin the graph to a device
with graph.as_default():                                            # set the default graph
    x = tf.constant([1,2,3])                                        # create a variable on the default graph
sess = tf.Session(graph=graph)                                      # open a session on a specific graph
sess.graph.finalize()                                               # lock the graph (no more nodes can be added)
tf.reset_default_graph()                                            # reset the default graph (frees memory)

# Datasets
from tensorflow.examples.tutorials.mnist import input_data          # MNIST handwritten-digit dataset
mnist = input_data.read_data_sets('/MNIST_data/', one_hot=True)     # download/read the data at the given path
X_train, y_train, X_test, y_test = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
mnist.train.next_batch(64)                                          # draw a mini-batch

# Compute devices
config = tf.ConfigProto(log_device_placement=True,                  # log where each op is placed
                        allow_soft_placement=True)                  # fall back to the CPU for ops the GPU cannot run
config.gpu_options.allow_growth = True                              # allocate GPU memory on demand
config.gpu_options.per_process_gpu_memory_fraction = 0.4            # cap the per-process GPU memory fraction
sess = tf.Session(config=config)                                    # apply the configuration above
with tf.device('/cpu:0'):                                           # pin to a device
    x = tf.constant([1,2,3])                                        # create a variable on that device
os.environ['CUDA_VISIBLE_DEVICES'] = '2'                            # choose which GPUs participate
$ CUDA_VISIBLE_DEVICES='0,2' python code.py                         # choose the GPUs from the shell

# 分布式计算
cluster = tf.train.ClusterSpec({'ps':['tf-ps0:2222',				#定义集群并分配工作与任务给不同端口
									  'tf-ps1:2222']})				#(这是一个TF常用集群配置方法)
								'worker':['tf-worker0:2222',		#(ps负责存储、获取以及更新变量的取值)
					  					  'tf-worker1:2222',		#(ps, abbr. Parameter Server)
					  					  'tf-worker2:2222'],		#(worker负责反向传播算法获取梯度)
server = tf.train.Server(cluster, job_name='ps', task_index=0)		#为指定工作与任务创建服务器
server = tf.train.Server.create_local_server()						#创建本地单进程服务器
server.start()														#开启服务器
server.join()														#暂停服务器
sess = tf.Session(target=server.target)								#在服务器上新建会话
device_setter = tf.train.replica_device_setter(work_device='/job:worker/task:0',cluster=cluster)	#配置计算分配器
optimizer = tf.train.SyncReplicasOptimizer(							#配置同步模式优化器
		tf.train.GradientDescentOptimizer(learning_rate),
		replicas_to_aggregate=n_workers,
		total_num_replicas=n_workers)
sync_replicas_hook = optimizer.make_session_run_hook(is_chief)		#配置同步模式hook
with tf.device(device_setter):										#指定计算设备并分配计算资源
	is_chief = (TASK_ID == 0)											
	global_step,loss,train_op = build_model(x,y_,is_chief)			#在build_model函数中定义节点(需提前完成x、y_的分配)
	hooks = [sync_replicas_hook,									#同步模式(异步模式删除sync_replicas_hook)			
			tf.train.StopAtStepHook(last_step=TRAINING_STEPS)]		#设定训练终止迭代数
	with tf.train.MonitoredTrainingSession(							#开启分布式计算中心会话
			master=server.target,									
			is_chief=is_chief,										
			checkpoint_dir=MODEL_SAVE_PATH,
			hooks=hooks,
			save_checkpoint_secs=60,
			config=config) as sess:
		while not sess.should_stop():								#迭代
			x,y = data.next_batch(BATCH_SIZE)				
			l,step,_ = sess.run((loss,train_op,global_step),feed_dict=feed_dict(x,y))
			
# Deep learning: optimization
tf.nn.tanh(Z)                                                       # tanh activation
tf.nn.relu(Z)                                                       # ReLU activation
tf.nn.sigmoid(Z)                                                    # sigmoid activation
tf.nn.softmax(Z)                                                    # softmax normalization
tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)        # cross-entropy loss (labels are probability distributions)
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_) # cross-entropy loss (labels are class indices)
tf.train.GradientDescentOptimizer(learning_rate=0.01)               # gradient-descent optimizer
tf.train.AdamOptimizer(learning_rate=0.01)                          # Adam optimizer
tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)        # momentum optimizer (momentum is required)
train_op = Optimizer.minimize(loss)                                 # define the optimization op

# Deep learning: gradient clipping
grads = Optimizer.compute_gradients(loss)                           # compute the raw gradients
for i, (g, v) in enumerate(grads):
    if g is not None:
        grads[i] = (tf.clip_by_norm(g, clipping_theta), v)          # clip each gradient by norm
train_op = Optimizer.apply_gradients(grads)                         # apply the clipped gradients

# Deep learning: building networks
tf.nn.embedding_lookup(embedding, data)                             # word-embedding lookup
tf.nn.conv2d(X, filt, strides=[1,1,1,1], padding='SAME')            # convolution (filt has shape [kh, kw, in_ch, out_ch])
tf.nn.bias_add(X, bias)                                             # add a bias term
tf.nn.relu(X)                                                       # activation
tf.nn.moments(X, axes=[0,1])                                        # mean and variance
tf.nn.max_pool(X, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID')    # max pooling
tf.nn.avg_pool(X, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID')    # average pooling
tf.nn.dropout(X, keep_prob=keep_prob)                               # dropout
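
# A minimal sketch (added for illustration) chaining the layer ops above into one
# convolutional block; the filter and bias shapes are illustrative:
X = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
filt = tf.Variable(tf.truncated_normal((5, 5, 1, 32), stddev=0.1))  # [kh, kw, in_ch, out_ch]
bias = tf.Variable(tf.zeros((32,)))
conv = tf.nn.conv2d(X, filt, strides=[1, 1, 1, 1], padding='SAME')
act = tf.nn.relu(tf.nn.bias_add(conv, bias))
pool = tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')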

# Deep learning: standard training flow
Trainer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
with tf.Session() as sess:                                          # open a session
    sess.run(tf.global_variables_initializer())                     # initialize variables and parameters
    for epoch in range(10000):                                      # iterate
        l, _ = sess.run((loss, Trainer), feed_dict={X: X_train})    # compute the loss and update the parameters

# Deep learning: control dependencies
with tf.control_dependencies([train_step, exp_decay_op]):
    train_op = tf.no_op(name='train')                               # a no-op that groups the ops above
    extra_op()                                                      # an op forced to run after them

# 深度学习:损失函数集合
mse = tf.reduce_mean(tf.square(y_pred-y))							#MSE
l2 = tf.contrib.layers.l2_regularizer(lambda)(w)    				#L2正则项
tf.add_to_collection('losses', mse)									#添加到集合
tf.add_to_collection('losses', l2)
loss = tf.add_n(tf.get_collection('losses'))						#获取集合

# Deep learning: learning-rate decay
global_step = tf.Variable(0)                                        # iteration counter
lr = tf.train.exponential_decay(0.1, global_step, 100, 0.96, staircase=True)    # decaying learning-rate schedule
Trainer = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(loss, global_step)    # attach the schedule to the optimizer

# 模型保存:会话保存
saver = tf.train.Saver({'x':X})										#定义模型存储类并规定载入的变量参数
saver = tf.train.import_meta_graph('/path/model.ckpt.meta')    		#定义模型读取类并规定读取计算图
saver.save(sess,'/path/model.ckpt')    								#保存模型(同时保存计算图和参数)
saver.restore(sess,'/path/model.ckpt')    							#读取模型

# Model saving: graphs
from tensorflow.python.framework import graph_util
graph_def = tf.get_default_graph().as_graph_def()                   # export the graph definition
output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, ['add'])    # freeze variables into constants
with tf.gfile.GFile('/path/combined_model.pb', 'wb') as f:
    f.write(output_graph_def.SerializeToString())                   # write to file
from tensorflow.python.platform import gfile
with tf.gfile.FastGFile('/path/combined_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())                             # read from file
result = tf.import_graph_def(graph_def, return_elements=['add:0'])  # load the graph and fetch the named tensor

PyTorch

# Required modules
import torch

# Creation
torch.tensor([5.5,3], requires_grad=True)                           # tensor
torch.empty(5,3)                                                    # uninitialized matrix
torch.zeros(5,3, dtype=torch.long)                                  # zero matrix
torch.ones(5,3, dtype=torch.float)                                  # ones matrix
torch.rand(5,3)                                                     # random matrix
torch.randn_like(x, dtype=torch.float)                              # random matrix with the same shape as x

# Global information
x.size()                                                            # shape
x.view(3,5)                                                         # reshape
x.item()                                                            # extract a plain Python number
x.numpy()                                                           # convert to a NumPy array
torch.from_numpy(x)                                                 # build a tensor from a NumPy array

# Compute devices
torch.cuda.is_available()                                           # is a CUDA device available?
torch.cuda.device_count()                                           # number of CUDA devices
torch.rand(5,3, device=torch.device('cuda'))                        # create a tensor on a CUDA device
x.to(torch.device('cuda'))                                          # move to a CUDA device
x.to('cpu')                                                         # move to the CPU
net = torch.nn.DataParallel(net)                                    # parallelize a network across devices

# Deep learning
net.parameters()                                                    # parameter list
loss = torch.nn.CrossEntropyLoss()(output, label)                   # cross-entropy loss
loss = torch.nn.MSELoss()(output, label)                            # MSE loss
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)    # SGD optimizer
conv1.weight.grad                                                   # inspect a layer's weight gradient
loss.backward()                                                     # automatic gradient computation
optimizer.step()                                                    # parameter update
with torch.no_grad():                                               # disable gradient tracking
    pass

# Deep learning: building networks
torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)    # convolutional layer
torch.nn.Linear(in_features, out_features, bias=True)               # fully connected layer
torch.nn.functional.max_pool2d(x, 2)                                # max pooling (kernel size is required)
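
# The overview above lists the pieces but not the loop itself; a minimal end-to-end
# sketch (added for illustration) on hypothetical random data:
import torch.nn as nn
X, y = torch.randn(64, 10), torch.randn(64, 1)
net = nn.Linear(10, 1)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
for epoch in range(100):
    optimizer.zero_grad()                                           # clear accumulated gradients
    loss = nn.MSELoss()(net(X), y)                                  # forward pass and loss
    loss.backward()                                                 # backward pass
    optimizer.step()                                                # parameter update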

Mxnet

# Required modules
import numpy as np
import mxnet as mx                                                  # main module
import gluonbook as gb                                              # companion utilities (e.g. GPU detection, dataset loading)

# Creation
mx.nd.array(X, ctx=mx.gpu(0))                                       # create an array
mx.nd.one_hot(X, 10)                                                # create a one-hot array
mx.nd.arange(15)                                                    # create an increasing sequence
mx.nd.zeros((2,3))                                                  # create an array of zeros
mx.nd.ones((2,3))                                                   # create an array of ones
mx.nd.full((2,3), 7)                                                # create a constant-filled array
mx.nd.eye(5)                                                        # create an identity matrix
mx.nd.random_normal(shape=(2,3,5))                                  # normally distributed random array
mx.nd.random_uniform(shape=(2,3,5))                                 # uniformly distributed random array
mx.nd.random_poisson(shape=(2,3,5))                                 # Poisson-distributed random array
mx.random.seed(1)                                                   # set the random seed

# Math operations
mx.nd.softmax(Z)                                                    # softmax normalization
mx.nd.log_softmax(Z)                                                # log-softmax normalization
mx.nd.dot(W, X)                                                     # dot product
mx.nd.norm(X)                                                       # norm

# Statistics
mx.nd.sum(X)                                                        # sum (returns nan if any nan is present)
mx.nd.nansum(X)                                                     # sum (ignoring nan)
mx.nd.argmax(X, axis=1)                                             # index of the maximum
mx.nd.argsort(X, axis=1)                                            # per-row sorting indices

# Global information
X.shape                                                             # shape
X.grad                                                              # gradient
X.dtype                                                             # element type
X.T                                                                 # transpose
X.transpose(0,2,1)                                                  # permute dimensions
X.reshape((1,4))                                                    # reshape
X.expand_dims(axis=0)                                               # add a dimension
X.squeeze()                                                         # remove size-1 dimensions
X.flatten()                                                         # flatten to 2-D (the leading dimension is kept)
X.astype(int)                                                       # change element type
X.asscalar()                                                        # convert to a scalar
X.asnumpy()                                                         # convert to a NumPy array
X.as_in_context(mx.gpu(0))                                          # move to another device
X.attach_grad()                                                     # mark for gradient computation

# Concatenation and stacking
mx.nd.concatenate([X,Y], axis=1)                                    # concatenate
mx.nd.tile(X, (1,2,3))                                              # tile

# Deep learning
gb.try_gpu()                                                        # detect a GPU
mx.nd.tanh(Z)                                                       # tanh activation
mx.nd.relu(Z)                                                       # ReLU activation
mx.nd.sigmoid(Z)                                                    # sigmoid activation
mx.gluon.loss.L1Loss()                                              # L1 loss
mx.gluon.loss.L2Loss()                                              # L2 loss
mx.gluon.loss.SoftmaxCrossEntropyLoss()                             # cross-entropy loss
mx.gluon.loss.KLDivLoss()                                           # KL-divergence loss

# Deep learning: building networks
Net = mx.gluon.nn.Sequential()
Net.add(mx.gluon.nn.Conv2D(channels=6, kernel_size=(5,3), strides=2, padding=1, activation='relu'),  # convolutional layer
        mx.gluon.nn.BatchNorm(),                                    # batch-normalization layer
        mx.gluon.nn.MaxPool2D(pool_size=2, strides=2, padding=0),   # pooling layer
        mx.gluon.nn.GlobalAvgPool2D(),                              # global pooling layer
        mx.gluon.nn.Dense(10, activation='relu'),                   # fully connected layer
        mx.gluon.nn.Dropout(0.5),                                   # dropout layer
        mx.gluon.nn.Flatten())                                      # flattening layer
Net.initialize(force_reinit=True, init=mx.init.Xavier())            # Xavier initialization
Net.initialize(force_reinit=True, init=mx.init.Normal(0.1))         # normal-distribution initialization

# Deep learning: standard training flow
Trainer = mx.gluon.Trainer(Net.collect_params(), 'sgd', {'learning_rate': 0.1})
for epoch in range(10000):
    with mx.autograd.record():                                      # start recording gradients
        Z = Net(X)
        loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()(Z, y)        # compute the loss
    loss.backward()                                                 # backpropagate from the loss
    Trainer.step(batch_size=64)                                     # update the parameters

2. Multilayer Perceptron (MLP) (Mxnet + TensorFlow implementations)

When using an MLP (Multi-Layer Perceptron) on images, the image matrix must first be reshaped into a one-dimensional vector. This section implements the same project with identical hyper-parameters in both Mxnet and TensorFlow to compare the two frameworks' performance. The Mxnet implementation follows:
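
As a quick illustration of that reshaping step (not needed in the Boston-housing example below, whose inputs are already vectors), a batch of images can be flattened in Mxnet like this:

import mxnet as mx
X_img = mx.nd.random_normal(shape=(64, 1, 28, 28))    # a hypothetical batch of 28x28 images
X_flat = X_img.reshape((X_img.shape[0], -1))          # (64, 784): one row per image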

# Required modules
from time import time
import mxnet as mx
import gluonbook as gb
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split

# Load data
ctx = gb.try_gpu()
boston = load_boston()
X_train,X_test,y_train,y_test = train_test_split(boston.data,boston.target,test_size=0.1)
X_train = mx.nd.array(scale(X_train),ctx=ctx)
X_test = mx.nd.array(scale(X_test),ctx=ctx)
y_train = mx.nd.array(scale(y_train.reshape((-1,1))),ctx=ctx)
y_test = mx.nd.array(scale(y_test.reshape((-1,1))),ctx=ctx)
    
# Parameters
iters = 10000
lr = 0.1
num_inputs = X_train.shape[1]
num_hiddens = 8
num_outputs = y_train.shape[1]

# Network architecture
net = mx.gluon.nn.Sequential()
net.add(mx.gluon.nn.Dense(num_hiddens, activation='tanh'),
        mx.gluon.nn.Dense(num_outputs, activation='sigmoid'))
net.initialize(force_reinit=True, init=mx.init.Normal(), ctx=ctx)   
loss = lambda y_pred, y: mx.nd.sqrt(mx.nd.mean(mx.nd.square(y_pred-y)))
Trainer = mx.gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})

# Training
tic = time()
for i in range(iters):
    with mx.autograd.record():
        y_train_pred = net(X_train)
        l = loss(y_train_pred,y_train)
    l.backward()
    Trainer.step(batch_size=X_train.shape[0])
    if i % 1000 == 0:
        y_test_pred = net(X_test)
        test_l = loss(y_test_pred,y_test)
        print('epoch: %d, train loss: %.4f, test loss: %.4f, time: %.2fs'%(i,l.asscalar(),test_l.asscalar(),time()-tic))
        tic = time()

Below is the TensorFlow implementation of the same task:

# Required modules
from time import time
import tensorflow as tf
from sklearn.datasets import load_boston
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split

# Load data
boston = load_boston()
X_train,X_test,y_train,y_test = train_test_split(boston.data,boston.target,test_size=0.1)
X_train = scale(X_train)
X_test = scale(X_test)
y_train = scale(y_train.reshape((-1,1)))
y_test = scale(y_test.reshape((-1,1)))
    
# Parameters
iters = 10000
lr = 0.1
num_inputs = X_train.shape[1]
num_hiddens = 8
num_outputs = y_train.shape[1]

# Network architecture
xs = tf.placeholder(shape=(None,X_train.shape[1]), dtype=tf.float32)
W1 = tf.Variable(tf.random_normal(shape=(num_inputs,num_hiddens)))
b1 = tf.Variable(tf.random_normal(shape=(1,num_hiddens)))
W2 = tf.Variable(tf.random_normal(shape=(num_hiddens,num_outputs)))
b2 = tf.Variable(tf.random_normal(shape=(1,num_outputs)))
Z1 = tf.nn.tanh(tf.matmul(xs,W1) + b1)
Z2 = tf.nn.sigmoid(tf.matmul(Z1,W2) + b2)
ys = tf.placeholder(shape=(None,1), dtype=tf.float32)
loss = tf.sqrt(tf.reduce_mean(tf.square(ys - Z2)))
Trainer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

# Training
tic = time()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(iters):
        l,_ = sess.run((loss, Trainer), feed_dict={xs: X_train, ys: y_train})
        if i % 1000 == 0:
            test_l = sess.run(loss, feed_dict={xs: X_test, ys: y_test})
            print('epoch: %d, train loss: %.4f, test loss: %.4f, time: %.2fs'%(i,l,test_l,time()-tic))
            tic = time()

Under the two frameworks, the runs produce the following results:

Mxnet:
epoch: 0, train loss: 1.1181, test loss: 1.0897, time: 0.00s
epoch: 1000, train loss: 0.7545, test loss: 0.7919, time: 2.14s
epoch: 2000, train loss: 0.7405, test loss: 0.8322, time: 2.12s
epoch: 3000, train loss: 0.7402, test loss: 0.8345, time: 2.11s
epoch: 4000, train loss: 0.7401, test loss: 0.8332, time: 2.15s
epoch: 5000, train loss: 0.7401, test loss: 0.8323, time: 2.12s
epoch: 6000, train loss: 0.7401, test loss: 0.8344, time: 2.13s
epoch: 7000, train loss: 0.7403, test loss: 0.8296, time: 2.15s
epoch: 8000, train loss: 0.7403, test loss: 0.8258, time: 2.13s
epoch: 9000, train loss: 0.7408, test loss: 0.8278, time: 2.16s

TensorFlow:
epoch: 0, train loss: 1.1311, test loss: 1.0157, time: 0.60s
epoch: 1000, train loss: 0.7439, test loss: 0.7745, time: 0.93s
epoch: 2000, train loss: 0.7435, test loss: 0.7789, time: 0.94s
epoch: 3000, train loss: 0.7431, test loss: 0.7729, time: 0.96s
epoch: 4000, train loss: 0.7432, test loss: 0.7741, time: 0.95s
epoch: 5000, train loss: 0.7429, test loss: 0.7760, time: 0.99s
epoch: 6000, train loss: 0.7430, test loss: 0.7760, time: 0.93s
epoch: 7000, train loss: 0.7430, test loss: 0.7763, time: 0.95s
epoch: 8000, train loss: 0.7430, test loss: 0.7764, time: 0.94s
epoch: 9000, train loss: 0.7430, test loss: 0.7762, time: 0.93s

Judging from repeated runs to rule out chance, Mxnet and TensorFlow fit the training samples and generalize to the test samples about equally well; this is expected, since the code was tuned until the two networks were architecturally identical. On the most critical measure, raw training speed, however, the gap is clear: in the runs above, TensorFlow completes each block of 1000 iterations in roughly 0.95 s versus Mxnet's roughly 2.13 s.

3. Convolutional Neural Networks (LeNet/AlexNet/VGG/NiN/GoogLeNet/ResNet) (Mxnet implementation)

Keeping the same basic skeleton while varying the number of convolutional layers, the channel counts, and the kernel sizes yields CNN (Convolutional Neural Network) models from simple to complex: LeNet, AlexNet, VGG, NiN, GoogLeNet, and ResNet. AlexNet refines LeNet by adding convolutional layers, enlarging the channel counts and kernel sizes, and switching to the ReLU activation. VGG goes further: each pooling layer, together with the one or more identical convolutional layers preceding it, is named a block, and stacking such blocks on top of AlexNet-style fully connected layers gives VGG. NiN departs from the extract-spatial-features-then-classify-with-dense-layers design of the previous three: starting from VGG, it shrinks the repeated convolutions inside each block to 1×1 kernels and replaces the dense layers with a global pooling layer that emits the result directly. GoogLeNet adopts NiN's network-in-network idea and improves on it, running several parallel paths inside each block and classifying with a single dense layer after global pooling. ResNet introduces residual blocks, which let gradient updates bypass intermediate layers and reach blocks closer to the input directly, so it converges faster.

# Required modules
import sys, time
import mxnet as mx
from mxnet import autograd, nd, gluon, init
from mxnet.gluon import nn
import gluonbook as gb

# Build the network architectures
def model(model_type):
	if model_type == 'lenet':
		net = nn.Sequential()
		net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'),
		        nn.MaxPool2D(pool_size=2, strides=2),
		        nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
		        nn.MaxPool2D(pool_size=2, strides=2),
		        nn.Dense(120, activation='sigmoid'),
		        nn.Dense(84, activation='sigmoid'), 
		        nn.Dense(10))
	elif model_type == 'alexnet':
		net = nn.Sequential()
		net.add(nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
		        nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'),
		        nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nn.Dense(4096, activation="relu"), nn.Dropout(0.5),
		        nn.Dense(4096, activation="relu"), nn.Dropout(0.5),
		        nn.Dense(10))
	elif model_type == 'vgg':
		def vgg_block(num_convs, num_channels):
		    blk = nn.Sequential()
		    for _ in range(num_convs):
		        blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))
		    blk.add(nn.MaxPool2D(pool_size=2, strides=2))
		    return blk
		def vgg(conv_arch):
		    net = nn.Sequential()
		    for (num_convs, num_channels) in conv_arch:
		        net.add(vgg_block(num_convs, num_channels))
		    net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
		            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
		            nn.Dense(10))
		    return net
		conv_arch = ((1, 16), (1, 32), (2, 64), (2, 128), (2, 128))
		net = vgg(conv_arch)
	elif model_type == 'nin':
		def nin_block(num_channels, kernel_size, strides, padding):
		    blk = nn.Sequential()
		    blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),
		            nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
		            nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
		    return blk
		net = nn.Sequential()
		net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nin_block(256, kernel_size=5, strides=1, padding=2),
		        nn.MaxPool2D(pool_size=3, strides=2),
		        nin_block(384, kernel_size=3, strides=1, padding=1),
		        nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5),
		        nin_block(10, kernel_size=3, strides=1, padding=1),
		        nn.GlobalAvgPool2D(),
		        nn.Flatten())
	elif model_type == 'googlenet':
		class Inception(nn.Block):
		    def __init__(self, c1, c2, c3, c4, **kwargs):
		        super(Inception, self).__init__(**kwargs)
		        self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')
		        self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')
		        self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')
		        self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')
		        self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')
		        self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)
		        self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')
		    def forward(self, x):
		        p1 = self.p1_1(x)
		        p2 = self.p2_2(self.p2_1(x))
		        p3 = self.p3_2(self.p3_1(x))
		        p4 = self.p4_2(self.p4_1(x))        
		        return nd.concat(p1, p2, p3, p4, dim=1)  # concatenate the outputs along the channel dimension
		b1 = nn.Sequential()
		b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b2 = nn.Sequential()
		b2.add(nn.Conv2D(64, kernel_size=1),
		       nn.Conv2D(192, kernel_size=3, padding=1),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b3 = nn.Sequential()
		b3.add(Inception(64, (96, 128), (16, 32), 32),
		       Inception(128, (128, 192), (32, 96), 64),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b4 = nn.Sequential()
		b4.add(Inception(192, (96, 208), (16, 48), 64),
		       Inception(160, (112, 224), (24, 64), 64),
		       Inception(128, (128, 256), (24, 64), 64),
		       Inception(112, (144, 288), (32, 64), 64),
		       Inception(256, (160, 320), (32, 128), 128),
		       nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		b5 = nn.Sequential()
		b5.add(Inception(256, (160, 320), (32, 128), 128),
		       Inception(384, (192, 384), (48, 128), 128),
		       nn.GlobalAvgPool2D())
		net = nn.Sequential()
		net.add(b1, b2, b3, b4, b5, nn.Dense(10))
	elif model_type == 'resnet':
		class Residual(nn.Block):
		    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
		        super(Residual, self).__init__(**kwargs)
		        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,strides=strides)
		        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
		        if use_1x1conv:
		            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,strides=strides)
		        else:
		            self.conv3 = None
		        self.bn1 = nn.BatchNorm()
		        self.bn2 = nn.BatchNorm()
		    def forward(self, X):
		        Y = nd.relu(self.bn1(self.conv1(X)))
		        Y = self.bn2(self.conv2(Y))
		        if self.conv3:
		            X = self.conv3(X)
		        return nd.relu(Y + X)
		def resnet_block(num_channels, num_residuals, first_block=False):
		    blk = nn.Sequential()
		    for i in range(num_residuals):
		        if i == 0 and not first_block:
		            blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
		        else:
		            blk.add(Residual(num_channels))
		    return blk
		net = nn.Sequential()
		net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
		        nn.BatchNorm(), nn.Activation('relu'),
		        nn.MaxPool2D(pool_size=3, strides=2, padding=1))
		net.add(resnet_block(64, 2, first_block=True),
		        resnet_block(128, 2),
		        resnet_block(256, 2),
		        resnet_block(512, 2))
		net.add(nn.GlobalAvgPool2D(), nn.Dense(10))
	return net

# Load data and set parameters
ctx = gb.try_gpu()
num_epochs = 20
model_type = 'lenet'
if model_type == 'lenet': 
	lr = 0.8 
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size)
if model_type == 'alexnet': 
	lr = 0.01
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'vgg': 
	lr = 0.05
	batch_size = 64
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'nin': 
	lr = 0.1
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=224)
if model_type == 'googlenet': 
	lr = 0.1
	batch_size = 128
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=96)
if model_type == 'resnet': 
	lr = 0.05
	batch_size = 256
	train_iter, test_iter = gb.load_data_fashion_mnist(batch_size=batch_size,resize=96)

# Training
net = model(model_type)
net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr})    # create the trainer once, outside the loop
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
    train_loss_sum, train_acc_sum, start = 0, 0, time.time()
    for X, y in train_iter:
        X, y = X.as_in_context(ctx), y.as_in_context(ctx)
        with autograd.record():
            y_hat = net(X)
            loss = loss_fn(y_hat, y)
        loss.backward()
        trainer.step(batch_size)
        train_loss_sum += loss.mean().asscalar()
        train_acc_sum += gb.accuracy(y_hat, y)
    test_acc = gb.evaluate_accuracy(test_iter, net, ctx)
    print('epoch %d, loss %.4f, train acc %.4f, test acc %.4f, time %.1f sec'
          % (epoch + 1,
             train_loss_sum / len(train_iter),
             train_acc_sum / len(train_iter),
             test_acc,
             time.time() - start))

# Saving and loading models/parameters
net.save_parameters(params_path)    # save parameters to a .params file
net.load_parameters(params_path, ctx=ctx)    # load parameters from a .params file
hybrid_net.export(path)    # save the model and parameters as .json and .params files; note: only nets built with gluon.nn.HybridSequential() can be exported
hybrid_net = gluon.nn.SymbolBlock.imports(json_path, ['data'], params_path, ctx=ctx)    # load the model and parameters back
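
A minimal sketch of the HybridSequential export path referenced above (the file prefix 'model' and the input shape are illustrative):

from mxnet import nd, gluon
hybrid_net = gluon.nn.HybridSequential()
hybrid_net.add(gluon.nn.Dense(10))
hybrid_net.initialize()
hybrid_net.hybridize()                        # compile into a static symbolic graph
hybrid_net(nd.zeros((1, 20)))                 # one forward pass records the graph
hybrid_net.export('model')                    # writes model-symbol.json and model-0000.params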

4. Recurrent Neural Networks (RNN/GRU/LSTM) (Mxnet implementation)

An LSTM (Long Short-Term Memory) network is a variant of the RNN (Recurrent Neural Network) that can learn long-term dependencies, while the GRU (Gated Recurrent Unit) is a simplified, more efficient version of the LSTM. Merging the hidden states at the output unit/layer yields a bidirectional design, as sketched below.
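
As a minimal sketch of that bidirectional idea (using Gluon's built-in layer rather than the from-scratch code below; the shapes are illustrative), the forward and backward hidden states are concatenated along the feature axis:

from mxnet import nd
from mxnet.gluon import rnn
layer = rnn.LSTM(hidden_size=256, bidirectional=True)
layer.initialize()
inputs = nd.random_normal(shape=(35, 32, 100))    # (num_steps, batch_size, num_features)
outputs = layer(inputs)                           # (35, 32, 512): both directions merged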

# Required modules
import math, time
from mxnet import autograd, nd, gluon
import gluonbook as gb 

# Building blocks
def to_onehot(X, size):
    return [nd.one_hot(x, size) for x in X.T]
def get_params():
    _one = lambda shape: nd.random.normal(scale=0.01, shape=shape, ctx=ctx)
    _three = lambda : (_one((num_inputs, num_hiddens)), 
                       _one((num_hiddens, num_hiddens)), 
                       nd.zeros(num_hiddens, ctx=ctx))   
    if model_type == 'rnn':
        W_xh = _one((num_inputs, num_hiddens))   # input-layer parameters
        W_hh = _one((num_hiddens, num_hiddens))  # hidden-layer parameters
        W_hy = _one((num_hiddens, num_outputs))  # output-layer parameters
        b_h = nd.zeros(num_hiddens, ctx=ctx)
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xh, W_hh, b_h, W_hy, b_y]
    elif model_type == 'gru': 
        W_xr, W_hr, b_r = _three()  # reset-gate parameters
        W_xz, W_hz, b_z = _three()  # update-gate parameters
        W_xh, W_hh, b_h = _three()  # candidate-hidden-state parameters
        W_hy = _one((num_hiddens, num_outputs))  # output-layer parameters
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hy, b_y]
    elif model_type == 'lstm':       
        W_xi, W_hi, b_i = _three()  # input-gate parameters
        W_xf, W_hf, b_f = _three()  # forget-gate parameters
        W_xo, W_ho, b_o = _three()  # output-gate parameters
        W_xc, W_hc, b_c = _three()  # candidate-cell parameters
        W_hy = _one((num_hiddens, num_outputs))  # output-layer parameters
        b_y = nd.zeros(num_outputs, ctx=ctx)
        params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hy, b_y]
    for param in params:
        param.attach_grad()
    return params
def init_state(batch_size, num_hiddens, ctx):
    if (model_type == 'rnn') | (model_type == 'gru'):
        return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx),)
    elif model_type == 'lstm':
        return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx), 
                nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx))
def model(inputs, state, params):
    if model_type == 'rnn':
        W_xh, W_hh, b_h, W_hy, b_y = params
        H, = state
        outputs = []
        for X in inputs:
            H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H,)
    elif model_type == 'gru':
        W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hy, b_y = params
        H, = state
        outputs = []
        for X in inputs:        
            Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)
            R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)
            H_tilda = nd.tanh(nd.dot(X, W_xh) + R * nd.dot(H, W_hh) + b_h)
            H = Z * H + (1 - Z) * H_tilda
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H,)
    elif model_type == 'lstm':
        [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
         W_hy, b_y] = params
        (H, C) = state
        outputs = []
        for X in inputs:        
            I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
            F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
            O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
            C_tilda = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
            C = F * C + I * C_tilda
            H = O * C.tanh()
            Y = nd.dot(H, W_hy) + b_y
            outputs.append(Y)
        return outputs, (H, C)
def predict(prefix, num_chars, model, params, init_state, num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
    state = init_state(1, num_hiddens, ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix)):
        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)  # feed the previous step's output as the current input
        (Y, state) = model(X, state, params)  # compute the output and update the hidden state
        if t < len(prefix) - 1:  # the next input is the next prefix character or the current best prediction
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y[0].argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
def grad_clipping(params, theta, ctx):
    norm = nd.array([0.0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
def train_and_predict(model, get_params, init_state, num_hiddens, vocab_size, ctx, corpus_indices, 
                          idx_to_char, char_to_idx, is_random_iter, num_epochs, num_steps, lr, clipping_theta, 
                          batch_size, pred_period, pred_len, prefixes):
    if is_random_iter:
        data_iter_fn = gb.data_iter_random
    else:
        data_iter_fn = gb.data_iter_consecutive     
    params = get_params()
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state once per epoch
            state = init_state(batch_size, num_hiddens, ctx)
        loss_sum, start = 0.0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx) 
        for t, (X, Y) in enumerate(data_iter):  # each batch holds a (batch_size, num_steps) slice of X and of Y
            if is_random_iter:  # with random sampling, re-initialize the hidden state before every mini-batch
                state = init_state(batch_size, num_hiddens, ctx)
            else:  # otherwise detach the hidden state from the previous computation graph
                for s in state:
                    s.detach()
            with autograd.record():
                inputs = to_onehot(X, vocab_size)
                outputs, state = model(inputs, state, params)  # outputs: num_steps matrices of shape (batch_size, vocab_size)
                outputs = nd.concat(*outputs, dim=0)  # concatenated shape: (num_steps*batch_size, vocab_size)
                y = Y.T.reshape((-1,))  # Y is (batch_size, num_steps); transpose and flatten so each element matches a row of outputs
                l = loss(outputs, y).mean()  # mean cross-entropy classification error
            l.backward()
            grad_clipping(params, clipping_theta, ctx)  # clip the gradients, then update with SGD
            gb.sgd(params, lr, 1)  # the loss is already a mean, so the gradient needs no further averaging
            loss_sum += l.asscalar()
        if (epoch + 1) % pred_period == 0:
            print('\nEpoch %d, perplexity %.4f, time %.2fs' % (
                epoch + 1, math.exp(loss_sum / (t + 1)),
                time.time() - start))
            for prefix in prefixes:
                print('-', predict(
                    prefix, pred_len, model, params, init_state,
                    num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))   

# Load data
corpus_indices, char_to_idx, idx_to_char, vocab_size = gb.load_data_jay_lyrics()

# Parameters
ctx = gb.try_gpu()
num_inputs = vocab_size
num_hiddens = 256
num_outputs = vocab_size
num_epochs = 200
num_steps = 35
batch_size = 32
lr = 100 
clipping_theta = 0.01
prefixes = ['分开', '不分开']
pred_period = 5
pred_len = 25

# Training
model_type = 'lstm'
train_and_predict(model, get_params, init_state, num_hiddens, vocab_size, ctx, corpus_indices, 
                  idx_to_char, char_to_idx, False, num_epochs, num_steps, lr, clipping_theta, 
                  batch_size, pred_period, pred_len, prefixes)
               
# Training (with the built-in Gluon classes)
from mxnet.gluon import rnn
model = gb.RNNModel(rnn.LSTM(num_hiddens), vocab_size)
gb.train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, 
                               char_to_idx, num_epochs, num_steps, lr, clipping_theta, batch_size, 
                               pred_period, pred_len, prefixes) 

5. Generative Adversarial Networks (DCGAN) (Mxnet implementation)

A GAN (Generative Adversarial Network) is more a design philosophy than a specific architecture. It typically combines two or more deep networks whose types follow the task: CNNs when the goal is to generate images, MLPs or RNNs for speech. The networks split into two roles: a generator that produces the target object, and a discriminator that judges whether that object is real. The generator tries to make its output indistinguishable from the real target, while the discriminator tries to tell them apart; this adversarial game drives the generated results toward realism. Consequently, unlike any architecture above, the model must backpropagate and update gradients through two networks at once.

The basic GAN framework has many variants, including the Conditional GAN, InfoGAN, and pix2pix GAN. A Conditional GAN appends the class label to the inputs of both the generator and the discriminator, so the trained generator can produce targets of a requested label. InfoGAN borrows that idea and introduces a latent code, maximizing the mutual information between the code and the generated target during training; the generator can then steer its output along developer-specified directions, and the discriminator can score how well a given target matches the features the code specifies (similarity detection). The pix2pix GAN targets image tasks: instead of mapping random noise to an image as the previous three do, it maps an image to an image, which suits image-translation work such as colorization or feature extraction.
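
As a minimal sketch of the Conditional GAN input trick described above (the shapes are illustrative and unrelated to the DCGAN code below), the label is one-hot encoded and concatenated to the noise along the channel axis:

from mxnet import nd
batch_size, latent_size, num_classes = 64, 100, 10
z = nd.random_normal(shape=(batch_size, latent_size, 1, 1))         # noise input
y = nd.one_hot(nd.arange(batch_size) % num_classes, num_classes)    # class labels
gen_input = nd.concat(z, y.reshape((batch_size, num_classes, 1, 1)), dim=1)    # (64, 110, 1, 1)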

The code below lists a basic GAN for image generation, the DCGAN (Deep Convolutional GAN). Once the model is trained, any fixed-length random vector can be mapped to a generated image:

# Required modules
import os
import numpy as np
import tarfile
import logging
import matplotlib.image as mpimg
import matplotlib as mpl
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import gluon
from mxnet import ndarray as nd
from mxnet.gluon import nn, utils
from mxnet import autograd
import gluonbook as gb

# Build the network architectures
def model():
    nc, ngf = 3, 64
    netG = nn.Sequential()
    with netG.name_scope():
        netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
        netG.add(nn.BatchNorm())
        netG.add(nn.Activation('relu'))
        netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
        netG.add(nn.Activation('tanh'))
    ndf = 64
    netD = nn.Sequential()
    with netD.name_scope():
        netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        netD.add(nn.Conv2D(1, 4, 1, 0, use_bias=False))
    return netG, netD

# Parameters
epochs = 5
batch_size = 64
latent_z_size = 100
ctx = gb.try_gpu()
lr = 0.0002
beta1 = 0.5

# Load data
lfw_url = 'http://vis-www.cs.umass.edu/lfw/lfw-deepfunneled.tgz'
data_path = 'lfw-deepfunneled'
if not os.path.exists(data_path):                   # download and unpack only if not already present
    data_file = utils.download(lfw_url)
    with tarfile.open(data_file) as tar:
        tar.extractall(path=os.getcwd())
target_wd = 64
target_ht = 64
img_list = []
def transform(data, target_wd, target_ht):
    data = mx.image.imresize(data, target_wd, target_ht)
    data = nd.transpose(data, (2,0,1))
    data = data.astype(np.float32)/127.5 - 1
    if data.shape[0] == 1:
        data = nd.tile(data, (3, 1, 1))
    return data.reshape((1,) + data.shape)
for path, _, fnames in os.walk(data_path):
    for fname in fnames:
        if not fname.endswith('.jpg'):
            continue
        img = os.path.join(path, fname)
        img_arr = mx.image.imread(img)
        img_arr = transform(img_arr, target_wd, target_ht)
        img_list.append(img_arr)
train_data = mx.io.NDArrayIter(data=nd.concatenate(img_list), batch_size=batch_size)
real_label = nd.ones((batch_size,), ctx=ctx)
fake_label = nd.zeros((batch_size,),ctx=ctx)

# Training
netG,netD = model()
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': beta1})
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
def facc(label, pred):
    pred = pred.ravel()
    label = label.ravel()
    return ((pred > 0.5) == label).mean()
metric = mx.metric.CustomMetric(facc)
logging.basicConfig(level=logging.DEBUG)
for epoch in range(epochs):
    train_data.reset()
    i = 0
    for batch in train_data:
        data = batch.data[0].as_in_context(ctx)
        latent_z = mx.nd.random_normal(0, 1, shape=(batch_size, latent_z_size, 1, 1), ctx=ctx)
        with autograd.record():
            output = netD(data).reshape((-1, 1))
            errD_real = loss(output, real_label)
            metric.update([real_label,], [output,])
            fake = netG(latent_z)
            output = netD(fake.detach()).reshape((-1, 1))
            errD_fake = loss(output, fake_label)
            errD = errD_real + errD_fake
            errD.backward()
            metric.update([fake_label,], [output,])
        trainerD.step(batch.data[0].shape[0])
        with autograd.record():
            fake = netG(latent_z)
            output = netD(fake).reshape((-1, 1))
            errG = loss(output, real_label)
            errG.backward()
        trainerG.step(batch.data[0].shape[0])
        if i % 100 == 0:
            name, acc = metric.get()
            logging.info(' epoch %d, batch %d, D_loss = %f, G_loss = %f, train acc = %f' 
                         %(epoch, i, nd.mean(errD).asscalar(), nd.mean(errG).asscalar(), acc))
        i += 1
    name, acc = metric.get()
    metric.reset()
    fake_img = fake[0]
    plt.imshow(((fake_img.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
    plt.show()

6. Region-based Convolutional Neural Networks (SSD) (Mxnet implementation)

The invention of region-based convolutional networks marks the origin of modern object detection. R-CNN is the foundational work: it detects objects by classifying thousands or even tens of thousands of candidate rectangular regions extracted from a single image. Fast R-CNN replaces the region-extraction step with a neural network followed by a region-of-interest pooling layer (ROI, Region of Interest Pooling), saving a large share of training time. Faster R-CNN combines the strengths of both and introduces the Region Proposal Network (RPN), which uses anchor points and anchor boxes to produce a small set of effective candidates. Mask R-CNN adds a fully convolutional network (FCN) on top of Faster R-CNN so that detection can reach individual pixels; its most effective application is semantic segmentation. SSD (Single Shot MultiBox Detector) merges region proposal and classification into a single pass, greatly speeding the model up. YOLO (You Only Look Once) divides the image into a uniform grid to avoid the redundant region computation of the models above. The code below implements SSD.
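
As a minimal sketch of the anchor-box generation SSD relies on (the sizes, ratios, and feature-map shape here are illustrative), MultiBoxPrior lays out (number of sizes + number of ratios - 1) anchors per feature-map pixel:

from mxnet import contrib, nd
fmap = nd.zeros((1, 3, 32, 32))                       # a hypothetical 32x32 feature map
anchors = contrib.ndarray.MultiBoxPrior(fmap, sizes=[0.2, 0.272], ratios=[1, 2, 0.5])
print(anchors.shape)                                  # (1, 32*32*4, 4): four anchors per pixel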

# Required modules
import sys,time
import numpy as np
import matplotlib.pyplot as plt
from mxnet import autograd, contrib, gluon, image, init, nd
from mxnet.gluon import loss as gloss, nn
import gluonbook as gb

# Building blocks
def cls_predictor(num_anchors, num_classes):
    return nn.Conv2D(num_anchors * (num_classes + 1), kernel_size=3,padding=1)
def bbox_predictor(num_anchors):
    return nn.Conv2D(num_anchors * 4, kernel_size=3, padding=1)
def forward(x, block):
    block.initialize()
    return block(x)
def flatten_pred(pred):
    return pred.transpose(axes=(0, 2, 3, 1)).flatten()
def concat_preds(preds):
    return nd.concat(*[flatten_pred(p) for p in preds], dim=1)
def down_sample_blk(num_filters):
    blk = nn.HybridSequential()
    for _ in range(2):
        blk.add(nn.Conv2D(num_filters, kernel_size=3, padding=1),
                nn.BatchNorm(in_channels=num_filters),
                nn.Activation('relu'))
    blk.add(nn.MaxPool2D(2))
    blk.hybridize()
    return blk
def body_blk():
    blk = nn.HybridSequential()
    for num_filters in [16, 32, 64]:
        blk.add(down_sample_blk(num_filters))
    return blk
def get_blk(i):
    if i == 0:
        blk = body_blk()
    elif i == 4:
        blk = nn.GlobalMaxPool2D()
    else:
        blk = down_sample_blk(128)
    return blk
def single_scale_forward(x, blk, size, ratio, cls_predictor, bbox_predictor):
    y = blk(x)
    anchor = contrib.ndarray.MultiBoxPrior(y, sizes=size, ratios=ratio)
    cls_pred = cls_predictor(y)
    bbox_pred = bbox_predictor(y)
    return (y, anchor, cls_pred, bbox_pred)
def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
    cls = cls_loss(cls_preds, cls_labels)
    bbox = bbox_loss(bbox_preds * bbox_masks, bbox_labels * bbox_masks)
    return cls + bbox
def cls_metric(cls_preds, cls_labels):
    return (cls_preds.argmax(axis=-1) == cls_labels).mean().asscalar()
def bbox_metric(bbox_preds, bbox_labels, bbox_masks):
    return (bbox_labels - bbox_preds * bbox_masks).abs().mean().asscalar()
def predict(x):
    anchors, cls_preds, bbox_preds = net(x.as_in_context(ctx))
    cls_probs = cls_preds.softmax().transpose((0, 2, 1))
    out = contrib.nd.MultiBoxDetection(cls_probs, bbox_preds, anchors)
    idx = [i for i, row in enumerate(out[0]) if row[0].asscalar() != -1]
    return out[:, idx]
def display(x,out):
    img = ((x.transpose((0, 2, 3, 1))).clip(0, 254) / 254)[0]
    ax = plt.subplot(111)
    ax.imshow(img.asnumpy())
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    row = out[0]
    if row.shape[0] == 5:
        gb.show_bboxes(ax, [row[1:]*img.shape[0]], colors=['yellow'])
    else:
        score = row[1].asscalar()
        gb.show_bboxes(ax, [row[2:]*img.shape[0]], '%.2f'%score, colors=['yellow'])
    plt.show()
class SSD(nn.Block):
    def __init__(self, num_classes, verbose=False, **kwargs):
        super(SSD, self).__init__(**kwargs)
        self.num_classes = num_classes
        for i in range(5):
            setattr(self, 'blk_%d'%i, get_blk(i))
            setattr(self, 'cls_%d'%i, cls_predictor(num_anchors,num_classes))
            setattr(self, 'bbox_%d'%i, bbox_predictor(num_anchors))
    def forward(self, x):
        anchors, cls_preds, bbox_preds = [None]*5, [None]*5, [None]*5
        for i in range(5):
            x, anchors[i], cls_preds[i], bbox_preds[i] = single_scale_forward(
                x, getattr(self, 'blk_%d' % i), sizes[i], ratios[i],
                getattr(self, 'cls_%d' % i), getattr(self, 'bbox_%d' % i))
        return (nd.concat(*anchors, dim=1),
                concat_preds(cls_preds).reshape((0, -1, self.num_classes + 1)),
                concat_preds(bbox_preds))

# Parameters
ctx = gb.try_gpu()
num_anchors = 4
sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79], [0.88, 0.961]]
ratios = [[1, 2, 0.5]] * 5
batch_size = 32

# Load data
train_data, test_data = gb.load_data_pikachu(batch_size)
train_data.reshape(label_shape=(3, 5))

# Training
net = SSD(num_classes=2)
net.initialize(init=init.Xavier(), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(),'sgd', {'learning_rate': 0.1, 'wd': 5e-4})
cls_loss = gloss.SoftmaxCrossEntropyLoss()
bbox_loss = gloss.L1Loss()
for epoch in range(5):
    acc, mae = 0, 0
    train_data.reset()
    tic = time.time()
    for i, batch in enumerate(train_data):
        X_train = batch.data[0].as_in_context(ctx)
        Y_train = batch.label[0].as_in_context(ctx)
        with autograd.record():
            anchors, cls_preds, bbox_preds = net(X_train)
            bbox_labels, bbox_masks, cls_labels = contrib.nd.MultiBoxTarget(anchors, Y_train, cls_preds.transpose(axes=(0,2,1)))
            l = calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks)
        l.backward()
        trainer.step(batch_size)
        acc += cls_metric(cls_preds, cls_labels)
        mae += bbox_metric(bbox_preds, bbox_labels, bbox_masks)
        if (i+1) % 10 == 0:
            print('epoch %d, batch %d, class err %.6f, bbox MAE %.6f, time %.1f sec' % (
                epoch, i+1, 1 - acc / (i + 1), mae / (i + 1), time.time() - tic))
            tic = time.time()
        if (i+1) % 50 == 0:
            X_test = nd.zeros(1)
            while X_test.sum().asscalar() == 0:
                batch = test_data.next()
                idx = int(nd.random_uniform(1, batch_size).asscalar())
                X_test = batch.data[0].as_in_context(ctx)[idx].expand_dims(axis=0)
            out = predict(X_test)[0]
            display(X_test,out)
