The tutorial suggests, as an exercise, improving the network, for example with the nn.LSTM and nn.GRU layers. This post presents the resulting analysis along with the code.
The exercise comes from the PyTorch tutorial's name -> nationality classification example. To measure model quality, I followed the tutorial's own evaluation and wrote a script, evaluating, that randomly samples 10,000 names and reports the classification accuracy. Although the original model is simple, it performs quite well: under evaluating it reaches about 60% accuracy with the tutorial's parameters. To speed up training, and to compare the models fairly without tuning any of them, this post evaluates every model at hidden_size = 32.
Original model (accuracy: 55.35%):
import torch
import torch.nn as nn
from torch.autograd import Variable

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # Concatenate the current character with the previous hidden state
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))
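As a quick sanity check, one step of this cell can be run on a single one-hot character tensor. The sketch below assumes the values from the tutorial's data.py (n_letters = 57 input features, n_categories = 18 classes), which this post does not show:

n_letters, n_categories = 57, 18  # assumed values from the tutorial's data.py
rnn = RNN(n_letters, 32, n_categories)
hidden = rnn.initHidden()
x = torch.zeros(1, n_letters)
x[0][0] = 1  # one-hot encoding of the first letter of the alphabet
output, hidden = rnn(Variable(x), hidden)
print(output.size())  # (1, n_categories) log-probabilities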
Adding a linear layer on the input (accuracy: 56.90%):
class RNNMoreLinear(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNNMoreLinear, self).__init__()
        self.hidden_size = hidden_size
        self.input_linear = nn.Linear(input_size, input_size)
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # Pass the one-hot character through the extra linear layer first
        hidden_i = self.input_linear(input)
        combined = torch.cat((hidden_i, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return Variable(torch.zeros(1, self.hidden_size))
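The only change from the original model is the extra nn.Linear on the input: instead of feeding the raw one-hot character straight into i2h and i2o, the cell first maps it through a learned input_size -> input_size transform, which acts like a small trainable character embedding. This is presumably where the modest gain over the baseline comes from.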
Using a GRU (accuracy: 54.96%):
class RNN_GRU(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN_GRU, self).__init__()
        self.hidden_size = hidden_size
        self.gru_layers = 2
        self.gru = nn.GRU(input_size, hidden_size, self.gru_layers)
        self.i2o = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # nn.GRU expects (seq_len, batch, input_size), hence the view
        output, hidden = self.gru(input.view(1, 1, -1), hidden)
        output = self.softmax(self.i2o(output.view(1, -1)))
        return output, hidden

    def initHidden(self):
        # One hidden state per stacked GRU layer
        return Variable(torch.zeros(self.gru_layers, 1, self.hidden_size))
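Note that initHidden now returns a tensor of size (num_layers, batch, hidden_size) rather than (1, hidden_size), since nn.GRU keeps one hidden state per stacked layer. A quick shape check, with the same assumed n_letters and n_categories as above:

rnn = RNN_GRU(57, 32, 18)
hidden = rnn.initHidden()  # size (2, 1, 32)
output, hidden = rnn(Variable(torch.zeros(1, 57)), hidden)
print(output.size(), hidden.size())  # (1, 18) and (2, 1, 32)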
Stacking the original model into multiple layers (accuracy: 47.69%):
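Below is a minimal sketch of what this variant might look like, assuming two copies of the original RNN cell chained so that the log-softmax output of the first cell becomes the input of the second, as the analysis below suggests. The class name MultiRNN is inferred from the saved model filename used in the test code; the exact wiring is an assumption:

class MultiRNN(nn.Module):
    # Assumed reconstruction: two stacked copies of the original RNN cell,
    # where layer 1's log-softmax output is fed to layer 2 as its input.
    def __init__(self, input_size, hidden_size, output_size):
        super(MultiRNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h1 = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o1 = nn.Linear(input_size + hidden_size, output_size)
        self.i2h2 = nn.Linear(output_size + hidden_size, hidden_size)
        self.i2o2 = nn.Linear(output_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        hidden1, hidden2 = hidden
        combined1 = torch.cat((input, hidden1), 1)
        hidden1 = self.i2h1(combined1)
        output1 = self.softmax(self.i2o1(combined1))
        # Layer 2 consumes layer 1's log-probabilities as its input
        combined2 = torch.cat((output1, hidden2), 1)
        hidden2 = self.i2h2(combined2)
        output2 = self.softmax(self.i2o2(combined2))
        return output2, (hidden1, hidden2)

    def initHidden(self):
        return (Variable(torch.zeros(1, self.hidden_size)),
                Variable(torch.zeros(1, self.hidden_size)))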
Judging from the final results, none of the structures listed here matches the 60% accuracy obtained with the parameters in the original tutorial. At hidden_size = 32, the original network already scores comparatively well, and adding a linear layer on the input improves it slightly, while both the GRU and the multi-layer RNN do worse. The GRU's drop may simply be a matter of hyperparameter settings; the multi-layer RNN's drop is probably because the original RNN's output has already passed through a softmax, which makes it a poor feature representation for the next layer to build on.
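To illustrate that last point, a small demonstration (not from the original post): after nn.LogSoftmax the activations are log-probabilities, so their exponentials always sum to 1 and the next layer only ever sees points on the probability simplex, no matter how informative the pre-softmax features were:

import torch
import torch.nn as nn
from torch.autograd import Variable

logsoftmax = nn.LogSoftmax(dim=1)
for scale in (0.1, 1.0, 10.0):
    x = Variable(torch.randn(1, 18) * scale)  # 18 = n_categories
    p = logsoftmax(x).exp()
    print(p.sum().data[0])  # always ~1.0: the input's scale is discarded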
Finally, here is the test code (adapted from the original tutorial):
import torch
from torch.autograd import Variable
from data import *
import random
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# torch.load un-pickles the whole model, so the model class
# must be importable from this script
model_save_name = 'char-rnn-classification-MultiRNN.pt'
rnn = torch.load(model_save_name)
def categoryFromOutput(output):
    top_n, top_i = output.data.topk(1)  # Tensor out of Variable with .data
    category_i = top_i[0][0]
    return all_categories[category_i], category_i

def randomChoice(l):
    return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    category_tensor = Variable(torch.LongTensor([all_categories.index(category)]))
    line_tensor = Variable(lineToTensor(line))
    return category, line, category_tensor, line_tensor
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Just return an output given a line
def evaluate(line_tensor):
    hidden = rnn.initHidden()
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    return output
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output = evaluate(line_tensor)
    guess, guess_i = categoryFromOutput(output)
    category_i = all_categories.index(category)
    confusion[category_i][guess_i] += 1

# Accuracy = sum of the diagonal divided by the number of samples
correct = 0
for i in range(n_categories):
    correct += confusion[i][i]
correct /= n_confusion
print("accuracy of {} random samples: {}".format(n_confusion, correct))

# Normalize by dividing every row by its sum
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()