Most common sequence-to-sequence (seq2seq) models are encoder-decoder models, consisting of two parts, an Encoder and a Decoder, both of which are usually implemented with recurrent neural networks (RNNs).
The Encoder encodes a sequence of inputs, such as text, video, or audio signals, into a single vector. This vector can be thought of as an abstract representation of the whole input, containing all of its information.
The Decoder decodes the vector produced by the Encoder step by step, emitting one output at a time until the full target sequence has been generated; each output influences the next one. Decoding usually starts by feeding a <BOS> token to mark the beginning, and ends when an <EOS> token is produced to mark the end of decoding.
The first step is to download the data, i.e. the dataset needed for this task.
!gdown --id '1r4px0i-NcrnXy1-tkBsIwvYwbWnxAhcg' --output data.tar.gz
!tar -zxvf data.tar.gz
!mkdir ckpt
!ls
Next, import the packages we need (if the nltk package is not installed, install it with the first command below).
!pip3 install --user nltk
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.data as data
import torch.utils.data.sampler as sampler
import torchvision
from torchvision import datasets, transforms
import numpy as np
import sys
import os
import random
import json
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # run on GPU if available, otherwise CPU
Note that different sentences usually have different lengths, which is inconvenient for training (sequences in a batch must share the same length for the RNN). To get around this, we pad shorter sentences with the <PAD> token so that every sequence reaches the same fixed length. We therefore define a class that transforms sequences to a fixed length:
import numpy as np

class LabelTransform(object):
    def __init__(self, size, pad):
        self.size = size   # target length after padding
        self.pad = pad     # index of the <PAD> token

    def __call__(self, label):
        # Right-pad the integer sequence with the <PAD> index up to self.size
        label = np.pad(label, (0, (self.size - label.shape[0])), mode='constant', constant_values=self.pad)
        return label
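As a quick sanity check, here is a small usage example; the target length 8 and pad index 0 are made-up values for illustration (in the real pipeline the pad index comes from word2int_en['<PAD>']):

transform = LabelTransform(size=8, pad=0)       # hypothetical: pad to length 8 with pad index 0
example = np.asarray([1, 28, 29, 205, 2])       # e.g. <BOS>, we, are, friends, <EOS>
print(transform(example))                       # -> [  1  28  29 205   2   0   0   0]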
The next step is data preparation: we define a Dataset.
Data (from the cmn-eng set on manythings):
Preprocessing: the English and Chinese sentences in each line are separated (tab-delimited), tokenized on whitespace, wrapped with <BOS> and <EOS>, mapped to integer indexes with out-of-vocabulary subwords replaced by <UNK>, and finally padded to a fixed length with <PAD>.
import re
import json

class EN2CNDataset(data.Dataset):
    def __init__(self, root, max_output_len, set_name):
        self.root = root

        self.word2int_cn, self.int2word_cn = self.get_dictionary('cn')
        self.word2int_en, self.int2word_en = self.get_dictionary('en')

        # Load the data
        self.data = []
        with open(os.path.join(self.root, f'{set_name}.txt'), "r") as f:
            for line in f:
                self.data.append(line)
        print(f'{set_name} dataset size: {len(self.data)}')

        self.cn_vocab_size = len(self.word2int_cn)
        self.en_vocab_size = len(self.word2int_en)
        self.transform = LabelTransform(max_output_len, self.word2int_en['<PAD>'])

    def get_dictionary(self, language):
        # Load the dictionaries
        with open(os.path.join(self.root, f'word2int_{language}.json'), "r") as f:
            word2int = json.load(f)
        with open(os.path.join(self.root, f'int2word_{language}.json'), "r") as f:
            int2word = json.load(f)
        return word2int, int2word

    def __len__(self):
        return len(self.data)

    def __getitem__(self, Index):
        # Split the line into the English and Chinese sentences
        sentences = self.data[Index]
        sentences = re.split('[\t\n]', sentences)
        sentences = list(filter(None, sentences))
        #print (sentences)
        assert len(sentences) == 2

        # Special tokens
        BOS = self.word2int_en['<BOS>']
        EOS = self.word2int_en['<EOS>']
        UNK = self.word2int_en['<UNK>']

        # Add <BOS> at the beginning and <EOS> at the end; subwords (words) not in the dictionary are replaced with <UNK>
        en, cn = [BOS], [BOS]

        # Split the English sentence into subwords and convert them to integers
        sentence = re.split(' ', sentences[0])
        sentence = list(filter(None, sentence))
        #print (f'en: {sentence}')
        for word in sentence:
            en.append(self.word2int_en.get(word, UNK))
        en.append(EOS)

        # Split the Chinese sentence into subwords and convert them to integers
        # e.g. <BOS>, we, are, friends, <EOS> --> 1, 28, 29, 205, 2
        sentence = re.split(' ', sentences[1])
        sentence = list(filter(None, sentence))
        #print (f'cn: {sentence}')
        for word in sentence:
            cn.append(self.word2int_cn.get(word, UNK))
        cn.append(EOS)

        en, cn = np.asarray(en), np.asarray(cn)

        # Pad both sentences to the same length with <PAD>
        en, cn = self.transform(en), self.transform(cn)
        en, cn = torch.LongTensor(en), torch.LongTensor(cn)

        return en, cn
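With the Dataset defined, it can be wrapped in a standard PyTorch DataLoader. This is only a hedged sketch: the directory name 'cmn-eng', the set name 'training', and the numbers below are assumptions about the extracted archive, so adjust them to your own setup:

data_path = 'cmn-eng'     # assumed directory holding training.txt and the word2int/int2word json files
max_output_len = 50       # illustrative maximum sequence length after padding
batch_size = 60           # illustrative batch size

train_dataset = EN2CNDataset(data_path, max_output_len, 'training')
train_loader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

en_batch, cn_batch = next(iter(train_loader))
print(en_batch.shape, cn_batch.shape)   # both torch.Size([batch_size, max_output_len])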
Next we build the model itself.
Encoder
class Encoder(nn.Module):
    def __init__(self, en_vocab_size, emb_dim, hid_dim, n_layers, dropout):
        super().__init__()
        self.embedding = nn.Embedding(en_vocab_size, emb_dim)
        self.hid_dim = hid_dim
        self.n_layers = n_layers
        self.rnn = nn.GRU(emb_dim, hid_dim, n_layers, dropout=dropout, batch_first=True, bidirectional=True)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        # input = [batch size, sequence len] (token indexes)
        embedding = self.embedding(input)
        outputs, hidden = self.rnn(self.dropout(embedding))
        # outputs = [batch size, sequence len, hid dim * directions]
        # hidden  = [num_layers * directions, batch size, hid dim]
        # outputs is the output of the top layer of the RNN
        return outputs, hidden
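A hedged shape check of the Encoder with dummy data; all sizes here are made up for illustration:

enc = Encoder(en_vocab_size=4000, emb_dim=256, hid_dim=512, n_layers=3, dropout=0.5)   # illustrative sizes
dummy = torch.randint(0, 4000, (60, 50))   # [batch size, sequence len] of token indexes
outputs, hidden = enc(dummy)
print(outputs.shape)   # torch.Size([60, 50, 1024]) = [batch, seq len, hid dim * 2 directions]
print(hidden.shape)    # torch.Size([6, 60, 512])   = [n layers * 2 directions, batch, hid dim]

Because the GRU is bidirectional, the last dimension of outputs is hid_dim * 2, and hidden stacks n_layers * 2 direction-specific states; the Seq2Seq wrapper further below reshapes and concatenates the latter.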
Decoder
The Decoder is another RNN. In the simplest seq2seq decoder, only the Encoder's final hidden state of each layer is used for decoding. This final hidden state is sometimes called the "context vector", since it can be thought of as encoding the whole input sequence. The context vector is used as the Decoder's initial hidden state, while the Encoder's outputs are mainly used by the Attention Mechanism.
Parameters: cn_vocab_size, emb_dim, hid_dim, n_layers, dropout, and isatt (whether to use Attention).
Decoder inputs and outputs: at each step the Decoder takes the previously generated token, the previous hidden state, and the Encoder outputs, and returns a score for every word in the Chinese vocabulary together with the new hidden state.
class Decoder(nn.Module):
    def __init__(self, cn_vocab_size, emb_dim, hid_dim, n_layers, dropout, isatt):
        super().__init__()
        self.cn_vocab_size = cn_vocab_size
        self.hid_dim = hid_dim * 2
        self.n_layers = n_layers
        self.embedding = nn.Embedding(cn_vocab_size, emb_dim)
        self.isatt = isatt
        self.attention = Attention(hid_dim)
        # If you use the Attention Mechanism, the input dimension changes; modify it here
        # e.g. concatenating the attention vector to the input changes the dimension, so the input dim becomes
        # self.input_dim = emb_dim + hid_dim * 2 if isatt else emb_dim
        self.input_dim = emb_dim
        self.rnn = nn.GRU(self.input_dim, self.hid_dim, self.n_layers, dropout=dropout, batch_first=True)
        self.embedding2vocab1 = nn.Linear(self.hid_dim, self.hid_dim * 2)
        self.embedding2vocab2 = nn.Linear(self.hid_dim * 2, self.hid_dim * 4)
        self.embedding2vocab3 = nn.Linear(self.hid_dim * 4, self.cn_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, hidden, encoder_outputs):
        # input  = [batch size] (previous token indexes)
        # hidden = [num_layers, batch size, hid dim]
        # The Decoder is unidirectional, so directions = 1
        input = input.unsqueeze(1)
        embedded = self.dropout(self.embedding(input))
        # embedded = [batch size, 1, emb dim]
        if self.isatt:
            attn = self.attention(encoder_outputs, hidden)
            # TODO: decide how to use the attention here, e.g. add it to the input or concatenate it; watch the dimensions
        output, hidden = self.rnn(embedded, hidden)
        # output = [batch size, 1, hid dim]
        # hidden = [num_layers, batch size, hid dim]

        # Turn the RNN output into a score for each word in the vocabulary
        output = self.embedding2vocab1(output.squeeze(1))
        output = self.embedding2vocab2(output)
        prediction = self.embedding2vocab3(output)
        # prediction = [batch size, vocab size]
        return prediction, hidden
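Once the Attention class of the next section has been defined (the Decoder constructor instantiates it even when isatt=False), the Decoder's expected shapes can be checked with dummy tensors. All sizes here are illustrative; note that the hidden state and encoder outputs carry hid_dim * 2 features because the Encoder is bidirectional:

dec = Decoder(cn_vocab_size=3800, emb_dim=256, hid_dim=512, n_layers=3, dropout=0.5, isatt=False)
prev_token = torch.randint(0, 3800, (60,))        # [batch size] of previously generated token indexes
hidden = torch.zeros(3, 60, 512 * 2)              # [n layers, batch, hid dim * 2], e.g. from the Encoder
encoder_outputs = torch.zeros(60, 50, 512 * 2)    # [batch, sequence len, hid dim * 2]
prediction, hidden = dec(prev_token, hidden, encoder_outputs)
print(prediction.shape)   # torch.Size([60, 3800]) = [batch, cn vocab size]
print(hidden.shape)       # torch.Size([3, 60, 1024])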
Attention
When the input is very long, or when the context vector alone cannot capture the meaning of the whole input, the Attention Mechanism provides the Decoder with additional information.
Based on the current Decoder hidden state, it computes which parts of the Encoder outputs are most related to it, and passes extra information to the Decoder according to these relevance scores.
A common implementation computes the relation between the Decoder hidden state and the Encoder outputs with a neural network or a dot product, applies a softmax over all the resulting scores, and finally takes a weighted sum of the Encoder outputs with the softmaxed weights.
Hung-yi Lee's course does not provide the code for this part, so you need to fill it in yourself. You can refer to the article "Seq2Seq (Attention) 的 PyTorch 实现" or the Bilibili video "PyTorch35——基于注意力机制的Seq2Seq的PyTorch实现示例".
class Attention(nn.Module):
    def __init__(self, hid_dim):
        super(Attention, self).__init__()
        self.hid_dim = hid_dim

    def forward(self, encoder_outputs, decoder_hidden):
        # encoder_outputs = [batch size, sequence len, hid dim * directions]
        # decoder_hidden  = [num_layers, batch size, hid dim]
        # Usually the hidden state of the last layer is used to compute the attention
        ########
        # TODO #
        ########
        attention = None
        return attention
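As a starting point for the TODO above, here is a minimal sketch of the dot-product variant described earlier: score each Encoder output against the last layer of the Decoder hidden state, softmax the scores, and take a weighted sum of the Encoder outputs. It is written as a separate class (DotProductAttention, a name introduced here) so it does not overwrite the skeleton; no extra projection is needed because the Decoder hidden size already equals hid_dim * 2, matching the bidirectional Encoder outputs:

class DotProductAttention(nn.Module):
    # One possible completion of the Attention module above (a sketch, not the reference solution)
    def __init__(self, hid_dim):
        super().__init__()
        self.hid_dim = hid_dim

    def forward(self, encoder_outputs, decoder_hidden):
        # encoder_outputs = [batch size, sequence len, hid dim * 2]
        # decoder_hidden  = [num_layers, batch size, hid dim * 2]
        # Use the last layer of the decoder hidden state as the query
        query = decoder_hidden[-1].unsqueeze(2)                   # [batch, hid dim * 2, 1]
        scores = torch.bmm(encoder_outputs, query).squeeze(2)     # [batch, sequence len]
        weights = F.softmax(scores, dim=1).unsqueeze(1)           # [batch, 1, sequence len]
        # Weighted sum of the encoder outputs
        attention = torch.bmm(weights, encoder_outputs)           # [batch, 1, hid dim * 2]
        return attention

If you adopt something like this, remember the TODO in Decoder.forward: for example, concatenate the attention vector with embedded along the last dimension and change self.input_dim to emb_dim + hid_dim * 2, as hinted by the commented-out line in the Decoder constructor.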
Seq2Seq model
class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        assert encoder.n_layers == decoder.n_layers, \
            "Encoder and decoder must have equal number of layers!"

    def forward(self, input, target, teacher_forcing_ratio):
        # input  = [batch size, input len] (token indexes)
        # target = [batch size, target len] (token indexes)
        # teacher_forcing_ratio is the probability of feeding the ground truth during training
        batch_size = target.shape[0]
        target_len = target.shape[1]
        vocab_size = self.decoder.cn_vocab_size

        # Allocate a tensor to store the outputs
        outputs = torch.zeros(batch_size, target_len, vocab_size).to(self.device)
        # Feed the input into the Encoder
        encoder_outputs, hidden = self.encoder(input)
        # The Encoder's final hidden state is used to initialize the Decoder
        # encoder_outputs is mainly used for Attention
        # Since the Encoder is a bidirectional RNN, the hidden states of the two directions in each layer are concatenated
        # hidden = [num_layers * directions, batch size, hid dim] --> [num_layers, directions, batch size, hid dim]
        hidden = hidden.view(self.encoder.n_layers, 2, batch_size, -1)
        hidden = torch.cat((hidden[:, -2, :, :], hidden[:, -1, :, :]), dim=2)
        # Take the <BOS> token
        input = target[:, 0]
        preds = []
        for t in range(1, target_len):
            output, hidden = self.decoder(input, hidden, encoder_outputs)
            outputs[:, t] = output
            # Decide whether to use the ground truth at this step
            teacher_force = random.random() <= teacher_forcing_ratio
            # Take the word with the highest probability
            top1 = output.argmax(1)
            # With teacher forcing the ground truth is fed; otherwise the model's own prediction is used
            input = target[:, t] if teacher_force and t < target_len else top1
            preds.append(top1.unsqueeze(1))
        preds = torch.cat(preds, 1)
        return outputs, preds

    def inference(self, input, target):
        ########
        # TODO #
        ########
        # Implement Beam Search here
        # This function is called with batch size = 1
        # input  = [batch size, input len] (token indexes)
        # target = [batch size, target len] (token indexes)
        batch_size = input.shape[0]
        input_len = input.shape[1]  # maximum number of decoding steps
        vocab_size = self.decoder.cn_vocab_size

        # Allocate a tensor to store the outputs
        outputs = torch.zeros(batch_size, input_len, vocab_size).to(self.device)
        # Feed the input into the Encoder
        encoder_outputs, hidden = self.encoder(input)
        # The Encoder's final hidden state is used to initialize the Decoder
        # encoder_outputs is mainly used for Attention
        # Since the Encoder is a bidirectional RNN, the hidden states of the two directions in each layer are concatenated
        # hidden = [num_layers * directions, batch size, hid dim] --> [num_layers, directions, batch size, hid dim]
        hidden = hidden.view(self.encoder.n_layers, 2, batch_size, -1)
        hidden = torch.cat((hidden[:, -2, :, :], hidden[:, -1, :, :]), dim=2)
        # Take the <BOS> token
        input = target[:, 0]
        preds = []
        for t in range(1, input_len):
            output, hidden = self.decoder(input, hidden, encoder_outputs)
            # Store the prediction
            outputs[:, t] = output
            # Take the word with the highest probability (greedy decoding)
            top1 = output.argmax(1)
            input = top1
            preds.append(top1.unsqueeze(1))
        preds = torch.cat(preds, 1)
        return outputs, preds
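Finally, a hedged end-to-end smoke test that wires the three modules together on dummy tensors. The hyperparameters and the teacher-forcing ratio are illustrative, and the assignment's actual training loop, loss function, and optimizer are not shown here:

# Illustrative hyperparameters only
en_vocab_size, cn_vocab_size = 4000, 3800
emb_dim, hid_dim, n_layers, dropout = 256, 512, 3, 0.5

encoder = Encoder(en_vocab_size, emb_dim, hid_dim, n_layers, dropout)
decoder = Decoder(cn_vocab_size, emb_dim, hid_dim, n_layers, dropout, isatt=False)
model = Seq2Seq(encoder, decoder, device).to(device)

sources = torch.randint(0, en_vocab_size, (60, 50)).to(device)   # [batch, input len]
targets = torch.randint(0, cn_vocab_size, (60, 50)).to(device)   # [batch, target len]
outputs, preds = model(sources, targets, teacher_forcing_ratio=0.5)
print(outputs.shape)   # torch.Size([60, 50, 3800]) = [batch, target len, cn vocab size]
print(preds.shape)     # torch.Size([60, 49])       = [batch, target len - 1]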