Introduction
- GPT-2 is a large Transformer-based language model and a classic text generation model
- It can generate coherent paragraphs of text and handle a range of language modeling tasks such as reading comprehension, question answering, and machine translation
- This article walks through building the classic text generation model GPT-2 with Paddle 2.0
Environment Setup
import math
import paddle
import paddle.nn as nn
print('paddle version: %s' % paddle.__version__)
paddle version: 2.0.0-rc0
Model Architecture
- GPT-2 is a unidirectional (left-to-right) language model made up of two parts: the embedding layers before and after the Transformer Encoder (the output projection reuses the input word embedding weights) and the Transformer Encoder itself
- The Transformer Encoder is a stack of layers, each consisting of a Multi-Head Self-Attention layer followed by an MLP layer
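- For reference, the public OpenAI GPT-2 checkpoints roughly use the hyperparameters below (vocabulary size 50257, context length 1024); the dict name gpt2_configs and its keys are purely illustrative, but the values map onto the constructor arguments used in the classes that follow
gpt2_configs = {
    # Illustrative reference only; not part of the model code in this article
    'gpt2-small':  dict(layer_size=12, embedding_size=768,  num_attention_heads=12),
    'gpt2-medium': dict(layer_size=24, embedding_size=1024, num_attention_heads=16),
    'gpt2-large':  dict(layer_size=36, embedding_size=1280, num_attention_heads=20),
    'gpt2-xl':     dict(layer_size=48, embedding_size=1600, num_attention_heads=25),
}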
Building the Multi-Head Self-Attention Layer
- We start from the most basic layer
- The name already tells you what it does: Multi-Head (multiple attention heads) and Self-Attention (queries, keys, and values all come from the same input)
class Attention(nn.Layer):
    def __init__(self,
                 embedding_size,
                 num_attention_heads,
                 attention_dropout,
                 residual_dropout):
        super(Attention, self).__init__()
        self.num_attention_heads = num_attention_heads
        self.size_per_head = embedding_size // num_attention_heads
        self.embedding_size = embedding_size
        # One linear layer produces queries, keys, and values in a single pass
        self.query_key_value = nn.Linear(embedding_size, embedding_size * 3)
        self.attn_drop = nn.Dropout(attention_dropout)
        self.resid_drop = nn.Dropout(residual_dropout)
        self.dense = nn.Linear(embedding_size, embedding_size)

    def split_heads(self, x):
        # [batch, seq_len, embedding_size] -> [batch, num_heads, seq_len, size_per_head]
        x = x.reshape([-1, self.seq_len, self.num_attention_heads, self.size_per_head])
        return x.transpose((0, 2, 1, 3))

    def forward(self, x, kv_cache=None):
        self.seq_len = x.shape[1]
        x = self.query_key_value(x)
        q, k, v = x.split(num_or_sections=3, axis=2)
        q = self.split_heads(q)
        k = self.split_heads(k)
        v = self.split_heads(v)
        if kv_cache is not None:
            # Prepend the cached keys/values so past tokens are not recomputed
            pk, pv = paddle.unstack(kv_cache, axis=1)
            k = paddle.concat([pk, k], axis=-2)
            v = paddle.concat([pv, v], axis=-2)
        cached_kv = paddle.stack([k, v], axis=1)

        # Scaled dot-product attention
        attn = paddle.matmul(q, k, transpose_y=True)
        attn = attn / math.sqrt(self.size_per_head)
        # Causal mask: each position may only attend to itself and earlier positions
        # (with a cache, this simple square mask is correct when decoding one new token at a time)
        attention_mask = paddle.tril(paddle.ones([self.seq_len, self.seq_len], 'float32'))
        attention_mask = attention_mask.reshape([1, 1, self.seq_len, self.seq_len])
        attn = attn * attention_mask - 10000.0 * (1.0 - attention_mask)
        attn = nn.functional.softmax(attn, axis=-1)
        attn = self.attn_drop(attn)

        y = paddle.matmul(attn, v)
        # Merge the heads back: [batch, num_heads, seq_len, size_per_head] -> [batch, seq_len, embedding_size]
        y = y.transpose((0, 2, 1, 3))
        y = paddle.reshape(y, [-1, self.seq_len, self.embedding_size])
        y = self.resid_drop(self.dense(y))
        return y, cached_kv
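- A quick shape check with small, made-up sizes (purely illustrative) shows how the heads split the embedding and what the key/value cache looks like
attn_layer = Attention(embedding_size=64, num_attention_heads=8,
                       attention_dropout=0.0, residual_dropout=0.0)
x = paddle.randn([2, 5, 64])   # [batch, seq_len, embedding_size]
y, cache = attn_layer(x)
print(y.shape)      # [2, 5, 64]
print(cache.shape)  # [2, 2, 8, 5, 8] -> [batch, 2 (k/v), num_heads, seq_len, size_per_head]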
Building the MLP Layer
- A simple two-layer fully connected network
- GELU is used as the activation of the first fully connected layer (a quick numerical check follows the class below)
class MLP(nn.Layer):
    def __init__(self, embedding_size):
        super(MLP, self).__init__()
        # Expand to 4x the embedding size, then project back
        self.dense_h_to_4h = nn.Linear(embedding_size, embedding_size * 4)
        self.dense_4h_to_h = nn.Linear(embedding_size * 4, embedding_size)
        self.act = nn.functional.gelu

    def forward(self, x):
        h = self.act(self.dense_h_to_4h(x))
        h2 = self.dense_4h_to_h(h)
        return h2
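- GELU is x·Φ(x), where Φ is the standard normal CDF; a small sanity check against that exact form (assuming paddle.erf is available in your Paddle build):
x = paddle.linspace(-3.0, 3.0, 7)
exact_gelu = 0.5 * x * (1.0 + paddle.erf(x / math.sqrt(2.0)))
print(nn.functional.gelu(x) - exact_gelu)  # should be numerically close to zero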
Building the Attention + MLP Block
- Combine Attention and MLP into a single block and add LayerNorm layers; each LayerNorm sits before its sub-layer (pre-norm), with a residual connection around both the attention and the MLP
class Block(nn.Layer):
    def __init__(self,
                 embedding_size,
                 num_attention_heads,
                 attention_dropout,
                 residual_dropout):
        super(Block, self).__init__()
        self.input_layernorm = nn.LayerNorm(embedding_size, epsilon=1e-5)
        self.attention = Attention(embedding_size, num_attention_heads, attention_dropout, residual_dropout)
        self.post_attention_layernorm = nn.LayerNorm(embedding_size, epsilon=1e-5)
        self.mlp = MLP(embedding_size)

    def forward(self, x, kv_cache=None):
        # Pre-norm residual structure: LayerNorm -> attention -> residual add,
        # then LayerNorm -> MLP -> residual add
        attn, cached_kv = self.attention(self.input_layernorm(x), kv_cache=kv_cache)
        x = x + attn
        z = self.post_attention_layernorm(x)
        z = self.mlp(z)
        x = x + z
        return x, cached_kv
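- Thanks to the residual connections a block keeps the input shape unchanged, which is what makes it stackable; a quick illustrative check:
block = Block(embedding_size=64, num_attention_heads=8,
              attention_dropout=0.0, residual_dropout=0.0)
h = paddle.randn([2, 5, 64])
out, cache = block(h)
print(out.shape)  # [2, 5, 64] -- same shape as the input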
Building the Transformer Encoder
- Stack multiple blocks defined above and add a final LayerNorm to form the Transformer Encoder
class Transformer(nn.Layer):
    def __init__(self,
                 layer_size,
                 embedding_size,
                 num_attention_heads,
                 attention_dropout,
                 residual_dropout):
        super(Transformer, self).__init__()
        self.layers = nn.LayerList([Block(
                embedding_size,
                num_attention_heads,
                attention_dropout,
                residual_dropout)
            for _ in range(layer_size)])
        self.final_layernorm = nn.LayerNorm(embedding_size, epsilon=1e-5)

    def forward(self, x, kv_cache=None):
        cached_kvs = []
        for i, layer in enumerate(self.layers):
            # Each layer gets its own slice of the cache (if one was passed in)
            x, cached_kv = layer(
                x,
                kv_cache=kv_cache[i] if kv_cache is not None else None)
            cached_kvs.append(cached_kv)
        x = self.final_layernorm(x)
        # Stack the per-layer caches: [layer_size, batch, 2 (k/v), num_heads, seq_len, size_per_head]
        return x, paddle.stack(cached_kvs)
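- Another quick check with made-up sizes (illustrative only) shows the stacked cache layout, which the GPT-2 model below relies on to read off the number of past tokens
encoder = Transformer(layer_size=2, embedding_size=64, num_attention_heads=8,
                      attention_dropout=0.0, residual_dropout=0.0)
h = paddle.randn([2, 5, 64])
out, caches = encoder(h)
print(out.shape)     # [2, 5, 64]
print(caches.shape)  # [2, 2, 2, 8, 5, 8] -> [layer_size, batch, 2 (k/v), num_heads, seq_len, size_per_head]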
Building the GPT-2 Model
- Combining the required embedding layers with the Transformer Encoder completes the GPT-2 model; the word embedding matrix is reused (tied) as the output projection to produce logits over the vocabulary
class GPT2Model(nn.Layer):
    def __init__(self,
                 vocab_size,
                 layer_size,
                 block_size,
                 embedding_dropout,
                 embedding_size,
                 num_attention_heads,
                 attention_dropout,
                 residual_dropout):
        super(GPT2Model, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_size)
        # block_size is the maximum sequence length supported by the position embedding
        self.position_embeddings = nn.Embedding(block_size, embedding_size)
        self.emb_drop = nn.Dropout(embedding_dropout)
        self.transformer = Transformer(
            layer_size,
            embedding_size,
            num_attention_heads,
            attention_dropout,
            residual_dropout)

    def forward(self, x, kv_cache=None, use_cache=False):
        # When a cache is passed in, position ids continue from the cached length
        if kv_cache is None:
            past_length = 0
        else:
            past_length = kv_cache[0][0].shape[-2]
        position_ids = paddle.arange(past_length, x.shape[-1] + past_length, dtype='int64')
        position_ids = position_ids.unsqueeze(0).expand_as(x)
        x = self.word_embeddings(x)
        x = self.emb_drop(x + self.position_embeddings(position_ids))
        x, cached_kvs = self.transformer(x, kv_cache)
        # Output projection tied to the word embedding: [batch, seq_len, vocab_size]
        x = paddle.matmul(x, self.word_embeddings.weight, transpose_y=True)
        if use_cache:
            return x, cached_kvs
        return x
Model Test
- At this point the model is complete
- Next, check that it can run a forward pass correctly: once without a cache, once with use_cache=True to get the cache back, and once with an existing cache passed in to decode one more step
model = GPT2Model(
    vocab_size=30000,
    layer_size=2,
    block_size=1024,
    embedding_dropout=0.0,
    embedding_size=2560,
    num_attention_heads=32,
    attention_dropout=0.0,
    residual_dropout=0.0)
model.eval()

# Forward pass without a cache: returns only the logits
out = model(paddle.ones([1, 1], 'int64'))
print(out.shape)

# Forward pass that also returns the key/value cache
out, cached_kvs = model(paddle.ones([1, 1], 'int64'), use_cache=True)
print(out.shape, cached_kvs.shape)

# Forward pass with an existing cache of shape [layer_size, batch, 2, num_heads, seq_len, size_per_head]
out, cached_kvs = model(paddle.ones([1, 1], 'int64'), paddle.randn([2, 1, 2, 32, 1, 80], 'float32'), use_cache=True)
print(out.shape, cached_kvs.shape)
[1, 1, 30000]
[1, 1, 30000] [2, 1, 2, 32, 1, 80]
[1, 1, 30000] [2, 1, 2, 32, 2, 80]
Summary
- That completes the classic text generation model GPT-2
- Because GPT-2 has a huge number of parameters and needs an equally huge amount of training data, model training is not covered here
- You can, however, load an existing large pretrained model to build some fun, simple text generation applications (a minimal generation loop is sketched after this list)
- For details, see my other article: using GPT-2 to load the CPM model and build a simple question-answering bot
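- As a taste of how the kv_cache is used at generation time, here is a minimal greedy-decoding sketch; greedy_generate is just an illustrative helper, and since the model above has random, untrained weights the generated ids are meaningless — it only shows that after the first pass each step feeds a single new token plus the cache
def greedy_generate(model, input_ids, max_new_tokens=5):
    # Illustrative helper, not part of the model code above.
    # The first pass consumes the whole prompt and returns the cache;
    # every later pass feeds only the newest token together with the cache.
    model.eval()
    ids = input_ids
    logits, cache = model(ids, use_cache=True)
    for _ in range(max_new_tokens):
        next_id = paddle.argmax(logits[:, -1, :], axis=-1).unsqueeze(-1)  # [batch, 1]
        ids = paddle.concat([ids, next_id], axis=-1)
        logits, cache = model(next_id, cache, use_cache=True)
    return ids

print(greedy_generate(model, paddle.to_tensor([[1]], dtype='int64')))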