Transformer Multi-Head Attention: Code Notes

This note walks through the multi-head attention code used in GPT-2 (the minGPT implementation) as its example.

import math
import torch
import torch.nn as nn
from torch.nn import functional as F

class CausalSelfAttention(nn.Module):
    """
    Causal masking + multi-head self-attention.
    
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)  # single linear projection producing Q, K and V
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)  # output linear projection
        # regularization
        self.attn_dropout = nn.Dropout(config.attn_pdrop)  # dropout regularization
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        # causal mask to ensure that attention is only applied to the left in the input sequence
        self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                     .view(1, 1, config.block_size, config.block_size))  # build the causal (lower-triangular) mask
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)  # compute Q, K, V in one matmul, then split
        
        # split into heads: nh * hs = C
        # must view to (B, T, nh, hs) first and then transpose; viewing directly to (B, nh, T, hs) would scramble the data
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)  
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        # divide by sqrt(d_k) to keep the variance of the logits stable
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        # causal mask
        att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_dropout(att)
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        # contiguous(): the tensor must be laid out contiguously in memory before view() can be used
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y
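
The class above only defines the module; to sanity-check it end to end, here is a minimal usage sketch. The config object below is hypothetical (a plain SimpleNamespace); its field names are just the ones the class reads, while the real project builds its configuration through its own config class.

import torch
from types import SimpleNamespace

# hypothetical config; field names follow what CausalSelfAttention reads
config = SimpleNamespace(
    n_embd=64,        # embedding dimension C
    n_head=4,         # number of attention heads nh
    attn_pdrop=0.1,   # dropout on the attention weights
    resid_pdrop=0.1,  # dropout after the output projection
    block_size=16,    # maximum sequence length covered by the causal mask
)

attn = CausalSelfAttention(config)
x = torch.randn(2, 8, config.n_embd)  # (B=2, T=8, C=64); T must not exceed block_size
y = attn(x)
print(y.shape)  # torch.Size([2, 8, 64]) -- same shape as the input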

The following are step-by-step notes on the multi-head attention code above.
The __init__ method defines the components (the structure) that make up multi-head attention; the forward method then uses those components to perform the actual computation.
Start reading from the forward method, and refer back to the corresponding code in __init__ whenever one of its components is used.

# x is the input tensor
# B is the batch size
# T is the sequence length
# C is the embedding dimension (n_embd)
B, T, C = x.size()
# compute q, k, v with a single pass through self.c_attn, then split along the channel dimension
q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
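
To make the shapes concrete, here is a small standalone sketch (with made-up sizes B=2, T=4, C=8) of what c_attn and split produce:

import torch
import torch.nn as nn

B, T, C = 2, 4, 8                 # made-up sizes for illustration
x = torch.randn(B, T, C)

c_attn = nn.Linear(C, 3 * C)      # plays the role of self.c_attn
qkv = c_attn(x)                   # (B, T, 3C) = (2, 4, 24)
q, k, v = qkv.split(C, dim=2)     # three tensors of shape (B, T, C) = (2, 4, 8)
print(qkv.shape, q.shape, k.shape, v.shape)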




# building q, k, v
# check that the embedding dimension is divisible by the number of heads
assert config.n_embd % config.n_head == 0
# q, k and v are three different matrices of the same size, so a single linear projection of width 3 * n_embd produces all of them at once
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
# split into heads: nh * hs = C, where nh is the number of heads and hs is the per-head dimension
# must view to (B, T, nh, hs) first and then transpose; viewing directly to (B, nh, T, hs) would scramble the data (see the sketch after this block)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
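
Why the order matters: a direct view to (B, nh, T, hs) only reinterprets the underlying memory and ends up mixing time steps into the head dimension, while view-then-transpose keeps the hs channels of each head attached to the right time step. A small sketch with made-up sizes:

import torch

B, T, nh, hs = 1, 3, 2, 4
C = nh * hs
k = torch.arange(B * T * C).float().view(B, T, C)  # each row of length C is one time step

correct = k.view(B, T, nh, hs).transpose(1, 2)  # (B, nh, T, hs): head h gets channels [h*hs:(h+1)*hs] of every step
wrong = k.view(B, nh, T, hs)                    # same shape, but the memory is sliced in the wrong order

print(torch.equal(correct, wrong))  # False -- the two layouts hold different data
print(correct[0, 0])  # rows are channels 0..3 of time steps 0, 1, 2
print(wrong[0, 0])    # rows are the first 12 raw values: all of step 0 plus half of step 1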



# self.n_head is set in the configuration
self.n_head = config.n_head
# transpose(1, 2) swaps the sequence-length and head dimensions so each head becomes an independent "batch"
# below is the attention formula: softmax(Q K^T / sqrt(d_k)) V
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
# divide by sqrt(d_k) to keep the variance of the logits stable
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
# causal mask: each position may only attend to itself and to positions on its left
att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
# contiguous(): the tensor must be laid out contiguously in memory before view() can be used
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
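
To see what the causal mask does, here is a tiny standalone sketch (single head, T=3, made-up scores) showing that after masked_fill and softmax each row i only puts weight on positions 0..i:

import torch
import torch.nn.functional as F

T = 3
att = torch.randn(1, 1, T, T)                         # made-up attention scores
mask = torch.tril(torch.ones(T, T)).view(1, 1, T, T)  # same construction as self.bias
att = att.masked_fill(mask == 0, float('-inf'))       # future positions become -inf
att = F.softmax(att, dim=-1)                          # -inf turns into weight 0
print(att[0, 0])
# row 0: [1.0, 0.0, 0.0]  -- token 0 attends only to itself
# row 1: [p, 1-p, 0.0]    -- token 1 attends to tokens 0 and 1
# row 2: all three weights are nonzero and sum to 1; entries above the diagonal are always 0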

# earlier C was split into nh heads of size hs; here the per-head outputs are merged back into the original shape (B, T, C)
# contiguous() copies the data into a contiguous memory layout (only when needed) so that view() can follow the transpose
# output projection
y = self.resid_dropout(self.c_proj(y))
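
The point about contiguous() can be verified directly: transpose() returns a view with rearranged strides, so the data is no longer contiguous and view() raises an error; contiguous() materializes a contiguous copy so the reshape works. A small sketch:

import torch

y = torch.randn(2, 4, 3, 8)        # (B, nh, T, hs) with made-up sizes
y = y.transpose(1, 2)              # (B, T, nh, hs) -- a non-contiguous view
print(y.is_contiguous())           # False

try:
    y.view(2, 3, 32)               # view() needs a compatible contiguous layout
except RuntimeError as e:
    print("view failed:", e)

y = y.contiguous().view(2, 3, 32)  # copy into contiguous memory, then merge nh*hs back into C
print(y.shape)                     # torch.Size([2, 3, 32])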



# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd)  # output linear projection
# regularization
self.attn_dropout = nn.Dropout(config.attn_pdrop)  # dropout regularization
self.resid_dropout = nn.Dropout(config.resid_pdrop)
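
As a closing side note (not part of the original code): on PyTorch 2.0 and later, the mask / softmax / dropout / att @ v sequence inside forward can be replaced by the fused F.scaled_dot_product_attention call. A sketch of the drop-in core, assuming q, k, v are the (B, nh, T, hs) tensors produced above:

import torch.nn.functional as F

# drop-in replacement for the att computation inside forward (requires PyTorch >= 2.0)
y = F.scaled_dot_product_attention(
    q, k, v,
    attn_mask=None,
    dropout_p=self.attn_dropout.p if self.training else 0.0,
    is_causal=True,                # applies the same lower-triangular mask as self.bias
)                                  # (B, nh, T, hs)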
