Personal notes on the BiLSTM+CRF example in the official PyTorch tutorial (continuously revised)

Everything I want to say is in the code comments:

# Author: Robert Guthrie


import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim

'''
First, every unit of the sentence x is represented by a vector built from character and/or word embeddings. The character embeddings are randomly initialized, while the word embeddings are obtained by training on data; all embeddings are fine-tuned during training.
Second, these character/word embeddings are the input to the BiLSTM-CRF model, and the output is the predicted tag for each unit of sentence x.
'''


# Fix the random seed so every run produces the same random numbers
torch.manual_seed(1)

# Return the column index of the largest element in vec (a 1 x N tensor)
def argmax(vec):
    # return the argmax as a python int
    _, idx = torch.max(vec, 1)
    # Retrieve the value: .item() can only be called on a tensor with a single element
    return idx.item()

# Convert a sequence of words to a tensor of word indices
def prepare_sequence(seq, to_ix):
    # Build the list of indices
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

# Numerically stable log-sum-exp of the row vector vec: subtract the max, exponentiate, sum, take the log, then add the max back
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    max_score = vec[0, argmax(vec)]
    max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    # Subtracting the max prevents overflow in exp()
    return max_score + \
        torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))
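# Quick self-check (my own note, not part of the tutorial): this helper should agree
# with PyTorch's built-in logsumexp, e.g.
#   v = torch.randn(1, 5)
#   assert torch.allclose(log_sum_exp(v), torch.logsumexp(v, dim=1))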


class BiLSTM_CRF(nn.Module):
    
    def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
        
        # Call the parent class's __init__
        super(BiLSTM_CRF, self).__init__()
        # Embedding dimension: size of each word vector
        self.embedding_dim = embedding_dim
        # Hidden-layer dimension
        self.hidden_dim = hidden_dim
        # Vocabulary size
        self.vocab_size = vocab_size
        # Dictionary mapping tags to indices
        self.tag_to_ix = tag_to_ix

        # Output dimension: the number of distinct tags
        self.tagset_size = len(tag_to_ix)

        ''' How nn.Embedding works
        A simple lookup table that stores embeddings of a fixed dictionary and size.

        This module is often used to store word embeddings and retrieve them using indices.

        The input to the module is a list of indices, and the output is the corresponding word embeddings.

        requires_grad: indicates whether gradients for this tensor should be tracked during computation.
        '''
        # An Embedding module containing vocab_size tensors of size embedding_dim
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
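        # Tiny illustration (my own note): nn.Embedding(10, 5) maps index tensors to vectors, e.g.
        #   nn.Embedding(10, 5)(torch.tensor([1, 3])).shape == torch.Size([2, 5])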
        # The LSTM layer
        '''
        Constructor arguments: input_size, hidden_size, num_layers
        hidden_size: the dimensionality of the LSTM hidden state, i.e. the number of hidden units
        PyTorch's LSTM expects a 3-D tensor as input:
           dim 0 is the sequence length, i.e. the length of each sentence fed to the LSTM (for other sequential data, the length of one well-defined segment),
           dim 1 is the batch size, i.e. how many sentences are fed to the network at a time,
           dim 2 is the size of each input element, i.e. how many dimensions each word vector has
        '''
        self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,
                            num_layers=1, bidirectional=True)
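        # Shape sanity check (my own note): with embedding_dim=5 and hidden_dim=4, an input of shape
        # (seq_len, 1, 5) yields an output of shape (seq_len, 1, 4), because the forward and backward
        # directions (hidden_dim // 2 = 2 units each) are concatenated.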

        # Maps the output of the LSTM into tag space.
        # A linear layer mapping the BiLSTM hidden state (hidden_dim) to the tag space (tagset_size)
        self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)

        # Matrix of transition parameters.  Entry i,j is the score of transitioning *to* i *from* j.
        # The transition matrix is randomly initialized and updated during training
        self.transitions = nn.Parameter(
            torch.randn(self.tagset_size, self.tagset_size))

        # These two statements enforce the constraint that we never transfer to the start tag and we never transfer from the stop tag
        # Transition matrix: transitions go from the column-index tag to the row-index tag
        # Constraints: no tag may transition into START, and STOP may not transition into any other tag
        self.transitions.data[tag_to_ix[START_TAG], :] = -10000
        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
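        # Example (my own note, using the B/I/O tag set from the demo below):
        #   self.transitions[tag_to_ix["B"], tag_to_ix["O"]] is the score of moving
        #   from tag O at step t-1 to tag B at step t.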

        # Initialize the hidden state
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # randn: samples drawn from N(0, 1) (random draws from a standard normal; this does not mean the resulting data itself looks normal)
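        # Shape note (my own): (num_layers * num_directions, batch_size, hidden_size) = (2, 1, hidden_dim // 2)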
        return (torch.randn(2, 1, self.hidden_dim // 2),
                torch.randn(2, 1, self.hidden_dim // 2))

    # Do the forward algorithm to compute the partition function
    # Forward algorithm: feats holds the LSTM outputs (emission scores) for every timestep
    def _forward_alg(self, feats):

        # Initialize every alpha to -10000 (effectively log 0)
        init_alphas = torch.full((1, self.tagset_size), -10000.)

        # START_TAG has all of the score.
        # except START_TAG, whose alpha is 0 (log of probability 1)
        init_alphas[0][self.tag_to_ix[START_TAG]] = 0.

        # Wrap in a variable so that we will get automatic backprop
        # Wrap in a variable so that autograd handles backpropagation
        forward_var = init_alphas

        # Iterate through the sentence
        for feat in feats:

            # The forward tensors at this timestep
            # The forward tensors for the current timestep
            alphas_t = []  
            for next_tag in range(self.tagset_size):

                # broadcast the emission score: it is the same regardless of the previous tag
                '''
                The matrix produced by the LSTM is the emission score (the output of the H() function in the formulas).
                The CRF is a discriminative model.
                emit score: the sum of the BiLSTM scores for the tag assigned at each position in the sequence
                transition score: the corresponding sum taken over the tag transition matrix
                Score = EmissionScore + TransitionScore
                '''
                emit_score = feat[next_tag].view(
                    1, -1).expand(1, self.tagset_size)

                # the ith entry of trans_score is the score of transitioning to next_tag from i
                trans_score = self.transitions[next_tag].view(1, -1)

                # The ith entry of next_tag_var is the value for the edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = forward_var + trans_score + emit_score

                # The forward variable for this tag is log-sum-exp of all the scores.
                alphas_t.append(log_sum_exp(next_tag_var).view(1))
            
            # Concatenate the per-tag scores (cat along dim=0) and reshape to (1, tagset_size):
            # this vector of log-sum-exp values becomes forward_var for the next timestep
            forward_var = torch.cat(alphas_t).view(1, -1)

        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
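        # alpha is the log partition function log Z(x): the log-sum-exp over the scores of all possible tag paths (my own note)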
        return alpha

    # The LSTM output, i.e. the emission scores
    def _get_lstm_features(self, sentence):
        self.hidden = self.init_hidden()
        embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
        lstm_feats = self.hidden2tag(lstm_out)
        return lstm_feats

    # Score of a given tag sequence under the CRF, i.e. emission + transition scores
    def _score_sentence(self, feats, tags):
        # Gives the score of a provided tag sequence
        score = torch.zeros(1)
        tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])
        
        # Accumulate transition + emission scores along the given path
        for i, feat in enumerate(feats):
            score = score + \
                self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]

        score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
        return score
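    # Worked example (my own note): for gold tags y = [B, I, O], the gold score is
    #   trans[B, START] + feats[0][B] + trans[I, B] + feats[1][I] + trans[O, I] + feats[2][O] + trans[STOP, O]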

    # What decoding means:
    # given a known observation sequence, find the most likely corresponding sequence of states (tags)
    def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        init_vvars = torch.full((1, self.tagset_size), -10000.)
        init_vvars[0][self.tag_to_ix[START_TAG]] = 0

        # forward_var at step i holds the viterbi variables for step i-1
        forward_var = init_vvars

        
        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the previous step, 
                # plus the score of transitioning from tag i to next_tag.
                # We don't include the emission scores here 
                # because the max does not depend on them (we add them in below)
                next_tag_var = forward_var + self.transitions[next_tag]
                # Find the best previous tag to transition from into next_tag
                best_tag_id = argmax(next_tag_var)
                # Record the backpointer
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))

            # Now add in the emission scores, and assign forward_var to the set
            # of viterbi variables we just computed
            forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0][best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        
        # Walk the backpointers to recover the best path
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        
        # Pop off the start tag (we don't want to return that to the caller)
        # Remove START
        start = best_path.pop()
        assert start == self.tag_to_ix[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path
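    # Complexity note (my own): like _forward_alg, Viterbi runs in O(seq_len * tagset_size^2);
    # it is the same recursion with max in place of log-sum-exp.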

    def neg_log_likelihood(self, sentence, tags):
        feats = self._get_lstm_features(sentence)
        # log Z(x): score from the forward algorithm over all possible tag sequences
        forward_score = self._forward_alg(feats)
        # Score of the gold (true) tag sequence
        gold_score = self._score_sentence(feats, tags)
        # -log P(y|x) = forward_score - gold_score
        return forward_score - gold_score
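    # Derivation (my own note): P(y|x) = exp(score(x, y)) / Z(x) and forward_score = log Z(x),
    # so the value returned above is -log P(y|x), the negative log-likelihood of the gold tags.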

    # don't confuse this with _forward_alg above.
    # Overrides nn.Module's forward()
    def forward(self, sentence):  
        # Get the emission scores from the BiLSTM
        lstm_feats = self._get_lstm_features(sentence)

        # Find the best path, given the features.
        score, tag_seq = self._viterbi_decode(lstm_feats)

        return score, tag_seq

if __name__ == '__main__':
    START_TAG = "<START>"
    STOP_TAG = "<STOP>"
    EMBEDDING_DIM = 5
    HIDDEN_DIM = 4

    # Make up some training data
    training_data = [(
        "the wall street journal reported today that apple corporation made money".split(),
        "B I I I O O O B I O O".split()
    ), (
        "georgia tech is a university in georgia".split(),
        "B I O O O O B".split()
    )]

    word_to_ix = {}
    for sentence, tags in training_data:
        for word in sentence:
            if word not in word_to_ix:
                word_to_ix[word] = len(word_to_ix)

    tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4}

    model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
    optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)

    # Check predictions before training
    with torch.no_grad():
        precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
        precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long)
        # Note: this prints the predictions of the model *before* training
        print(model(precheck_sent))

    # Make sure prepare_sequence from earlier in the LSTM section is loaded
    for epoch in range(300):  # again, normally you would NOT do 300 epochs, it is toy data
        for sentence, tags in training_data:
            # Step 1. Remember that Pytorch accumulates gradients.
            # We need to clear them out before each instance
            model.zero_grad()

            # Step 2. Get our inputs ready for the network, that is,
            # turn them into Tensors of word indices.
            sentence_in = prepare_sequence(sentence, word_to_ix)
            targets = torch.tensor([tag_to_ix[t] for t in tags], dtype=torch.long)

            # Step 3. Run our forward pass.
            loss = model.neg_log_likelihood(sentence_in, targets)

            # Step 4. Compute the loss, gradients, and update the parameters by
            # calling optimizer.step()
            loss.backward()
            optimizer.step()

    # Check predictions after training
    with torch.no_grad():
        precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
        print(model(precheck_sent))
    # We got it!
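
The forward pass returns the Viterbi path score and a list of predicted tag indices. As a small follow-up (my own addition, not part of the tutorial), the indices can be mapped back to tag strings by inverting tag_to_ix:

# Invert tag_to_ix and print readable tags for the first training sentence
ix_to_tag = {ix: tag for tag, ix in tag_to_ix.items()}
with torch.no_grad():
    score, tag_seq = model(prepare_sequence(training_data[0][0], word_to_ix))
    print([ix_to_tag[ix] for ix in tag_seq])
# After 300 epochs this should typically match the gold tags "B I I I O O O B I O O"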
