PyTorch Study Notes (5)

9) Using LSTMs in PyTorch
When learning how to use RNNs in PyTorch, it is best to start with the official documentation to see how the API works: (http://pytorch.org/docs/nn.html#recurrent-layers). One thing to note is that in PyTorch an RNN's input has shape (seq_len, batch, input_size), the initial hidden state h_0 has shape (num_layers * num_directions, batch, hidden_size), and the output has shape (seq_len, batch, hidden_size * num_directions). This differs from TensorFlow and Keras, which I used before and which put the batch dimension first.
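
For example, the following quick shape check (an illustrative sketch with made-up dimensions, not part of the original notes) passes a random sequence through nn.LSTM and prints the shapes described above:

import torch
import torch.autograd as autograd
import torch.nn as nn

seq_len, batch, input_size, hidden_size = 5, 1, 10, 20
lstm = nn.LSTM(input_size, hidden_size)  # num_layers=1, unidirectional by default
x = autograd.Variable(torch.randn(seq_len, batch, input_size))   # (seq_len, batch, input_size)
h0 = autograd.Variable(torch.zeros(1, batch, hidden_size))       # (num_layers * num_directions, batch, hidden_size)
c0 = autograd.Variable(torch.zeros(1, batch, hidden_size))
output, (hn, cn) = lstm(x, (h0, c0))
print(output.size())  # torch.Size([5, 1, 20]) -> (seq_len, batch, hidden_size * num_directions)
print(hn.size())      # torch.Size([1, 1, 20])

With the shapes clear, the full part-of-speech tagging example below builds an LSTMTagger on a toy dataset.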

import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

def prepare_sequence(seq, to_ix):
    # Map each token in seq to its index and wrap the result in a Variable.
    idxs = [to_ix[w] for w in seq]
    tensor = torch.LongTensor(idxs)
    return autograd.Variable(tensor)


# Toy training data: each example is a (tokenized sentence, gold tag sequence) pair.
training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
# Build the word-to-index vocabulary from the training sentences.
word_to_ix = {}
for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}

EMBEDDING_DIM = 6
HIDDEN_DIM = 6


class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim

        # Word embeddings: vocab_size -> embedding_dim.
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs and outputs hidden states
        # of dimension hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)

        # Linear layer mapping hidden states to the tag space.
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # (h_0, c_0), each of shape (num_layers * num_directions, batch, hidden_size).
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # Reshape the embeddings to (seq_len, batch=1, embedding_dim) for the LSTM.
        lstm_out, self.hidden = self.lstm(
            embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores


model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()  # NLLLoss pairs with the log-softmax scores returned by the model
optimizer = optim.SGD(model.parameters(), lr=0.1)



# Forward pass before training: the scores should be roughly uniform over the tags.
inputs = prepare_sequence(training_data[0][0], word_to_ix)
print(inputs)
print("inputs size: ", inputs.size())
tag_scores = model(inputs)
print(tag_scores)
print("tag_scores size: ", tag_scores.size())

for epoch in range(300):  # the dataset is tiny, so many epochs are cheap
    for sentence, tags in training_data:
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()

        # Re-initialize the hidden state so each sentence starts with a fresh history.
        model.hidden = model.init_hidden()

        # Turn the sentence into word indices and the tags into tag indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)

        tag_scores = model(sentence_in)

        # Compute the loss, backpropagate, and update the parameters.
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()

# Forward pass after training: the largest score in each row should now
# correspond to the correct tag.
inputs = prepare_sequence(training_data[0][0], word_to_ix)
tag_scores = model(inputs)

print(tag_scores)

The output is as follows:

{'dog': 1, 'ate': 2, 'the': 3, 'Everybody': 5, 'apple': 4, 'book': 8, 'read': 6, 'The': 0, 'that': 7}
Variable containing:
 0
 1
 2
 3
 4
[torch.LongTensor of size 5]

inputs size:  torch.Size([5])
Variable containing:
-1.1750 -1.2042 -0.9385
-1.2109 -1.1668 -0.9398
-1.1762 -1.2194 -0.9259
-1.2111 -1.2005 -0.9135
-1.2451 -1.1828 -0.9022
[torch.FloatTensor of size 5x3]

tag_scores size:  torch.Size([5, 3])
Variable containing:
-0.0832 -4.6391 -2.6573
-6.3608 -0.0345 -3.4355
-2.6776 -2.4210 -0.1714
-0.0497 -6.1473 -3.0711
-6.2093 -0.0339 -3.4624
[torch.FloatTensor of size 5x3]
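
Before training, every row of tag_scores is close to uniform over the three tags (around log(1/3) ≈ -1.10); after training, the largest log-probability in each row picks out the correct tag. To read off the predicted tag indices from the scores (an illustrative snippet, not part of the original code), take the argmax of each row:

# Illustrative only: recover predicted tag indices from the log-softmax scores.
_, predicted = torch.max(tag_scores.data, 1)
print(predicted)  # expected: 0 1 2 0 1, i.e. DET NN V DET NN for "The dog ate the apple"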
