Today I want to use an LSTM to do sentiment classification on the IMDB dataset.
I'm running today's experiment on Colab, since a stacked bidirectional LSTM is computationally heavy.
I recommend this blog post as an introduction to torchtext.
⚠️: the torchtext workflow is Field -> splits -> build_vocab (define the Fields, load and split the dataset, then build the vocabulary).
# install matching versions first, then import (torchtext 0.9 still ships the legacy API)
!pip install torch==1.8.0 torchtext==0.9.0
!python -m spacy download en_core_web_md

import numpy as np
import torch
from torch import nn, optim
from torchtext.legacy import data, datasets

torch.manual_seed(1024)  # fix the seed for reproducibility
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the Colab GPU when available

# TEXT tokenizes with spaCy; LABEL holds float labels so they work with BCEWithLogitsLoss
TEXT = data.Field(tokenize='spacy', tokenizer_language='en_core_web_md')
LABEL = data.LabelField(dtype=torch.float)
# download and split the IMDB dataset (25,000 training / 25,000 test reviews)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
print('len of train data:', len(train_data))
print('len of test data:', len(test_data))
print(train_data.examples[10].text)
print(train_data.examples[10].label)
# build the vocab from the training set only, seeding it with pre-trained GloVe vectors
TEXT.build_vocab(train_data, max_size=10000, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)
print(len(TEXT.vocab))        # 10002: the 10,000 most frequent words plus <unk> and <pad>
print(TEXT.vocab.itos[:10])   # peek at the first few tokens rather than dumping all 10,002
print(TEXT.vocab.stoi['here'])
print(LABEL.vocab.stoi)       # e.g. {'neg': 0, 'pos': 1}
batchsz = 30
# BucketIterator groups reviews of similar length to minimise padding per batch
train_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, test_data),
    batch_size=batchsz,
    device=device,
)
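To make the tensor layout concrete, here is an optional peek at one batch (my own addition, not in the original script). With the Field default batch_first=False, batch.text comes out as [seq_len, batch_size]:

batch = next(iter(train_iterator))
print(batch.text.shape)   # e.g. torch.Size([seq_len, 30]); seq_len varies per batch
print(batch.label.shape)  # torch.Size([30])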
class LSTMNet(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim):
        super(LSTMNet, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # two stacked bidirectional LSTM layers, with dropout between the layers
        self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=2,
                           bidirectional=True, dropout=0.5)
        # the classifier sees the concatenated forward and backward hidden states
        self.fc = nn.Linear(hidden_dim * 2, 1)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        # x: [seq_len, batch] -> embedding: [seq_len, batch, embedding_dim]
        embedding = self.dropout(self.embedding(x))
        output, (hidden, cell) = self.rnn(embedding)
        # hidden: [num_layers * 2, batch, hidden_dim]; the last two entries are
        # the final layer's forward and backward states
        hidden = torch.cat([hidden[-2], hidden[-1]], dim=1)
        hidden = self.dropout(hidden)
        out = self.fc(hidden)
        return out
model = LSTMNet(len(TEXT.vocab), 100, 256).to(device)
print(model)
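A quick shape check on the hidden-state layout (my own addition; the toy batch is made up). For num_layers=2 and bidirectional=True, the states are ordered (layer 0 forward, layer 0 backward, layer 1 forward, layer 1 backward), which is why forward() concatenates hidden[-2] and hidden[-1]:

toy = torch.randint(0, len(TEXT.vocab), (50, 4), device=device)  # fake batch: [seq_len=50, batch=4]
_, (h, _) = model.rnn(model.embedding(toy))
print(h.shape)  # torch.Size([4, 4, 256]): (num_layers * num_directions, batch, hidden_dim)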
# initialise the embedding layer with the GloVe vectors loaded by build_vocab
pretrained_embedding = TEXT.vocab.vectors
print('pretrained_embedding:', pretrained_embedding.shape)  # torch.Size([10002, 100])
model.embedding.weight.data.copy_(pretrained_embedding)
print('embedding layer inited.')
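One optional refinement, borrowed from common torchtext tutorials and not in the original code: GloVe has no entries for the special tokens, and depending on the unk_init used in build_vocab their rows may already be zero, but zeroing them explicitly costs nothing:

UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model.embedding.weight.data[UNK_IDX].zero_()  # in-place, so it works on any device
model.embedding.weight.data[PAD_IDX].zero_()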
optimizer = optim.Adam(model.parameters(), lr=5e-2)
criterion = nn.BCEWithLogitsLoss()  # takes raw logits; fuses the sigmoid with binary cross-entropy
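BCEWithLogitsLoss is numerically more stable than applying a sigmoid and BCELoss separately, but the two compute the same quantity. A quick check with made-up values:

x = torch.randn(5)                        # fake logits
y = torch.randint(0, 2, (5,)).float()     # fake 0/1 labels
print(nn.BCEWithLogitsLoss()(x, y))
print(nn.BCELoss()(torch.sigmoid(x), y))  # same value, less stable for extreme logits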
def binary_acc(preds, y):
    # preds are raw logits: squash to probabilities, then round to 0/1
    preds = torch.round(torch.sigmoid(preds))
    correct = torch.eq(preds, y).float()
    acc = correct.sum() / len(correct)
    return acc
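A quick sanity check of binary_acc on made-up logits: sigmoid(2.0) ≈ 0.88 rounds to 1 and sigmoid(-1.0) ≈ 0.27 rounds to 0, so both toy predictions match their labels:

print(binary_acc(torch.tensor([2.0, -1.0]), torch.tensor([1.0, 0.0])))  # tensor(1.)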
def train(model, iterator, optimizer, criterion):
    avg_acc = []
    model.train()
    for i, batch in enumerate(iterator):
        # batch.text: [seq_len, batch]; squeeze the model output from [batch, 1] to [batch]
        pred = model(batch.text).squeeze(1)
        loss = criterion(pred, batch.label)
        acc = binary_acc(pred, batch.label).item()
        avg_acc.append(acc)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % 10 == 0:  # log every 10 batches to keep the Colab output readable
            print(i, acc)

    avg_acc = np.array(avg_acc).mean()
    print('avg acc:', avg_acc)
def evaluate(model, iterator, criterion):
    avg_acc = []
    model.eval()  # disables dropout
    with torch.no_grad():
        for batch in iterator:
            pred = model(batch.text).squeeze(1)
            loss = criterion(pred, batch.label)  # computed for completeness; only accuracy is reported
            acc = binary_acc(pred, batch.label).item()
            avg_acc.append(acc)
    avg_acc = np.array(avg_acc).mean()
    print('test acc:', avg_acc)
for epoch in range(5):
    print('epoch:', epoch)
    train(model, train_iterator, optimizer, criterion)
    evaluate(model, test_iterator, criterion)
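Once training finishes, it's natural to try the model on your own sentence. This helper is my own addition (not in the original script); it reuses the spaCy tokenizer and TEXT.vocab, and since LABEL.vocab.stoi maps 'pos' to 1, an output near 1 means a positive prediction:

import spacy
nlp = spacy.load('en_core_web_md')

def predict_sentiment(model, sentence):
    model.eval()
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    indexed = [TEXT.vocab.stoi[t] for t in tokens]              # unknown words map to <unk>
    tensor = torch.LongTensor(indexed).unsqueeze(1).to(device)  # [seq_len, 1]
    with torch.no_grad():
        prob = torch.sigmoid(model(tensor))
    return prob.item()

print(predict_sentiment(model, 'This film is terrific!'))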
I'm grateful for this age of knowledge sharing and for everyone willing to share. Everything on this blog is something I learned from this community, so thanks for the support!