Since my local machine has no GPU, the implementation runs on Google Colab.
Go to https://drive.google.com, open your Drive, and upload the text file (download link: https://download.csdn.net/download/herosunly/11087806 ) to the desired location.
Then enter the following code in a Colab notebook:
from google.colab import drive
drive.mount('/content/gdrive')
Enter the authorization code in the verification box, e.g. 4/IgGET_eqUceDq3XtHUXw_WBJrZggw5_7ai2TWY1Cqb2hl6tV_EqFmmM
When Mounted at /content/gdrive is printed, the environment is set up successfully.
An epoch is one full pass over the training data: each epoch trains on all the samples and consists of a number of iterations, and each iteration trains on one batch of BATCH_SIZE samples.
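As a quick illustration (a toy calculation added here, with a made-up dataset size): the number of iterations in one epoch is the dataset size divided by the batch size, rounded up.
import math

num_samples = 10000      # hypothetical number of training samples
batch_size = 128
iterations_per_epoch = math.ceil(num_samples / batch_size)
print(iterations_per_epoch)  # 79 -- the final, smaller batch still counts as one iteration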
torch.nn.Embedding
torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None)
A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices. The input to the module is a list of indices, and the output is the corresponding word embeddings.
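A minimal usage sketch (sizes made up for illustration): an embedding table for a 10-word vocabulary with 3-dimensional vectors, looked up with a LongTensor of indices.
import torch
import torch.nn as nn

embed = nn.Embedding(10, 3)               # vocabulary of 10 words, 3-dimensional vectors
indices = torch.LongTensor([1, 4, 4, 9])  # a "sentence" of word indices
vectors = embed(indices)                  # one embedding row per index
print(vectors.shape)                      # torch.Size([4, 3])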
Loading the torch-related packages
import torch
import torch.nn as nn #torch.nn: neural-network building blocks
import torch.nn.functional as F #functional versions of layers, activations and losses
import torch.utils.data as tud #torch.utils.data: Dataset / DataLoader for reading the training data
from torch.nn.parameter import Parameter #wrapper for trainable tensors (not actually used below)
Loading the packages for word-frequency counting, similarity, data analysis, and math
from collections import Counter #word-frequency counting
import sklearn
from sklearn.metrics.pairwise import cosine_similarity #cosine similarity
import pandas as pd
import numpy as np
import scipy #pandas / numpy / scipy: the data-analysis trio
import scipy.stats #Spearman correlation (scipy submodules must be imported explicitly)
import scipy.spatial #cosine distance for nearest-neighbour search
import random
import math #math and randomness go hand in hand
Random seeds
USE_CUDA = torch.cuda.is_available() #flag: a GPU is available
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
if USE_CUDA:
    torch.cuda.manual_seed(1)
Hyperparameters
# hyperparameters for the word vectors
K = 100 # number of negative samples per positive word; the larger the corpus, the smaller this is usually set
C = 3 # nearby words threshold: use the 3 words on each side of the center word as context, similar to a 3-gram; following a PhD colleague's advice, odd sizes are the norm (e.g. CNN filter sizes of 3, 5, 7, 9 in the Alibaba Cloud security competition)
EMBEDDING_SIZE = 100 # dimensionality of the word vectors
MAX_VOCAB_SIZE = 30000 # the vocabulary size
# hyperparameters for optimization
NUM_EPOCHS = 2 # number of training epochs; each epoch trains on all the data
BATCH_SIZE = 128 # each iteration within an epoch trains on 128 samples
LEARNING_RATE = 0.2 # the initial learning rate
LOG_FILE = "word-embedding.log"
Build the word dictionary (keys are words, values are counts), and from it derive four variables: idx_to_word, word_to_idx, word_counts, and word_freqs.
with open("/content/gdrive/My Drive/Colab Notebooks/text8.train.txt", "r") as f: #open the training file
    text = f.read() #read the whole text
text = text.lower().split() #lowercase and split into a list of words
vocab_dict = dict(Counter(text).most_common(MAX_VOCAB_SIZE - 1)) #word dictionary: key is the word, value is its count
vocab_dict[''] = len(text) - sum(list(vocab_dict.values())) #all remaining infrequent words are collapsed into the unknown token ""
idx_to_word = list(vocab_dict.keys())
word_to_idx = {word: ind for ind, word in enumerate(idx_to_word)}
word_counts = np.array(list(vocab_dict.values()), dtype=np.float32)
word_freqs = word_counts / sum(word_counts)
VOCAB_SIZE = len(idx_to_word) #actual vocabulary size, which may be smaller than 30,000
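An optional sanity check I'm adding (assuming the cell above has been run; in text8 "the" is the most frequent word): indices and words should round-trip, and the frequencies should sum to about 1.
print(VOCAB_SIZE, len(idx_to_word))      # actual vocabulary size, at most 30000
print(idx_to_word[word_to_idx["the"]])   # "the" -- word -> index -> word round-trips
print(word_freqs.sum())                  # approximately 1.0, since the frequencies are normalized counts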
With a DataLoader we can easily shuffle the whole dataset, fetch batches of data, and so on. A DataLoader needs the following:
There is a good tutorial (https://pytorch.org/tutorials/beginner/data_loading_tutorial.html) on using the PyTorch DataLoader. To use it, we need a Dataset that defines these two functions:
__len__ returns how many items the whole dataset contains.
__getitem__ returns one item for a given index.
class WordEmbeddingDataset(tud.Dataset):
    def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):
        ''' text: a list of words, all text from the training dataset
            word_to_idx: the dictionary from word to idx
            idx_to_word: idx to word mapping
            word_freqs: the frequency of each word
            word_counts: the word counts
            (idx_to_word is not strictly needed here; the same information is already in word_to_idx)
        '''
        super().__init__() #initialize the parent class, then override __len__ and __getitem__
        self.text_encoded = [word_to_idx.get(word, word_to_idx[""]) for word in text] #encode each word as its index; words not in the vocabulary map to the unknown token ""
        self.text_encoded = torch.LongTensor(self.text_encoded) #LongTensor, because embedding lookups and fancy indexing expect integer (long) indices
        self.word_to_idx = word_to_idx #keep the data around
        self.idx_to_word = idx_to_word #keep the data around
        self.word_freqs = torch.Tensor(word_freqs) #keep the data around
        self.word_counts = torch.Tensor(word_counts) #keep the data around

    def __len__(self):
        #magic method __len__
        return len(self.text_encoded) #total number of (encoded) words

    def __getitem__(self, idx):
        #magic method __getitem__
        ''' Returns, for one training item:
            - the center word
            - the (positive) words around it
            - K randomly sampled words per positive word, used as negative samples
        '''
        center_word = self.text_encoded[idx] #the center word
        pos_indices = list(range(idx - C, idx)) + list(range(idx + 1, idx + C + 1)) #note: ranges are half-open
        pos_indices = [i % len(self.text_encoded) for i in pos_indices] #wrap around at the boundaries (there may be a nicer way to do this)
        pos_words = self.text_encoded[pos_indices] #indices of the context words, i.e. the positive examples we want to predict
        neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True) #negative sampling, following the word2vec paper
        #torch.multinomial draws K * pos_words.shape[0] samples from self.word_freqs and returns the sampled indices.
        #Sampling is with replacement, and a larger value in self.word_freqs means a higher probability of being drawn.
        #K negatives are drawn per positive word; pos_words.shape[0] is the number of positive words.
        return center_word, pos_words, neg_words
Note: multinomial returns indices, so why do we call them words? Because word_freqs is aligned with the vocabulary (it was built from the dictionary's values() in vocabulary order), an index into it is exactly a word index; center_word and pos_words are likewise word indices, and the actual strings can be recovered through idx_to_word.
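A tiny illustration of this behaviour (a standalone sketch with made-up weights, not part of the original code): sampling is with replacement, what comes back are indices, and indices with larger weights are drawn more often.
import torch

weights = torch.tensor([0.1, 0.1, 0.7, 0.1])  # unnormalized sampling weight per index
samples = torch.multinomial(weights, 10, replacement=True)
print(samples)  # ten indices in [0, 3]; index 2 dominates because its weight is largest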
dataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_freqs, word_counts)
dataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
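To see what one batch looks like (an optional check added here; the shapes assume C = 3 and K = 100 as set above):
input_labels, pos_labels, neg_labels = next(iter(dataloader))
print(input_labels.shape)  # torch.Size([128])      -- BATCH_SIZE center words
print(pos_labels.shape)    # torch.Size([128, 6])   -- 2*C context words per center word
print(neg_labels.shape)    # torch.Size([128, 600]) -- K negatives per context word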
A PyTorch model (a subclass of nn.Module) needs to define two functions: __init__() and forward().
class EmbeddingModel(nn.Module):
    def __init__(self, vocab_size, embed_size):
        ''' Initialize the input and output embeddings
        '''
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        #in the weight matrix, rows correspond to words (samples) and columns to embedding dimensions (features)
        self.in_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)
        self.out_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)
        initrange = 0.5 / self.embed_size  # added afterwards: small uniform initialization
        self.in_embed.weight.data.uniform_(-initrange, initrange)  # added afterwards
        self.out_embed.weight.data.uniform_(-initrange, initrange)  # added afterwards

    def forward(self, input_labels, pos_labels, neg_labels):
        '''
        input_labels: center words, [batch_size]
        pos_labels: words that appear in the context window around the center word, [batch_size, window_size * 2]
        neg_labels: words that do not appear around the center word, obtained by negative sampling, [batch_size, window_size * 2 * K]
        return: loss, [batch_size]
        '''
        batch_size = input_labels.size(0)
        input_embedding = self.in_embed(input_labels) # B * embed_size
        pos_embedding = self.out_embed(pos_labels) # B * (2*C) * embed_size
        neg_embedding = self.out_embed(neg_labels) # B * (2*C*K) * embed_size
        log_pos = torch.bmm(pos_embedding, input_embedding.unsqueeze(2)).squeeze(2) # B * (2*C)
        # unsqueeze(2) turns input_embedding into B * embed_size * 1
        # bmm is batch matrix multiplication
        log_neg = torch.bmm(neg_embedding, -input_embedding.unsqueeze(2)).squeeze(2) # B * (2*C*K)
        log_pos = F.logsigmoid(log_pos).sum(1) #.sum() would give a single number; .sum(1) keeps one value per sample, shape [batch_size]
        log_neg = F.logsigmoid(log_neg).sum(1) # batch_size
        loss = log_pos + log_neg
        return -loss

    def input_embeddings(self):
        return self.in_embed.weight.data.cpu().numpy()
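For reference, what the forward pass computes is the standard skip-gram negative-sampling loss from the word2vec paper (the LaTeX notation here is mine): for a center word $c$ with context words $o$ and sampled negatives $n$,
\mathcal{L}(c) = -\sum_{o \in \mathrm{context}(c)} \log \sigma\!\left(u_o^{\top} v_c\right) \;-\; \sum_{n \in \mathrm{negatives}(c)} \log \sigma\!\left(-u_n^{\top} v_c\right)
where $v_c$ is the row of in_embed for the center word, the $u$ vectors are rows of out_embed, and $\sigma$ is the sigmoid; log_pos and log_neg in the code are exactly these two sums (before the sign flip in return -loss).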
model = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)
if USE_CUDA:
    model = model.cuda()
def evaluate(filename, embedding_weights):
    if filename.endswith(".csv"):
        data = pd.read_csv(filename, sep=",")
    else:
        data = pd.read_csv(filename, sep="\t")
    human_similarity = []
    model_similarity = []
    for i in data.iloc[:, 0:2].index:
        word1, word2 = data.iloc[i, 0], data.iloc[i, 1]
        if word1 not in word_to_idx or word2 not in word_to_idx:
            continue
        else:
            word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]
            word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]
            model_similarity.append(float(sklearn.metrics.pairwise.cosine_similarity(word1_embed, word2_embed)))
            human_similarity.append(float(data.iloc[i, 2]))
    return scipy.stats.spearmanr(human_similarity, model_similarity)
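As an aside on reading the logged scores (a toy example added here, not from the original post): scipy.stats.spearmanr returns a (correlation, p-value) pair, and a correlation closer to 1 means the model's cosine similarities rank the word pairs more like the human judgments.
import scipy.stats

human = [1.0, 2.0, 3.0, 4.0]   # hypothetical human similarity ratings
model = [0.1, 0.3, 0.2, 0.9]   # hypothetical model cosine similarities
corr, pvalue = scipy.stats.spearmanr(human, model)
print(corr)  # 0.8 -- the two rankings agree except for one swapped pair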
def find_nearest(word):
    index = word_to_idx[word]
    embedding = embedding_weights[index]
    cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
    return [idx_to_word[i] for i in cos_dis.argsort()[:10]]
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
for e in range(NUM_EPOCHS):
    for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
        input_labels = input_labels.long()
        pos_labels = pos_labels.long()
        neg_labels = neg_labels.long()
        if USE_CUDA:
            input_labels = input_labels.cuda()
            pos_labels = pos_labels.cuda()
            neg_labels = neg_labels.cuda()
        optimizer.zero_grad()
        loss = model(input_labels, pos_labels, neg_labels).mean()
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            with open(LOG_FILE, "a") as fout:
                fout.write("epoch: {}, iter: {}, loss: {}\n".format(e, i, loss.item()))
                print("epoch: {}, iter: {}, loss: {}".format(e, i, loss.item()))
        if i % 2000 == 0:
            embedding_weights = model.input_embeddings()
            sim_simlex = evaluate("/content/gdrive/My Drive/Colab Notebooks/simlex-999.txt", embedding_weights)
            sim_men = evaluate("/content/gdrive/My Drive/Colab Notebooks/men.txt", embedding_weights)
            sim_353 = evaluate("/content/gdrive/My Drive/Colab Notebooks/wordsim353.csv", embedding_weights)
            with open(LOG_FILE, "a") as fout:
                print("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
                fout.write("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
    embedding_weights = model.input_embeddings()
    np.save("embedding-{}".format(EMBEDDING_SIZE), embedding_weights)
    torch.save(model.state_dict(), "embedding-{}.th".format(EMBEDDING_SIZE))
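If the saved artifacts are needed later (a small sketch following the save calls above; np.save appends the .npy extension), they can be reloaded like this:
embedding_weights = np.load("embedding-{}.npy".format(EMBEDDING_SIZE))        # the VOCAB_SIZE x EMBEDDING_SIZE matrix
model.load_state_dict(torch.load("embedding-{}.th".format(EMBEDDING_SIZE)))   # restore the trained weights into the model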
Evaluate on the SimLex-999, MEN, and WordSim-353 datasets
embedding_weights = model.input_embeddings()
print("simlex-999", evaluate("simlex-999.txt", embedding_weights))
print("men", evaluate("men.txt", embedding_weights))
print("wordsim353", evaluate("wordsim353.csv", embedding_weights))
Finding nearest neighbors
for word in ["good", "fresh", "monster", "green", "like", "america", "chicago", "work", "computer", "language"]:
    print(word, find_nearest(word))
Relations between words (analogies)
man_idx = word_to_idx["man"]
king_idx = word_to_idx["king"]
woman_idx = word_to_idx["woman"]
embedding = embedding_weights[woman_idx] - embedding_weights[man_idx] + embedding_weights[king_idx]
cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
for i in cos_dis.argsort()[:20]:
    print(idx_to_word[i])