1. word2vec links
- Official site: https://code.google.com/archive/p/word2vec/
- GitHub: https://github.com/tmikolov/word2vec
2. Annotated word2vec source code
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Comments added 2019-04-30
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <pthread.h>
#define MAX_STRING 100
#define EXP_TABLE_SIZE 1000
#define MAX_EXP 6
#define MAX_SENTENCE_LENGTH 1000
#define MAX_CODE_LENGTH 40
const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
typedef float real; // Precision of float numbers
struct vocab_word {
long long cn; // word frequency, counted from the training corpus or read directly from a vocabulary file
int *point; // indices of the nodes on the path from the Huffman tree root to this word
char *word, *code, codelen; // word = the word itself, code = its Huffman code, codelen = the length of that Huffman code
};
char train_file[MAX_STRING], output_file[MAX_STRING]; // names of the training file and the output file
char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING]; // names of the vocabulary output file and the vocabulary input file
struct vocab_word *vocab; // the vocabulary
/*
* binary=0: the output vectors are written as text (default); binary=1: written in binary form;
* cbow=1: use the CBOW architecture; cbow=0: use the skip-gram architecture;
* debug_mode>0: print summary information after loading; debug_mode>1: also print information while loading the vocabulary and during training;
* window: window size; in CBOW it bounds how many context word vectors are summed, in skip-gram it is the maximum distance between the current and the predicted word;
* min_count: minimum frequency threshold, default 5; words that occur fewer than min_count times are discarded;
* num_threads: number of training threads;
* min_reduce: ReduceVocab removes words with frequency below this value, because the hash table can only hold a limited number of words; if the vocabulary size N > 0.7 * vocab_hash_size, all words with frequency less than min_reduce are removed.
*/
int binary = 0, cbow = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
int *vocab_hash; // word hash table; the index is a word's hash value and the entry is that word's position in vocab: vocab_hash[word_hash] = word index in vocab
/*
* vocab_max_size: helper variable; whenever the vocabulary outgrows vocab_max_size, the table is grown by another 1000 entries
* vocab_size: current vocabulary size; the table is enlarged when it approaches vocab_max_size
* layer1_size: size of the hidden layer, i.e. the dimensionality of the word vectors
*/
long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
/*
* train_words: total number of words used for training (sum of word frequencies)
* word_count_actual: number of words processed so far across all threads
* file_size: size of the training file, obtained with ftell
* classes: number of output word clusters (0 = output word vectors instead of clusters)
* alpha: learning rate, adjusted automatically during training
* starting_alpha: initial value of alpha
* sample: subsampling parameter; subsampling randomly rejects frequent words so that infrequent words are seen relatively more often; 0 disables subsampling (the default here is 1e-3)
* syn0: word vectors, one per vocabulary word
* syn1: vectors (weights) of the internal, non-leaf nodes of the Huffman tree
* syn1neg: auxiliary (output) vectors used by negative sampling
* expTable: precomputed sigmoid values; during training the algorithm looks values up in this table instead of calling exp(), for speed
* start: start time of training, used to report how many words are processed per second
*/
long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
real alpha = 0.025, starting_alpha, sample = 1e-3;
real *syn0, *syn1, *syn1neg, *expTable;
clock_t start;
int hs = 0, negative = 5; // hs: use hierarchical softmax; negative: number of negative samples; by default negative sampling is used and hierarchical softmax is off
const int table_size = 1e8; // size of the static sampling table
int *table; // the sampling (unigram) table
/*
* Build the sampling table from the word frequencies; it describes the probability mass ("energy") assigned to each word and is used by negative sampling
* Part of network initialization: builds the negative-sampling probability table
*/
void InitUnigramTable() {
int a, i;
double train_words_pow = 0;
double d1, power = 0.75;
table = (int *)malloc(table_size * sizeof(int));
for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
i = 0; // index into the vocabulary
d1 = pow(vocab[i].cn, power) / train_words_pow; // cumulative share of the total probability mass covered so far
for (a = 0; a < table_size; a++) { // index into the sampling table
table[a] = i; // word i occupies slot a (the more probability mass a word has, the more slots it occupies)
if (a / (double)table_size > d1) {
i++;
d1 += pow(vocab[i].cn, power) / train_words_pow;
}
if (i >= vocab_size) i = vocab_size - 1; // if the vocabulary is exhausted before the table is full, fill the remaining slots with the last word
}
}
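// The table built above samples word w_i with probability P(w_i) = cn(w_i)^0.75 / sum_j cn(w_j)^0.75,
// i.e. the unigram distribution raised to the 3/4 power; the exponent flattens the distribution so that
// rare words are drawn somewhat more often than their raw frequency would suggest.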
/* Reads a single word from a file, assuming space + tab + EOL to be word boundaries
* Read a single word from the file into word, treating space ' ', tab '\t' and newline '\n' as word boundaries
* At the end of every line a </s> token is produced
*/
void ReadWord(char *word, FILE *fin) {
int a = 0, ch; // a is the index at which the next character is written into word; ch is the character just read from fin
while (!feof(fin)) {
ch = fgetc(fin);
if (ch == 13) continue;
if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
if (a > 0) {
if (ch == '\n') ungetc(ch, fin);
break;
}
if (ch == '\n') {
strcpy(word, (char *)"</s>");
return;
} else continue;
}
word[a] = ch;
a++;
if (a >= MAX_STRING - 1) a--; // Truncate too long words
}
word[a] = 0; // terminate the string with '\0'
}
/* Returns hash value of a word
* Returns the hash value of a word; collisions are resolved elsewhere by open addressing with linear probing
*/
int GetWordHash(char *word) {
unsigned long long a, hash = 0;
for (a = 0; a < strlen(word); a++) hash = hash * 257 + word[a];
hash = hash % vocab_hash_size;
return hash;
}
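// Worked example: for the word "ab", hash = (0 * 257 + 'a') * 257 + 'b' = 97 * 257 + 98 = 25027,
// well below vocab_hash_size, so the word would be stored at (or probed from) slot 25027 of vocab_hash.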
/* Returns position of a word in the vocabulary; if the word is not found, returns -1
* Returns the position of a word in the vocabulary, or -1 if the word is not found
* First compute the word's hash value and look it up in the word hash table
* If the word stored at that slot is not the word being looked up, a hash collision occurred; follow the open-addressing probe sequence to find the word
*/
int SearchVocab(char *word) {
unsigned int hash = GetWordHash(word);
while (1) {
if (vocab_hash[hash] == -1) return -1;
if (!strcmp(word, vocab[vocab_hash[hash]].word)) return vocab_hash[hash];
hash = (hash + 1) % vocab_hash_size; // keep probing linearly; insertions resolved collisions the same way, so if the word is present it lies further along the probe sequence
}
return -1;
}
/* Reads a word and returns its index in the vocabulary
* Reads a word from the file stream and returns its index in the vocabulary; essentially a wrapper around the two functions above
*/
int ReadWordIndex(FILE *fin) {
char word[MAX_STRING];
ReadWord(word, fin);
if (feof(fin)) return -1;
return SearchVocab(word);
}
/* Adds a word to the vocabulary
* Adds a word to the vocabulary and returns its position in the vocabulary
*/
int AddWordToVocab(char *word) {
unsigned int hash, length = strlen(word) + 1;
if (length > MAX_STRING) length = MAX_STRING;
vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
strcpy(vocab[vocab_size].word, word);
vocab[vocab_size].cn = 0; // initialize the word count to 0
vocab_size++; // current number of words in the vocabulary
// Reallocate memory if needed
if (vocab_size + 2 >= vocab_max_size) {
vocab_max_size += 1000; // grow the vocabulary by another 1000 slots
vocab = (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
}
hash = GetWordHash(word); // compute the word's hash with the function above
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size; // on collision, probe linearly (open addressing) until a free slot is found
vocab_hash[hash] = vocab_size - 1; // record the word's position in the vocabulary
return vocab_size - 1; // return the word's position in the vocabulary
}
/* Used later for sorting by word counts
* Comparison function for sorting the vocabulary by word count with qsort, from most to least frequent
*/
int VocabCompare(const void *a, const void *b) {
return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
}
/* Sorts the vocabulary by frequency using word counts
* Sorts the vocabulary by frequency, from most to least frequent, so that rare words end up at the back of the vocab array
*/
void SortVocab() {
int a, size;
unsigned int hash;
// Sort the vocabulary and keep </s> at the first position
qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare); // quicksort the vocabulary (excluding </s> at index 0)
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; // sorting invalidates the recorded positions, so reset the hash table
size = vocab_size;
train_words = 0; // total number of words used for training (sum of word frequencies)
for (a = 0; a < size; a++) {
// Words occurring less than min_count times will be discarded from the vocab
// Words that survive get their hash recomputed and the hash table updated
if ((vocab[a].cn < min_count) && (a != 0)) {
vocab_size--;
free(vocab[a].word);
} else {
// Hash will be re-computed, as after the sorting it is not actual
hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
train_words += vocab[a].cn; // accumulate the word counts
}
}
vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word)); // low-frequency words have been removed, so shrink the vocabulary to its new size
// Allocate memory for the binary tree construction (reserve space for the Huffman code and path of every word)
for (a = 0; a < vocab_size; a++) {
vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
}
}
/* Reduces the vocabulary by removing infrequent tokens
* Removes words whose count is not greater than min_reduce from the vocabulary; min_reduce is incremented by 1 every time this function runs
*/
void ReduceVocab() {
int a, b = 0;
unsigned int hash;
for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
vocab[b].cn = vocab[a].cn;
vocab[b].word = vocab[a].word;
b++;
} else free(vocab[a].word); // free the memory of the word being discarded
vocab_size = b; // b words remain, all with counts greater than min_reduce
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; // reset the hash table
for (a = 0; a < vocab_size; a++) {
// Hash will be re-computed, as it is not actual (after removing infrequent words the hash table has to be rebuilt)
hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
}
fflush(stdout);
min_reduce++;
}
// Create binary Huffman tree using the word counts
// Frequent words will have short unique binary codes
/*
* Build a binary Huffman tree from the collected word counts
* The more frequent a word, the shorter its (unique) Huffman code
*/
void CreateBinaryTree() {
long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH]; // point[] temporarily holds the path (node indices) from the root to the current word
char code[MAX_CODE_LENGTH]; // code[] temporarily holds the Huffman code of the current word
// Memory allocation: a Huffman tree with n leaves has 2n-1 nodes in total
long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); // count[] stores the node counts (frequencies)
long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); // binary[] stores the binary code (0 or 1) of each node relative to its parent
long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long)); // parent_node[] stores the parent of each node
for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn; // the first vocab_size entries of count[] are the leaves of the Huffman tree, initialized with the word counts
for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15; // the next vocab_size entries are the internal (merged) nodes still to be created, initialized with the huge value 1e15
pos1 = vocab_size - 1;
pos2 = vocab_size;
// Following algorithm constructs the Huffman tree by adding one node at a time
// pos1 and pos2 are the candidates for the two nodes with the smallest counts; pos1 starts at the least frequent word at the end of the sorted vocabulary and walks left over the leaves, pos2 walks right over the newly created internal nodes
for (a = 0; a < vocab_size - 1; a++) {
// First, find two smallest nodes 'min1, min2'
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min1i = pos1;
pos1--;
} else {
min1i = pos2;
pos2++;
}
} else {
min1i = pos2;
pos2++;
}
if (pos1 >= 0) {
if (count[pos1] < count[pos2]) {
min2i = pos1;
pos1--;
} else {
min2i = pos2;
pos2++;
}
} else {
min2i = pos2;
pos2++;
}
count[vocab_size + a] = count[min1i] + count[min2i];
parent_node[min1i] = vocab_size + a;
parent_node[min2i] = vocab_size + a;
binary[min2i] = 1;
}
// Now assign binary code to each vocabulary word
for (a = 0; a < vocab_size; a++) {
b = a;
i = 0;
while (1) {
code[i] = binary[b];
point[i] = b;
i++;
b = parent_node[b];
if (b == vocab_size * 2 - 2) break;
}
vocab[a].codelen = i;
vocab[a].point[0] = vocab_size - 2;
for (b = 0; b < i; b++) {
vocab[a].code[i - b - 1] = code[b];
vocab[a].point[i - b] = point[b] - vocab_size;
}
}
free(count);
free(binary);
free(parent_node);
}
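// After this function, for a word with Huffman code length L: point[0..L-1] are the internal nodes on the path
// from the root to the word's leaf (point[0] = vocab_size - 2 is always the root; internal node k of the tree is
// stored at row k - vocab_size of syn1), and code[d] is the branch (0 or 1) taken at node point[d].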
/*
* Read all words from the training file and build the vocabulary and the hash table
*/
void LearnVocabFromTrainFile() {
char word[MAX_STRING];
FILE *fin;
long long a, i;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; // initialize the word hash table
fin = fopen(train_file, "rb"); // open the training file
if (fin == NULL) {
printf("ERROR: training data file not found!\n");
exit(1);
}
vocab_size = 0; // initialize the vocabulary size
AddWordToVocab((char *)"</s>"); // put </s> at position 0 of vocab; later, when ReadWord encounters a newline, it returns </s>
while (1) {
ReadWord(word, fin); // read one word from the file
if (feof(fin)) break;
train_words++; // increase the total word count and report progress from time to time
if ((debug_mode > 1) && (train_words % 100000 == 0)) {
printf("%lldK%c", train_words / 1000, 13);
fflush(stdout);
}
i = SearchVocab(word); // look the word up in the vocabulary
// If the word is not in the vocabulary yet, add it (creating its hash-table entry) with a count of 1; otherwise increment its count
if (i == -1) {
a = AddWordToVocab(word);
vocab[a].cn = 1;
} else vocab[i].cn++;
// If the vocabulary grows beyond 70% of the hash table, prune it once by removing words that occur no more than min_reduce times
if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
}
SortVocab(); // sort the vocabulary by word frequency
if (debug_mode > 0) {
printf("Vocab size: %lld\n", vocab_size);
printf("Words in train file: %lld\n", train_words);
}
file_size = ftell(fin); // record the size of the training file
fclose(fin); // close the file handle
}
/*
* Write the vocabulary words and their counts to a file
*/
void SaveVocab() {
long long i;
FILE *fo = fopen(save_vocab_file, "wb");
for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
fclose(fo);
}
/*
* Read the words from a vocabulary file and build the vocabulary and the hash table
* The vocabulary file contains no duplicate words, so unlike LearnVocabFromTrainFile no duplicate check is needed
*/
void ReadVocab() {
long long a, i = 0;
char c;
char word[MAX_STRING];
FILE *fin = fopen(read_vocab_file, "rb"); // open the vocabulary file
if (fin == NULL) {
printf("Vocabulary file not found\n");
exit(1);
}
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1; // initialize the word hash table
vocab_size = 0;
while (1) {
ReadWord(word, fin); // read one word from the file
if (feof(fin)) break;
a = AddWordToVocab(word); // add the word to the vocabulary (creating its hash-table entry); its count is then read from the vocabulary file
fscanf(fin, "%lld%c", &vocab[a].cn, &c);
i++;
}
SortVocab(); // sort the vocabulary, discard words with counts below min_count, and report the vocabulary size and total word count
if (debug_mode > 0) {
printf("Vocab size: %lld\n", vocab_size);
printf("Words in train file: %lld\n", train_words);
}
fin = fopen(train_file, "rb"); // open the training file and seek to its end to obtain its size
if (fin == NULL) {
printf("ERROR: training data file not found!\n");
exit(1);
}
fseek(fin, 0, SEEK_END);
file_size = ftell(fin);
fclose(fin); // close the file handle
}
/*
* Initialize the network structures
* syn0: word vectors, one per vocabulary word
* syn1: vectors of the internal (non-leaf) nodes of the Huffman tree
* layer1_size: dimensionality of the word vectors
*/
void InitNet() {
long long a, b;
unsigned long long next_random = 1;
// posix_memalign allocates vocab_size * layer1_size reals, aligned to a 128-byte boundary
a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real)); // allocate memory for syn0
if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
if (hs) {
a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real)); // allocate memory for syn1
if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1[a * layer1_size + b] = 0; // initialize syn1 to zero
}
// If negative sampling is used, allocate syn1neg, the auxiliary (output) vector of each word
if (negative>0) {
a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1neg[a * layer1_size + b] = 0; // initialize syn1neg to zero
}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
next_random = next_random * (unsigned long long)25214903917 + 11;
syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size; // initialize each component of syn0 to a random value in [-0.5, 0.5] / layer1_size
}
CreateBinaryTree(); // build the Huffman tree
}
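// Initialization summary: syn1 and syn1neg start at zero, while each component of syn0 is drawn uniformly from
// [-0.5/layer1_size, 0.5/layer1_size] using the simple linear congruential generator
// next_random = next_random * 25214903917 + 11 (the same multiplier as java.util.Random).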
/*
* Core code: model training, run once per thread
* It is assumed that before this thread function runs the vocabulary has been sorted and the Huffman tree and the Huffman code of every word have been built
*/
void *TrainModelThread(void *id) {
// cw: number of context words actually summed (the centre word excluded)
// word: index in the vocabulary of the word currently being processed in the sentence
// last_word: index of the context word currently being scanned in the window
// sentence_length: length of the sentence currently being processed
// sentence_position: position of the current word within the current sentence
long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
// word_count: number of corpus words this thread has processed so far
// last_word_count: value of word_count when progress was last reported
// sen: the sentence currently being processed, stored as vocabulary indices
long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
// l1: offset into syn0 of the word vector currently being updated (skip-gram)
// l2: offset into syn1 (internal-node vector) or syn1neg (negative-sampling auxiliary vector)
// target: the current sample (positive or negative) during negative sampling
// label: the label of the current sample (1 = positive, 0 = negative)
long long l1, l2, c, target, label, local_iter = iter;
unsigned long long next_random = (long long)id; // per-thread seed for the random number generator
real f, g;
clock_t now;
real *neu1 = (real *)calloc(layer1_size, sizeof(real)); // hidden-layer input: in CBOW the (averaged) sum of the context word vectors; unused in skip-gram, where syn0 is read directly
real *neu1e = (real *)calloc(layer1_size, sizeof(real)); // accumulated error term
FILE *fi = fopen(train_file, "rb");
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET); // file_size was obtained earlier in LearnVocabFromTrainFile or ReadVocab; each thread starts at its own offset in the file
while (1) {
if (word_count - last_word_count > 10000) { // report progress roughly every 10000 trained words
word_count_actual += word_count - last_word_count; // word_count_actual is the total number of words processed by all threads so far
last_word_count = word_count;
if ((debug_mode > 1)) {
now=clock();
printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha,
word_count_actual / (real)(iter * train_words + 1) * 100,
word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000)); // report the current learning rate alpha, the overall progress (words processed / (iter * train_words + 1)) and the per-thread speed in kwords/sec
fflush(stdout);
}
alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1)); // decay the learning rate linearly from its initial value as the number of processed words grows
if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001; // but never let it drop below starting_alpha * 0.0001
}
// If the current sentence is empty, read the next sentence from the training data; sentences are delimited by newlines
if (sentence_length == 0) {
while (1) {
word = ReadWordIndex(fi); // read a word and get its index in the vocabulary
if (feof(fi)) break; // end of file reached
if (word == -1) continue; // the word is not in the vocabulary
word_count++; // one more word processed
if (word == 0) break; // index 0 is </s>, i.e. a newline: the sentence ends here
// The subsampling randomly discards frequent words while keeping the ranking same
// Randomly down-sample frequent words; this makes the vectors of rare words more accurate and also speeds up training
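// With f = vocab[word].cn / train_words, the keep probability computed below is
//   ran = (sqrt(f / sample) + 1) * sample / f,
// so words with frequency f >> sample are kept only rarely, while words with f <= sample are always kept (ran >= 1).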
if (sample > 0) {
real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
next_random = next_random * (unsigned long long)25214903917 + 11;
if (ran < (next_random & 0xFFFF) / (real)65536) continue; // keep the word with probability ran, i.e. discard it with probability 1 - ran
}
sen[sentence_length] = word; // sen holds the vocabulary indices of the sentence's words, in the same order as in the text
sentence_length++;
if (sentence_length >= MAX_SENTENCE_LENGTH) break; // at most MAX_SENTENCE_LENGTH (1000) words form one sentence; longer input is truncated
}
sentence_position = 0; // start at the beginning of the sentence
}
// If this thread has processed its share of the corpus (or reached the end of the file), finish the current iteration
// and stop once the number of iterations reaches the limit
if (feof(fi) || (word_count > train_words / num_threads)) {
word_count_actual += word_count - last_word_count;
local_iter--;
if (local_iter == 0) break;
word_count = 0;
last_word_count = 0;
sentence_length = 0;
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
continue;
}
word = sen[sentence_position]; // the current (centre) word
if (word == -1) continue; // skip words that are not in the vocabulary
for (c = 0; c < layer1_size; c++) neu1[c] = 0; // reset the hidden-layer input
for (c = 0; c < layer1_size; c++) neu1e[c] = 0; // reset the accumulated error
next_random = next_random * (unsigned long long)25214903917 + 11; // draw a random number in [0, window-1] that determines the effective width of context(w)
b = next_random % window;
// *** CBOW model: predict the current word from its context ***
if (cbow) { //train the cbow architecture
// in -> hidden
cw = 0;
// the window of a word is [sentence_position - window + b, sentence_position + window - b], so its total width is 2*window - 2*b + 1
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) { // skip the centre word (the word to be predicted) and collect only its context
c = sentence_position - window + a; // sentence_position is the position of the centre word, c the position of a context word
if (c < 0) continue; // bounds check
if (c >= sentence_length) continue;
last_word = sen[c]; // sen holds the vocabulary index of each word of the sentence
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size]; // sum the context word vectors
cw++; // count the context words actually used
}
if (cw) {
for (c = 0; c < layer1_size; c++) neu1[c] /= cw; // average the summed context vectors
// *** Hierarchical Softmax optimization ***
// With hierarchical softmax, walk the Huffman-tree path from the root to the current word's leaf and visit every internal node on the way
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size; // l2 is the offset in syn1 of the vector of the internal node currently visited
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2]; // f is the dot product of the input vector neu1 and the internal node's vector
if (f <= -MAX_EXP) continue; // skip if f lies outside the range covered by the sigmoid table
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]; // otherwise apply the sigmoid via table lookup
// 'g' is the gradient multiplied by the learning rate
// the label of this internal node is 1 - code[d], so g = (label - f) * alpha
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2]; // accumulate the error that will later be applied to the context word vectors
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c]; // update the internal node's vector using g and the input vector
}
// *** Negative Sampling optimization ***
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) { // the first iteration uses the target word itself, i.e. the positive sample
target = word;
label = 1;
} else { // otherwise draw a negative sample from the unigram table
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size; // with negative sampling every word has an auxiliary vector in syn1neg; l2 is the offset of the target word's auxiliary vector
f = 0;
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2]; // f is the dot product of the input vector neu1 and the auxiliary vector
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2]; // accumulate the error using g and the auxiliary vector
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c]; // update the auxiliary vector using g and the input vector
}
// hidden -> in
// apply the accumulated error neu1e to the word vector of every word in context(w)
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c];
if (last_word == -1) continue;
for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
}
}
}
// *** skip-gram model: predict the context from the current word ***
else { //train skip-gram
// every word in context(w) has to be predicted, so loop over the whole window of 2*window - 2*b + 1 positions, skipping the centre word
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
c = sentence_position - window + a;
if (c < 0) continue;
if (c >= sentence_length) continue;
last_word = sen[c]; // last_word is the context word currently being predicted
if (last_word == -1) continue;
l1 = last_word * layer1_size; // l1 is the offset in syn0 of the current context word's vector
for (c = 0; c < layer1_size; c++) neu1e[c] = 0; // reset the accumulated error
// HIERARCHICAL SOFTMAX
if (hs) for (d = 0; d < vocab[word].codelen; d++) { // walk the Huffman-tree path from the root to the centre word's leaf, visiting every internal node
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
}
// NEGATIVE SAMPLING
if (negative > 0) for (d = 0; d < negative + 1; d++) {
if (d == 0) {
target = word;
label = 1;
} else {
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
l2 = target * layer1_size;
f = 0;
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
}
// Learn weights input -> hidden
for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
}
}
sentence_position++; // one word finished; move to the next position in the sentence
// when the end of the sentence is reached, reset sentence_length so that the loop above reads the next sentence and processes it word by word
if (sentence_position >= sentence_length) {
sentence_length = 0;
continue;
}
}
fclose(fi);
free(neu1);
free(neu1e);
pthread_exit(NULL);
}
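// The two gradients used above follow from maximizing log-likelihood with sigmoid outputs sigma(x) = 1/(1+e^-x):
// hierarchical softmax treats each internal node as a binary classifier with label (1 - code[d]), giving
// g = (label - sigma(f)) * alpha, and negative sampling uses label 1 for the true word and 0 for sampled words,
// giving the same form g = (label - sigma(f)) * alpha; in both cases g already includes the learning rate.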
/*
* The complete model training procedure
*/
void TrainModel() {
long a, b, c, d;
FILE *fo;
pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t)); // handles for the num_threads training threads
printf("Starting training using file %s\n", train_file);
starting_alpha = alpha; // remember the initial learning rate
// If a vocabulary file is given, build the vocabulary and the hash table from it, otherwise from the training file
if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
// Write the vocabulary words and their counts to a file if requested
if (save_vocab_file[0] != 0) SaveVocab();
if (output_file[0] == 0) return;
// Initialize the network structures
InitNet();
// If negative sampling is used, initialize the unigram sampling table
if (negative > 0) InitUnigramTable();
start = clock(); // start the timer
for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a); // create the training threads
for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
fo = fopen(output_file, "wb"); // training is finished, write the results
// If classes == 0, write all word vectors to the output file
if (classes == 0) {
// Save the word vectors
fprintf(fo, "%lld %lld\n", vocab_size, layer1_size); // 词汇量,vector维数
for (a = 0; a < vocab_size; a++) {
fprintf(fo, "%s ", vocab[a].word);
if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
fprintf(fo, "\n");
}
}
// If classes != 0, run K-means on the word vectors and write out word classes; classes is the number of clusters
else {
// Run K-means on the word vectors
int clcn = classes, iter = 10, closeid; // number of clusters, number of K-means iterations, and the index of the cluster currently closest to a word
int *centcn = (int *)malloc(classes * sizeof(int)); // centcn: number of words assigned to each cluster
int *cl = (int *)calloc(vocab_size, sizeof(int)); // cl: cluster id of each word
real closev, x;
real *cent = (real *)calloc(classes * layer1_size, sizeof(real)); // cent: centroid vector of each cluster
for (a = 0; a < vocab_size; a++) cl[a] = a % clcn; // initial assignment: distribute the words over the clusters round-robin
for (a = 0; a < iter; a++) { // K-means iterations
for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0; // reset the centroid vectors
for (b = 0; b < clcn; b++) centcn[b] = 1; // reset the per-cluster word counts (starting at 1 avoids division by zero)
// Sum the vectors of the words currently assigned to each cluster and count the words per cluster
for (c = 0; c < vocab_size; c++) {
for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
centcn[cl[c]]++;
}
for (b = 0; b < clcn; b++) {
closev = 0;
for (c = 0; c < layer1_size; c++) {
cent[layer1_size * b + c] /= centcn[b]; // average: the cluster's mean vector
closev += cent[layer1_size * b + c] * cent[layer1_size * b + c]; // closev accumulates the squared L2 norm of the mean vector
}
closev = sqrt(closev); // closev is now the L2 norm of the mean vector
for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev; // normalize the centroid by its norm
}
// Reassign every word in the vocabulary to its nearest cluster
for (c = 0; c < vocab_size; c++) {
closev = -10;
closeid = 0;
for (d = 0; d < clcn; d++) {
x = 0;
// dot product of the word vector with the normalized centroid; since the centroid has unit norm this ranks clusters by cosine similarity
for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
// assign the word to the cluster whose centroid has the largest dot product with its vector
if (x > closev) {
closev = x;
closeid = d;
}
}
cl[c] = closeid;
}
}
// Save the K-means classes
for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]); // write the K-means cluster assignments to the output file
free(centcn);
free(cent);
free(cl);
}
fclose(fo);
}
/*
* Look up a command-line option and report an error if its value is missing
*/
int ArgPos(char *str, int argc, char **argv) {
int a;
for (a = 1; a < argc; a++) if (!strcmp(str, argv[a])) { // look for the requested option
if (a == argc - 1) {
printf("Argument missing for %s\n", str);
exit(1);
}
return a; // option found, return its position
}
return -1;
}
int main(int argc, char **argv) {
int i;
if (argc == 1) { // no arguments given: print the usage information below
printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
printf("Options:\n");
printf("Parameters for training:\n");
printf("\t-train <file>\n"); // 1. training file
printf("\t\tUse text data from <file> to train the model\n");
printf("\t-output <file>\n"); // 2. output file for the resulting word vectors or word classes
printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
printf("\t-size <int>\n"); // 3. dimensionality of the word vectors, stored in layer1_size (default 100)
printf("\t\tSet size of word vectors; default is 100\n");
printf("\t-window <int>\n"); // 4. window size: in CBOW the range over which context vectors are summed, in skip-gram the maximum distance between the current and the predicted word (default 5)
printf("\t\tSet max skip length between words; default is 5\n");
printf("\t-sample <float>\n"); // 5. subsampling threshold
printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
printf("\t-hs <int>\n"); // 6. use hierarchical softmax; default 0 = not used
printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
printf("\t-negative <int>\n"); // 7. number of negative samples when negative sampling is used (default 5)
printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
printf("\t-threads <int>\n"); // 8. number of threads (default 12)
printf("\t\tUse <int> threads (default 12)\n");
printf("\t-iter <int>\n"); // 9. number of training iterations (default 5)
printf("\t\tRun more training iterations (default 5)\n");
printf("\t-min-count <int>\n"); // 10. frequency threshold below which words are discarded (default 5)
printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
printf("\t-alpha <float>\n"); // 11. initial learning rate; default 0.025 for skip-gram, 0.05 for CBOW
printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
printf("\t-classes <int>\n"); // 12. output word classes instead of word vectors; default 0 = output vectors
printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
printf("\t-debug <int>\n"); // 13. debug level (default 2)
printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
printf("\t-binary <int>\n"); // 14. save the result in binary format; default 0 = text output
printf("\t\tSave the resulting vectors in binary mode; default is 0 (off)\n");
printf("\t-save-vocab <file>\n"); // 15. file to which the vocabulary is saved
printf("\t\tThe vocabulary will be saved to <file>\n");
printf("\t-read-vocab <file>\n"); // 16. file from which the vocabulary is read, so it need not be built from the training file
printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
printf("\t-cbow <int>\n"); // 17. use the CBOW model; default 1, set to 0 for skip-gram
printf("\t\tUse the continuous bag of words model; default is 1 (use 0 for skip-gram model)\n");
printf("\nExamples:\n"); // usage example
printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\n\n");
return 0;
}
output_file[0] = 0; // output file name (empty by default)
save_vocab_file[0] = 0; // vocabulary output file name
read_vocab_file[0] = 0; // vocabulary input file name
// map the command-line options onto the corresponding variables
if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-cbow", argc, argv)) > 0) cbow = atoi(argv[i + 1]);
if (cbow) alpha = 0.05;
if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word)); // one struct per vocabulary word
vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int)); // the word hash table
expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real)); // allocate EXP_TABLE_SIZE + 1 entries
// Preprocessing: precompute the sigmoid values and store them in the table
for (i = 0; i < EXP_TABLE_SIZE; i++) {
expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
}
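// expTable[i] therefore holds sigma(x) for x = (2i / EXP_TABLE_SIZE - 1) * MAX_EXP, i.e. 1000 samples of the
// sigmoid on [-6, 6]; during training a dot product f in (-MAX_EXP, MAX_EXP) is mapped back to an index with
// (int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2)).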
TrainModel(); // train the model
return 0;
}
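To build and run the file, something along these lines should work (roughly the flags used by the project's makefile; adjust as needed):

gcc word2vec.c -o word2vec -lm -pthread -O3 -march=native -Wall -funroll-loops -Wno-unused-result
./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3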