Comparing text vectorization methods: tf-idf, doc2bow, doc2vec, lsi, lda

Posting the code and results first; a proper write-up will follow later.
Cosine similarity between document vectors is used to judge how well each vectorization works (a short sketch of the metric follows this list).
tf-idf and doc2bow produce sparse vectors and suit short texts.
doc2vec results swing between good and bad runs; they depend heavily on chance and are not stable.
lsi and lda give good, fairly stable results, but lda is noticeably heavier to compute.
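
For reference, the cosine similarity used as the yardstick above can be computed as follows. This is a minimal sketch with numpy; the helper name cosine_similarity is illustrative and is not part of the script further down.

import numpy as np

def cosine_similarity(a, b):
    # cos(a, b) = a.b / (|a| * |b|); values closer to 1 mean more similar documents
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

print(cosine_similarity([1.0, 0.0, 1.0], [1.0, 1.0, 0.0]))  # -> 0.5

The full comparison script: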

from gensim.models import doc2vec
from gensim import corpora,models
import jieba,os
from gensim.similarities.docsim import Similarity
raw_documents = []
# read every file under the test-data directory and strip spaces, tabs and newlines
for root, dirs, files in os.walk('C:/Users/Administrator/Desktop/testdata/'):
    for file in files:
        with open(os.path.join(root, file), encoding='utf8') as f:
            s = f.read().replace(' ', '').replace('\t', '').replace('\r\n', '').replace('\r', '').replace('\n', '')
        raw_documents.append(s)
print('data ok!')
corpora_documents = []   # tokenized documents for the dictionary / bow corpus
corpora_documents2 = []  # TaggedDocument objects for doc2vec
for i, item_text in enumerate(raw_documents):
    words_list = list(jieba.cut(item_text))
    document = doc2vec.TaggedDocument(words=words_list, tags=[i])
    corpora_documents.append(words_list)
    corpora_documents2.append(document)
# build the dictionary and the bag-of-words corpus
dictionary = corpora.Dictionary(corpora_documents)
corpus = [dictionary.doc2bow(text) for text in corpora_documents]
# bag-of-words similarity index
similarity = Similarity('-Similarity-index', corpus, num_features=len(dictionary))
# test query
test_data_1 = '周杰伦是个低调爱做慈善的好明星'  # "Jay Chou is a low-key star who loves doing charity"
test_cut_raw_1 = list(jieba.cut(test_data_1))
# compute similarity against the bow index
'''test_corpus_1 = dictionary.doc2bow(test_cut_raw_1)
similarity.num_best = 5
print('——————————————sim———————————————')
print(similarity[test_corpus_1])  # returns the most similar documents as (document_index, similarity) tuples'''

# doc2vec similarity
model = doc2vec.Doc2Vec(vector_size=89, min_count=1, epochs=10)
model.build_vocab(corpora_documents2)
model.train(corpora_documents2, total_examples=model.corpus_count, epochs=model.epochs)
print('——————————————doc2vec———————————————')
inferred_vector = model.infer_vector(test_cut_raw_1)
sims = model.dv.most_similar([inferred_vector], topn=5)
print(sims)

# convert the bow corpus into tf-idf vectors
tfidf_model = models.TfidfModel(corpus)
corpus_tfidf = [tfidf_model[doc] for doc in corpus]
# convert to LSI vectors
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=50)
corpus_lsi = [lsi[doc] for doc in corpus]
similarity_lsi = Similarity('Similarity-Lsi-index', corpus_lsi, num_features=lsi.num_topics, num_best=5)
test_corpus_3 = dictionary.doc2bow(test_cut_raw_1)  # 1. convert the query to a bow vector
test_corpus_tfidf_3 = tfidf_model[test_corpus_3]  # 2. weight it with tf-idf
test_corpus_lsi_3 = lsi[test_corpus_tfidf_3]  # 3. project it into the LSI space
# lsi.add_documents([test_corpus_tfidf_3])  # optionally fold the new tf-idf document into the LSI model
print('——————————————lsi———————————————')
print(similarity_lsi[test_corpus_lsi_3])
# convert to LDA vectors
lda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=50)
corpus_lda = [lda[doc] for doc in corpus]
similarity_lda = Similarity('Similarity-LDA-index', corpus_lda, num_features=lda.num_topics, num_best=5)
test_corpus_lda_3 = lda[test_corpus_tfidf_3]  # project the tf-idf query into the LDA topic space
# lda.update([test_corpus_tfidf_3])  # note: LdaModel is updated with update(), it has no add_documents()
print('——————————————lda———————————————')
print(similarity_lda[test_corpus_lda_3])
print(lsi)
print('——————————————vectors———————————————')
print(lsi[corpus_tfidf[0]])  # LSI vector of the first document
#print(lsi.print_topics())
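
For a direct pairwise check, instead of going through the Similarity indexes above, gensim's matutils.cossim can compare two sparse vectors. A small sketch, assuming the lsi model and corpus_tfidf built above and a corpus with at least two documents:

from gensim import matutils
# cosine similarity between the LSI representations of the first two documents
vec_a = lsi[corpus_tfidf[0]]
vec_b = lsi[corpus_tfidf[1]]
print(matutils.cossim(vec_a, vec_b))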
