Machine Learning Algorithms in Python: Computing Word Similarity with word2vec

The script below segments a Chinese corpus with jieba, trains a word2vec model with gensim, and then computes pairwise similarities between the words of two input files.

#!/usr/bin/env python3
# coding=utf-8
import jieba
jieba.load_userdict("C:\\Users\\Desktop\\s_proj\\dict.txt")  # load a custom user dictionary for segmentation
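jieba's user dictionary is a plain-text file with one entry per line: the word itself, optionally followed by a frequency and a part-of-speech tag, separated by spaces. A hypothetical dict.txt for this car-review corpus might contain lines like the following (entries are illustrative, not from the original project):

新能源车 10 n
变速箱 5 n
召回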

# Segment the corpus and write the result to a txt file
f1 = open("C:\\Users\\Desktop\\neg.txt", "r", encoding='utf-8', errors='ignore')
f2 = open("C:\\Users\\Desktop\\car_fenci.txt", 'w', encoding='utf-8', errors='ignore')
lines = f1.readlines()  # read the whole corpus into memory
for line in lines:
    # str.replace returns a new string, so the result must be assigned back
    line = line.replace('\t', '').replace('\n', '').replace(' ', '')
    seg_list = jieba.cut(line, cut_all=False)  # precise mode
    f2.write(" ".join(seg_list) + "\n")  # newline keeps tokens from adjacent lines from running together
f1.close()
f2.close()
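Before training, it is worth a quick sanity check that segmentation behaves as expected; a minimal sketch (the sample sentence is illustrative):

print(" ".join(jieba.cut("这辆车的发动机噪音很大", cut_all=False)))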

from gensim.models import word2vec
import logging
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', level=logging.INFO)

# Train the word2vec model
sentences = word2vec.Text8Corpus("C:\\Users\\Desktop\\car_fenci.txt")
# sg=1 selects skip-gram (the gensim default, sg=0, is CBOW); the default window is 5
model = word2vec.Word2Vec(sentences, size=400, window=10, min_count=1, sg=1)  # gensim >= 4.0 renames size to vector_size
print(model)
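Once training finishes, the model can be probed directly. A minimal sketch using gensim's most_similar (the query word 发动机 is an illustrative example and must appear in the training corpus):

# list the five words closest to the query word in vector space
for word, score in model.wv.most_similar("发动机", topn=5):
    print(word, score)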

# Save the model
model.save("model_word")
# Store the word vectors in the binary format that the original C word2vec tool can parse
model.wv.save_word2vec_format("model_word.bin", binary=True)
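Both artifacts can be loaded back in a later session; a short sketch using gensim's standard loading APIs:

from gensim.models import Word2Vec, KeyedVectors

model = Word2Vec.load("model_word")  # full model, training can be resumed
wv = KeyedVectors.load_word2vec_format("model_word.bin", binary=True)  # vectors only, read-only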

# Open the two text files whose words will be compared
f3 =open(r"C:\Users\Desktop\s_proj\keyword.txt","r",encoding='utf-8',errors='ignore')
f4=open(r"C:\Users\Desktop\s_proj\c.txt","r",encoding='utf-8',errors='ignore')

# Preprocess the two word lists: strip trailing newlines
f = []
ff = []
for i in f3.readlines():
    f.append(i.replace('\n', ''))
# read f4 in its own loop: a file object is exhausted after the first readlines(),
# so nesting this inside the loop above only worked by accident
for j in f4.readlines():
    ff.append(j.replace('\n', ''))
f3.close()
f4.close()

# Compute the similarity of every pair of words from the two lists
for ii in f:
    for jj in ff:
        try:
            y1 = model.wv.similarity(ii, jj)
            print(ii + ' ' + jj, y1)
        except KeyError:  # at least one word is not in the model's vocabulary
            print(ii + ' ' + jj, 0)
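As an alternative to catching the exception, out-of-vocabulary words can be filtered up front, since gensim's KeyedVectors supports the in operator; a minimal sketch:

for ii in f:
    for jj in ff:
        if ii in model.wv and jj in model.wv:  # both words are in the vocabulary
            print(ii + ' ' + jj, model.wv.similarity(ii, jj))
        else:
            print(ii + ' ' + jj, 0)  # treat out-of-vocabulary pairs as similarity 0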
