Purpose: output the semantic similarity of two pieces of text
Tools: Python 2, gensim __version__ = '3.4.0'
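The scripts below form a single pipeline: clean the raw corpus, segment it with jieba, drop digits and stopwords, train a Doc2Vec model, evaluate it, and finally score two texts from the command line. The core computation is the cosine of two inferred vectors; a minimal sketch of that computation (assuming a model has already been trained and saved as model/model.model; the token lists are hypothetical):

import numpy as np
from gensim.models.doc2vec import Doc2Vec

model = Doc2Vec.load('model/model.model')
v1 = model.infer_vector([u'文本', u'一'])   # hypothetical token lists
v2 = model.infer_vector([u'文本', u'二'])
# cosine similarity of the two inferred vectors
cos = v1.dot(v2) / (np.sqrt(v1.dot(v1)) * np.sqrt(v2.dot(v2)))
print(cos)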
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
@content: preprocessing
"""
import sys,jieba,time,re,codecs
reload(sys)
sys.setdefaultencoding('utf8')
# Input: one text per line
def etl():
    f = open('trainData/data.txt', 'r')
    out = open('trainData/etlData.txt', 'w')
    seen = []
    lines = f.readlines()
    for i in lines:
        strr = i.strip()                 # strip surrounding whitespace
        strr = strr.strip(' ')
        # drop the content inside (), [] and 【】 (the brackets themselves are kept)
        pattern1 = re.compile(r'(?<=\().*?(?=\))')
        strr = pattern1.sub('', strr)
        pattern2 = re.compile(r'(?<=\[).*?(?=\])')
        strr = pattern2.sub('', strr)
        strr = strr.replace(' ', '')     # remove embedded spaces
        pattern3 = re.compile(r'(?<=【).*?(?=】)')
        strr = pattern3.sub('', strr)
        if strr not in seen:             # de-duplicate
            seen.append(strr)
            out.write(str(strr) + '\n')
    out.close()
etl()
# De-duplicate the segmentation dictionary
def dedup_dict():   # renamed from dict() so the builtin is not shadowed
    f = open('dict/dict.txt', 'r')
    out = open('dict/dict_etl.txt', 'w')
    lines = f.readlines()
    seen = []
    for i in lines:
        i = i.strip()
        if i not in seen:
            seen.append(i)
            out.write(str(i) + '\n')
    out.close()
dedup_dict()
# Segmentation, loading the user dictionary
def fenci():
    f = open('trainData/etlData.txt', 'r')
    lines = f.readlines()
    output = open('trainData/etlData_jieba.txt', 'w')
    print("start time: %d:%d" % (time.localtime().tm_hour, time.localtime().tm_min))
    jieba.load_userdict('dict/dict_etl.txt')   # one entry per line; see the format sample below
    for line in lines:
        line = line.strip('\n')
        seg_list = jieba.cut(line)
        output.write(' '.join(seg_list) + '\n')
    output.close()
    print("end time: %d:%d" % (time.localtime().tm_hour, time.localtime().tm_min))
fenci()
# Remove digits and stopwords
def stops():
    stopwords = []
    st = codecs.open('dict/stopwords.txt', 'rb', encoding='utf-8')
    for l in st:
        stopwords.append(l.strip())
    f = open('trainData/etlData_jieba.txt', 'r')
    out = open('trainData/finalData.txt', 'w')
    lines = f.readlines()
    for line in lines:
        tokens = line.split(' ')
        kept = []
        for i in tokens:
            if not i.isdigit() and i not in stopwords:
                kept.append(str(i))
        out.write(' '.join(kept).strip() + '\n')
    out.close()
stops()
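For reference, jieba.load_userdict expects one entry per line in the form "word [frequency] [POS tag]", with frequency and tag both optional. A minimal sketch that writes such a file (the file name and entries here are made up for illustration):

sample_entries = [
    u'云计算 5 n',   # word, frequency, part-of-speech tag
    u'机器学习',      # frequency and tag may be omitted
]
with open('dict/dict_sample.txt', 'w') as fw:
    for entry in sample_entries:
        fw.write(entry.encode('utf-8') + '\n')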
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
@content: train the Doc2Vec model; gensim __version__ = '3.4.0'
"""
import sys,gensim, logging,time
reload(sys)
sys.setdefaultencoding('utf8')
print("start time:" + str(time.localtime(time.time()).tm_hour) + ":" + str(time.localtime(time.time()).tm_min) + "")
sentences= gensim.models.doc2vec.TaggedLineDocument('trainData/finalData.txt')
model = gensim.models.Doc2Vec(sentences,size=300, iter=2,workers=4,alpha=0.025, min_alpha=0.025)
for epoch in range(5):
model.train(sentences, total_examples=model.corpus_count, epochs=10)
model.alpha -= 0.002
model.min_alpha = model.alpha
model.train(sentences, total_examples=model.corpus_count, epochs=10)
model.save('model/model.model')
print("end time:" + str(time.localtime(time.time()).tm_hour) + ":" + str(time.localtime(time.time()).tm_min) + "")
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
"""
import sys,gensim,os
from gensim.models.doc2vec import Doc2Vec
reload(sys)
sys.setdefaultencoding('utf-8')
TaggedDocument = gensim.models.doc2vec.TaggedDocument   # fixed the "TaggededDocument" typo

# Build the training batch for one incremental document
def get_dataset(doc, cate):
    x_train = []
    word_list = doc.split(' ')
    word_list[-1] = word_list[-1].strip()   # drop the trailing newline on the last token
    document = TaggedDocument(word_list, tags=["a" + str(cate)])
    x_train.append(document)
    return x_train
# Incremental training on top of a saved model
def incrementalTraining(model_path, incr_corpuses):
    for corpus, cate in incr_corpuses.items():
        x_train = get_dataset(corpus, cate)
        model = Doc2Vec.load(model_path)
        print("corpus size of the original model: " + str(model.corpus_count))
        # model.build_vocab(x_train)
        tte = model.corpus_count + len(x_train)
        print("corpus size of the updated model: " + str(tte))
        model.train(x_train, total_examples=tte, epochs=70)   # incremental training
        model.save('E:/docSim/model/new.txt')
        sims = model.docvecs.most_similar("a" + str(cate))
        print(sims)
    return
if __name__ == '__main__':
    model_path = "E:/docSim/model/model.model"
    incr_corpuses = {'满纸 荒唐言 辛酸泪 都云作者痴 其中味 曹雪芹 贵族家庭 家庭 盛极而衰': 0}
    incrementalTraining(model_path, incr_corpuses)
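One caveat worth flagging: train() alone does not register new words or new document tags, which is why the commented-out build_vocab call above matters. gensim exposes build_vocab(..., update=True) for vocabulary expansion, but its behaviour with Doc2Vec has varied across versions, so treat the line below as an assumption to verify on your own gensim build:

# model.build_vocab(x_train, update=True)   # expand vocab/doctags before calling train()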
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
@content: evaluate the model and pick a similarity threshold: e.g. given 1000 pairs of
identical texts, the range where the similarity values cluster measures how well the
model predicts text similarity, and the threshold is chosen from that range.
"""
from __future__ import division
from gensim.models.doc2vec import Doc2Vec
import jieba,sys,numpy as np
reload(sys)
sys.setdefaultencoding('utf8')
# Input: path of a txt holding n pairs of identical texts, one text per line
# Output: the pairwise similarities, plus the proportion of pairs above each threshold
def cos(path):
    f = open(path, 'r')
    lines = f.readlines()
    out = open("E:/docSim/result/Percent.txt", "w")
    count6 = count7 = count8 = count9 = count95 = 0
    model = Doc2Vec.load("E:/docSim/model/model.model")
    num_pairs = len(lines) // 2
    for i in range(0, 2 * num_pairs, 2):
        # infer_vector expects a token list, so segment and split on spaces
        text1 = ' '.join(jieba.cut(lines[i].strip('\n'))).split(' ')
        text2 = ' '.join(jieba.cut(lines[i + 1].strip('\n'))).split(' ')
        vector1 = model.infer_vector(text1)
        vector2 = model.infer_vector(text2)
        # cosine similarity: dot product over the product of the vector norms
        v1 = np.sqrt(vector1.dot(vector1))
        v2 = np.sqrt(vector2.dot(vector2))
        cos_sim = vector1.dot(vector2) / (v1 * v2)
        out.write(str(cos_sim) + ' ')
        if cos_sim > 0.6:
            count6 += 1
        if cos_sim > 0.7:
            count7 += 1
        if cos_sim > 0.8:
            count8 += 1
        if cos_sim > 0.9:
            count9 += 1
        if cos_sim > 0.95:
            count95 += 1
    out.write('\n' + 'proportion with similarity > 0.6: ' + str(count6 / num_pairs))
    out.write('\n' + 'proportion with similarity > 0.7: ' + str(count7 / num_pairs))
    out.write('\n' + 'proportion with similarity > 0.8: ' + str(count8 / num_pairs))
    out.write('\n' + 'proportion with similarity > 0.9: ' + str(count9 / num_pairs))
    out.write('\n' + 'proportion with similarity > 0.95: ' + str(count95 / num_pairs))
    out.close()
cos("E:/docSim/testData/EvaSimPercent.txt")
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
@content: sanity-check the parameter settings: judge whether the parameters are reasonable
by how stable a text's TOP-N neighbours stay across different parameter choices.
"""
from __future__ import division
import jieba,sys
reload(sys)
sys.setdefaultencoding('utf8')
from gensim.models.doc2vec import Doc2Vec
def topN(path):
    model = Doc2Vec.load("model/model.model")   # load the trained model
    # TOP-N for a document that is inside the training corpus
    sims = model.docvecs.most_similar(2)        # top-N of training document 2 (0-indexed)
    vector1 = model.docvecs[0]                  # vector of training document 0
    sims = model.docvecs.similarity(0, 1)       # cosine similarity of training documents 0 and 1
    # TOP-N for a document that is not in the training corpus
    f = open(path, 'r')
    lines = f.readlines()
    out = open("result/topN.txt", "w")
    for line in lines:
        text1 = ' '.join(jieba.cut(line.strip('\n'))).split(' ')
        vector1 = model.infer_vector(text1)
        sims = model.docvecs.most_similar([vector1], topn=10)
        out.write(line.strip('\n') + str(sims) + '\n')
    out.close()
topN("testData/doc.txt")   # one text per line
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author:
@contact:
@time:
@content: run from the command line, passing the texts as arguments; two ways to compute text similarity
"""
from __future__ import division
from gensim.models.doc2vec import Doc2Vec
import jieba, sys, numpy as np, codecs, time
reload(sys)
sys.setdefaultencoding('utf8')
# Two arguments: the two texts to compare
text1 = sys.argv[1]
text2 = sys.argv[2]
# Segmentation
text1 = ' '.join(jieba.cut(text1.strip('\n'))).split(' ')
text2 = ' '.join(jieba.cut(text2.strip('\n'))).split(' ')
# Remove stopwords
stopwords = []
st = codecs.open('stopwords.txt', 'rb', encoding='utf-8')
for l in st:
    stopwords.append(l.strip())
text1Stop = [i for i in text1 if i not in stopwords]
text2Stop = [i for i in text2 if i not in stopwords]
# Method 1: word-overlap ratio
vocab = list(set(text1Stop + text2Stop))   # renamed from "all" so the builtin is not shadowed
count = 0.0
for i in set(text1Stop):
    if i in text2Stop:
        count += 1
coincidePer = count / (len(vocab) + 0.000001)   # tiny constant guards against division by zero
print(coincidePer)
# Method 2: Doc2Vec cosine similarity
print("time now: %d:%d:%d" % (time.localtime().tm_hour, time.localtime().tm_min, time.localtime().tm_sec))
model = Doc2Vec.load("model/model.model", mmap='r')
print("time now: %d:%d:%d" % (time.localtime().tm_hour, time.localtime().tm_min, time.localtime().tm_sec))
# Infer a vector for each text; reset the seed so inference is repeatable
model.random.seed(0)
vector1 = model.infer_vector(text1Stop, steps=5, alpha=0.025)
model.random.seed(0)
vector2 = model.infer_vector(text2Stop, steps=5, alpha=0.025)
# Cosine of the two inferred vectors
v1 = np.sqrt(vector1.dot(vector1))
v2 = np.sqrt(vector2.dot(vector2))
cos = vector1.dot(vector2) / (v1 * v2)
print(cos)
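Example invocation (the script file name docSim.py is an assumption):
    python docSim.py "今天天气很好" "今天天气不错"
It prints the word-overlap ratio first, then the Doc2Vec cosine similarity.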