因为最近在一家IT公司实习,接触到了自然语言处理方面的知识,于是在网上查找了一些博客并简单运行了代码。下面是对代码的整理:基于《人民的名义》文本的人名相似度计算的一个复现,在原博客的基础上做了一些修改。
import jieba
import jieba.analyse
import logging
import os
import gensim
from gensim.models import word2vec
import importlib,sys
importlib.reload(sys)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def deal_data(path_in, s):
    """Segment the raw novel text with jieba and write the tokenized copy.

    Parameters
    ----------
    path_in : str
        Path of the raw UTF-8 text file (the novel) to segment.
    s : str
        Path of the output file that receives the space-joined tokens.

    Returns
    -------
    str
        The output path (``s``), ready to feed to ``word2vec.LineSentence``.
    """
    # Character names from the novel. Registering each one tells jieba to
    # keep it as a single token instead of splitting it into surname + given
    # name, which would destroy the "person" vocabulary we want to embed.
    names = (
        '沙瑞金', '田国富', '高育良', '侯亮平', '钟小艾', '陈岩石',
        '欧阳菁', '易学习', '王大路', '蔡成功', '孙连城', '季昌明',
        '丁义珍', '郑西坡', '赵东来', '高小琴', '赵瑞龙', '林华华',
        '陆亦可', '刘新建', '刘庆祝',
    )
    for name in names:
        jieba.suggest_freq(name, True)
    path_out = s
    with open(path_in, encoding='utf-8', errors='ignore') as f:
        document = f.read()
    # One space between tokens: the format word2vec.LineSentence expects.
    result = ' '.join(jieba.cut(document))
    # Text mode with an explicit encoding replaces the original's manual
    # .encode('utf-8') + 'wb' dance (identical bytes on disk); `with`
    # closes both files, so the original's extra close() calls are gone.
    with open(path_out, 'w', encoding='utf-8') as f2:
        f2.write(result)
    return path_out
def train_model(model_path, corpus_in=None, corpus_out=None):
    """Segment the corpus, train a word2vec model on it, and save it.

    Parameters
    ----------
    model_path : str
        Where to save the trained model (``model.save`` format).
    corpus_in : str, optional
        Raw text file to segment. Defaults to the module-level ``path_in``
        (set in ``__main__``), preserving the original global-based call.
    corpus_out : str, optional
        Where the segmented text is written. Defaults to the module-level
        ``s``, for the same backward-compatibility reason.

    Returns
    -------
    word2vec.Word2Vec
        The trained model.
    """
    # Fall back to the globals the original version read implicitly.
    src = path_in if corpus_in is None else corpus_in
    dst = s if corpus_out is None else corpus_out
    segmented = deal_data(src, dst)
    sentences = word2vec.LineSentence(segmented)
    # hs=1: hierarchical softmax; min_count=1 keeps every token (tiny corpus).
    # NOTE(review): `size=` is the gensim<4 keyword; gensim>=4 renamed it to
    # `vector_size` — confirm the installed gensim version.
    model = word2vec.Word2Vec(sentences, hs=1, min_count=1, window=3, size=100)
    model.save(model_path)
    return model
def predict_model(name):
    """Train the model, then print up to 5 similar three-character tokens.

    Parameters
    ----------
    name : str
        The person name to query (must be in the training vocabulary,
        otherwise gensim raises ``KeyError``).
    """
    # Reads the module-level `model_path` global, as the original did.
    model = train_model(model_path)
    remaining = 5
    # `model.wv.most_similar` works on both gensim 3.x and 4.x, whereas the
    # original `model.most_similar` was deprecated in 3.x and removed in 4.x.
    for token, score in model.wv.most_similar(name, topn=100):
        # Three-character tokens are almost always full person names in this
        # corpus, so filter the neighbours down to those.
        if len(token) == 3:
            remaining -= 1
            print(f"{token} {score}")
            if remaining == 0:
                break
if __name__=='__main__':
    # Absolute paths on the author's machine — adjust to your own layout.
    # path_in: raw novel text; s: segmented output; model_path: saved model.
    path_in='C:/Users/sunny/Desktop/实验语料/in_the_name_of_people.txt'
    s='C:/Users/sunny/Desktop/实验语料/in_the_name_of_people_segment.txt'
    model_path='C:/Users/sunny/Desktop/实验语料/w2c.model'
    # Runs the whole pipeline: segment -> train -> print similar names.
    predict_model('侯亮平')
代码解读
该代码大致分为四个部分。首先定义人名,告诉程序这几个词是不用再切分的;随后利用jieba进行分词,分词后的结果保存在路径s中,可以根据自己的需要修改该路径,之后可以在指定文件夹中看到分词后的txt文件。deal_data函数返回的是保存分词结果的路径,并传入第二个函数train_model(函数内部换了一个变量名,变成了path_out)。train_model用分词后的结果训练词向量模型,训练过程如下:
等训练完成后,指定的model_path路径中会出现一个model文件。再利用这个model文件,运用到第三个函数predict_model,输入的参数即为人名,最后的输出结果如下: