"""
Segment Chinese text with jieba and extract keywords.

Writes the segmentation result to one text file, then writes the top-100
keywords (by TF-IDF weight) to another text file.
"""

import jieba
import jieba.analyse  # TF-IDF keyword extraction (extract_tags)

# Absolute Windows paths — adjust for your environment.
SourceTxt="E:\\Programe\\PySeg\\RawTxt\\14.txt"  # raw text to be segmented
TargetTxt="E:\\Programe\\PySeg\\TagTxt\\14.txt"  # segmented output is written here
jieba.load_userdict("E:\\Programe\\PySeg\\DHdirec.txt")  # load a user-defined dictionary so domain terms stay intact

# --- Segmentation: write one space-joined, segmented line per input line ---
with open(SourceTxt, 'r', encoding='utf-8-sig') as sourcefile, \
     open(TargetTxt, 'a+', encoding='utf-8-sig') as targetfile:
    for raw_line in sourcefile:
        # jieba.cut returns a generator of tokens; precise mode with HMM
        # enabled for out-of-vocabulary word discovery.
        tokens = jieba.cut(raw_line.strip(), cut_all=False, HMM=True)
        targetfile.write(' '.join(tokens))
        targetfile.write('\n')
    print("写入成功")

# --- Keyword extraction: top-100 TF-IDF keywords from the segmented text ---
KeywordTxt = "E:/Programe/PySeg/Keywords14.txt"
with open(TargetTxt, 'r', encoding='utf-8-sig') as f, \
     open(KeywordTxt, 'a+', encoding='utf-8-sig') as kwordfile:
    # BUG FIX: the original passed str(f.readlines()) — the Python list repr,
    # including brackets, quotes and literal "\n" escapes — into extract_tags,
    # contaminating the corpus. Feed the raw file contents instead.
    text = f.read()
    # withWeight=True yields (keyword, weight) pairs; allowPOS=() means
    # no part-of-speech filtering.
    keywords = jieba.analyse.extract_tags(text, topK=100, withWeight=True, allowPOS=())
    for keyword, weight in keywords:
        # Same line format as before: "keyword weight " followed by a newline.
        kwordfile.write(str(keyword) + ' ' + str(weight) + ' ')
        kwordfile.write('\n')
    print("写入成功")
