jieba分词-选择词性-词频统计

导入模块包

import numpy as np
import pandas as pd
import jieba
import jieba.analyse
import codecs
import os #更改当前路径
import re

设置当前工作路径

# Change the working directory so the relative file names below resolve.
os.chdir(r'文件路径1')
# Widen pandas' column display width. Use the fully-qualified option key:
# the bare 'max_colwidth' alias was deprecated and removed in pandas 1.0+,
# where pd.set_option('max_colwidth', ...) raises OptionError.
pd.set_option('display.max_colwidth', 500)

载入数据


# Load the spreadsheet; dtype=str keeps every cell as text so jieba
# always receives strings rather than auto-parsed numbers/dates.
rows = pd.read_excel('某某.xlsx', dtype=str)

segments = []  # one {'word', 'count'} record per extracted keyword
for index, row in rows.iterrows():
    # Column at positional index 4 (i.e. the 5th column). Use .iloc for
    # positional access — bare `row[4]` on a Series is deprecated in
    # modern pandas and ambiguous between label and position.
    content = row.iloc[4]
    # TextRank keyword extraction restricted to the POS tags
    # ns / n / vn / v, keeping at most the top 50 keywords per row.
    words = jieba.analyse.textrank(content, topK=50, withWeight=False,
                                   allowPOS=('ns', 'n', 'vn', 'v'))
    for word in words:
        # Record each occurrence; counts are aggregated globally later.
        segments.append({'word': word, 'count': 1})
    # NOTE(review): the original also built a per-row space-joined string
    # (`splitedStr`) that was never read anywhere — removed as dead code.
dfSg = pd.DataFrame(segments)

词频统计

# Aggregate the per-occurrence records into total counts per keyword;
# the result is a Series indexed by the keyword strings.
dfWord = dfSg.groupby('word').sum()['count']
# Uncomment to export the frequency table:
#dfWord.to_csv('keywords1.csv',encoding='utf-8')

你可能感兴趣的:(jieba分词-选择词性-词频统计)