"""
@author: wg
@software: PyCharm
@file: word_frequency_statistics.py
@time: 2017/3/16 0016 10:46
"""
import os
import nltk
'''
Use NLTK to aggregate word frequencies across multiple text files.
'''
dictionary = {}  # aggregated word -> count, merged across all files
stopwords = ['、', '(', ')', ',', '。', ':', '“', '”', '\n\u3000', '\u3000', '的', '‘', '’']  # punctuation / function words to skip
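
# Minimal sketch (illustrative, not part of the original pipeline): nltk.FreqDist
# behaves like a collections.Counter over tokens, which is the behaviour process()
# below relies on. The sample text and the helper name _freqdist_demo are assumptions.
def _freqdist_demo():
    sample = '秦 孝公 求贤 商鞅 变法 秦'
    fd = nltk.FreqDist(sample.split(' '))  # fd['秦'] == 2, every other token == 1
    return fd.most_common()  # all tokens sorted by count, e.g. [('秦', 2), ('孝公', 1), ...]
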
def process():
    data_dir = '../../data/wordcloud/'
    for f in os.listdir(data_dir):  # iterate over every text file in the corpus directory
        path = os.path.join(data_dir, f)
        print(path)
        with open(path, 'r', encoding='utf-8') as fp:  # read the file's contents
            text = fp.read()
        fredist = nltk.FreqDist(text.split(' '))  # per-file word frequencies
        for localkey in fredist.keys():  # merge into the global dictionary
            if localkey in stopwords:  # skip stop words
                print('--> stop word:', localkey)
                continue
            if localkey in dictionary:  # word already seen: accumulate its count
                dictionary[localkey] += fredist[localkey]
                print('--> updated:', localkey, dictionary[localkey])
            else:  # first occurrence: add the word with its count
                dictionary[localkey] = fredist[localkey]
                print('--> new:', localkey, dictionary[localkey])
    print('===================================================')
    print(sorted(dictionary.items(), key=lambda x: x[1]))  # sort by frequency (ascending) and print
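
# Sketch of a possible follow-up step (an assumption, not in the original script):
# keep only the n most frequent words from the aggregated dictionary, e.g. to feed
# a word-cloud generator. The helper name top_words is illustrative.
def top_words(freq, n=50):
    # Sort by count in descending order and keep the first n entries.
    return sorted(freq.items(), key=lambda x: x[1], reverse=True)[:n]
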
if __name__ == '__main__':
process()