代码以及释义如下:
```
import jieba
import jieba.analyse
import wordcloud
from PIL import Image, ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
import jieba
import jieba.analyse
##============ Tokenization ===============
# Load the stopword list (one word per line).  `with` closes the file
# handle; the original left both files unclosed.
with open('chineseStopWords.txt', encoding='utf-8') as f:
    stopwords = [line.strip() for line in f]
stopwords.append('')  # also treat the empty string (left after stripping) as a stopword

path = '《三国演义》罗贯中.txt'  # path to the novel text saved by the crawler step
with open(path, 'r', encoding='utf-8') as f:
    file_in = f.read()

# Drop some unwanted tokens from jieba's dictionary so segmentation
# does not emit them.
jieba.del_word("却说")
jieba.del_word("二人")
jieba.del_word("荆州")

words = jieba.lcut(file_in)  # segment the whole text into a list of words
##============== Word-frequency counting =============
# Aliases of the same person mapped to one canonical name so their counts
# merge (玄德/孔明/云长 etc. are courtesy names or titles).
_ALIASES = {
    '玄德': '刘备', '玄德曰': '刘备',
    '丞相': '曹操',
    '孔明': '诸葛亮', '孔明曰': '诸葛亮',
    '关公': '关羽', '云长': '关羽',
}

word_freq = {}  # canonical word -> occurrence count
for word in words:
    # Skip stopwords and single-character tokens (mostly particles).
    if (word in stopwords) or len(word) == 1:
        continue
    newword = _ALIASES.get(word, word)
    word_freq[newword] = word_freq.get(newword, 0) + 1

# (word, freq) pairs sorted by frequency, highest first.  The original
# built the list with `fenci = freq_word.append(...)`, binding the
# list.append() return value (always None) to an unused name.
freq_word = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)

# Save the 50 most frequent words, one per line.  Open the file ONCE in
# 'w' mode: the original re-opened it in 'a' mode on every iteration, so
# re-running the script kept appending stale lines that the word-cloud
# step would then read back.
with open('wukaiaaaaa.txt', 'w', encoding='utf-8') as fp:
    for word, freq in freq_word[:50]:
        fp.write(word + '\n')
        print(word, freq)  # echo the top-50 words with their counts
##=========== Word-cloud rendering =======================
# Read back the top-50 words and join them into one space-separated
# string: WordCloud.generate() expects a single text string, not a list.
with open('wukaiaaaaa.txt', encoding='utf-8') as fp:
    top_words = [line.strip('\n') for line in fp]
text = " ".join(top_words)

# Name the instance `wc`, not `wordcloud` — the original shadowed the
# imported `wordcloud` module with the instance.
wc = WordCloud(
    font_path='C:/Users/Windows/fonts/simkai.ttf',  # CJK-capable font, required to render Chinese
    background_color="white", width=2000, height=2000,
    mask=np.array(Image.open("music.png")),  # shape the cloud to the mask image
).generate(text)

plt.imshow(wc, interpolation="bilinear")
plt.axis("off")  # hide the axes
plt.show()       # display the word cloud
```
结果如图:
词频:
词云:背景图随便选了张,有点丑,但是不难发现词频与字体大小的关系