Write a program that counts the ten characters who appear most often in Dream of the Red Chamber (《红楼梦》) and generates a word cloud from them.
(1) First, download the text of the novel from the internet and save it as 红楼梦.txt.
(2) Find an image to use as the word-cloud mask, e.g. a.jpg (any image will do; place it in the same directory as the Python script to be run).
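The program below relies on the jieba library for Chinese word segmentation (plus wordcloud, matplotlib, numpy, and Pillow for drawing). As a quick sanity check before running the full script, jieba.lcut() can be called on a single sentence to see the segmentation it produces; the sample line in this sketch is just the novel's opening couplet and is only an illustration.

import jieba

# jieba.lcut() segments a string and returns the words as a list.
sample = "满纸荒唐言,一把辛酸泪。"
print(jieba.lcut(sample))
# Single-character tokens (particles, punctuation) are filtered out later,
# so only multi-character words enter the frequency count.

The main program follows.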
import jieba
import numpy as np
from PIL import Image
from wordcloud import WordCloud
import matplotlib.pyplot as plt

def paiming(path):
    # Read the novel from the given path as UTF-8 text.
    txt = open(path, 'r', encoding='utf-8').read()
    # Segment the full text into words with jieba.
    words = jieba.lcut(txt)
    # Frequent words that are not character names, found by inspecting earlier runs; they are excluded below.
    excludes = ["什么", "一个", "我们", "你们", "如今", "说道", "知道", "起来", "这里",
                "出来", "那里", "众人", "自己", "一面", "只见", "两个", "没有", "怎么",
                "不是", "不知", "这个", "听见", "这样", "进来", "姑娘", "太太", "咱们",
                "就是", "东西", "告诉"]
    # Count every segmented word of length >= 2; single characters are mostly
    # particles and punctuation, so they are skipped.
    counts = {}
    for word in words:
        if len(word) == 1:
            continue
        counts[word] = counts.get(word, 0) + 1
    # Remove the non-name words; pop() avoids a KeyError if a word never appeared.
    for word in excludes:
        counts.pop(word, None)
    # Sort by frequency, highest first.
    items = list(counts.items())
    items.sort(key=lambda x: x[1], reverse=True)
    # Collect the ten most frequent words, normalizing the two common aliases
    # to the characters' full names.
    names = []
    for i in range(10):
        word, count = items[i]
        if word == "宝玉":
            word = "贾宝玉"
        if word == "黛玉":
            word = "林黛玉"
        names.append(word)
    return " ".join(names)

mask = np.array(Image.open("a.jpg"))      # the mask image defines the cloud's shape
text = paiming("红楼梦.txt")               # space-separated names of the top-10 characters
wordcloud = WordCloud(font_path='simkai.ttf', width=1000, height=700,
                      background_color='black', mask=mask,
                      random_state=3, min_font_size=80).generate(text)
plt.title("wordcloud")
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
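Because paiming() puts each name into the returned string exactly once, WordCloud.generate() sees equal frequencies and draws all ten names at roughly the same size. If the font size should instead reflect how often each character actually appears, the raw counts can be passed to WordCloud.generate_from_frequencies(). The following is a minimal sketch of that variant; top_counts() is a hypothetical helper that repeats the counting and alias-merging logic of paiming(), and the snippet reuses the imports from the program above.

# The same non-name words used in paiming(); repeated here so this variant runs on its own.
non_names = ["什么", "一个", "我们", "你们", "如今", "说道", "知道", "起来", "这里",
             "出来", "那里", "众人", "自己", "一面", "只见", "两个", "没有", "怎么",
             "不是", "不知", "这个", "听见", "这样", "进来", "姑娘", "太太", "咱们",
             "就是", "东西", "告诉"]

def top_counts(path, excludes, n=10):
    # Like paiming(), but return the top-n words together with their raw counts.
    txt = open(path, 'r', encoding='utf-8').read()
    counts = {}
    for word in jieba.lcut(txt):
        if len(word) > 1:
            counts[word] = counts.get(word, 0) + 1
    for word in excludes:
        counts.pop(word, None)
    # Merge the two common aliases into the full names while keeping their counts.
    alias = {"宝玉": "贾宝玉", "黛玉": "林黛玉"}
    top = {}
    for word, count in sorted(counts.items(), key=lambda x: x[1], reverse=True)[:n]:
        name = alias.get(word, word)
        top[name] = top.get(name, 0) + count
    return top

freqs = top_counts("红楼梦.txt", non_names)
wc = WordCloud(font_path='simkai.ttf', width=1000, height=700,
               background_color='black', mask=np.array(Image.open("a.jpg")),
               random_state=3).generate_from_frequencies(freqs)
plt.figure()
plt.imshow(wc)
plt.axis("off")
plt.show()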