import jieba
# 1. Read the novel text
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    words = f.read()
counts = {}  # word-frequency dict, e.g. {'曹操': 234, '回寨': 56}
excludes = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
"如何", "主公", "军士", "军马", "左右", "次日", "引兵", "大喜", "天下",
"东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知",
"孔明曰","玄德曰","刘备","云长"}
# 2. Word segmentation
words_list = jieba.lcut(words)
# print(words_list)
for word in words_list:
    if len(word) <= 1:
        continue
    else:
        # Update this word's count in the dict
        # counts[word] = counts[word] + 1 would raise a KeyError the first time a word appears
        # dict.get(k) returns None when the key is missing; get(k, 0) returns 0 instead
        counts[word] = counts.get(word, 0) + 1
print(len(counts))
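# The same counting step can also be written with the standard library's collections.Counter.
# This is an optional sketch for comparison, not part of the original script; counts_alt is
# a name introduced here so the original counts dict is left untouched.
from collections import Counter

counts_alt = Counter(word for word in words_list if len(word) > 1)
print(len(counts_alt))  # should match len(counts) above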
# 3. Filter words: merge aliases of the same character, then delete irrelevant words
counts['孔明'] = counts['孔明'] + counts['孔明曰']
counts['玄德'] = counts['玄德'] + counts['玄德曰'] + counts['刘备']
counts['关公'] = counts['关公'] + counts['云长']
for word in excludes:
    del counts[word]
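# Note: del counts[word] raises a KeyError if an excluded word never appeared in the
# segmented text (e.g. with a different jieba version). A safer alternative sketch using
# dict.pop with a default, shown for comparison rather than as the original approach:
for word in excludes:
    counts.pop(word, None)  # silently skips words that are not present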
# 4. Sort into a list of (word, count) tuples
items = list(counts.items())
print(items)
def sort_by_count(x):
    return x[1]
items.sort(key=sort_by_count, reverse=True)
for i in range(10):
    # sequence unpacking
    role, count = items[i]
    print(role, count)
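# For just the top 10, a full sort is not strictly needed; heapq.nlargest gives the same
# result. A small alternative sketch, assuming the counts dict built above:
import heapq

top10 = heapq.nlargest(10, counts.items(), key=lambda kv: kv[1])
for role, count in top10:
    print(role, count)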
# 5. Draw conclusions
matplotlib
# Import pyplot and set a font so Chinese labels render correctly
from matplotlib import pyplot as plt
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import numpy as np
# Plot a sine curve over [0, 2π] using 100 points
# np.linspace returns an evenly spaced array over a closed interval (both endpoints included)
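# A quick check of the closed-interval behaviour with a throwaway 5-point example:
print(np.linspace(0, 1, num=5))  # [0.   0.25 0.5  0.75 1.  ] -- both endpoints included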
x = np.linspace(0, 2*np.pi, num=100)
print(x)
y = np.sin(x)
# Plot sine and cosine in the same coordinate system
cosy = np.cos(x)
plt.plot(x, y, color='g', linestyle='--', label='sin(x)')
plt.plot(x, cosy, color='r', label='cos(x)')
plt.xlabel('时间(s)')
plt.ylabel('电压(V)')
plt.title('欢迎来到python世界')
# Show the legend
plt.legend()
plt.show()
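# Optional: to write the curves to an image file as well, the same plot can be rebuilt on a
# Figure object and saved with savefig. A minimal sketch; 'sine_cosine.png' and dpi=150 are
# illustrative choices, not from the original notes.
fig, ax = plt.subplots()
ax.plot(x, y, color='g', linestyle='--', label='sin(x)')
ax.plot(x, cosy, color='r', label='cos(x)')
ax.legend()
fig.savefig('sine_cosine.png', dpi=150)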
红楼梦: top 10 character analysis
import jieba
from wordcloud import WordCloud
# 1. Read the novel text
with open('./novel/all.txt', 'r', encoding='utf-8') as f:
    words = f.read()
counts = {}
excludes = {'什么','我们','你们','如今','说道','知道','姑娘','起来','这里','出来','众人','那里','奶奶',
'自己','太太','一面','只见','两个','没有','怎么','不是','不知','这个','听见','这样','进来',
'咱们','就是','东西','告诉','回来','回来','只是','大家','老爷','只得','丫头','这些','他们',
'不敢','出去','所以','一个','贾宝玉','王熙凤','老太太','凤姐儿','林黛玉','薛宝钗'}
# 2. Word segmentation
words_list = jieba.lcut(words)
print(words_list)
for word in words_list:
    if len(word) <= 1:
        continue
    else:
        counts[word] = counts.get(word, 0) + 1
print(counts)
# 3. Merge words that refer to the same character
counts['宝玉'] = counts['宝玉'] + counts['贾宝玉']
counts['黛玉'] = counts['黛玉'] + counts['林黛玉']
counts['宝钗'] = counts['宝钗'] + counts['薛宝钗']
counts['贾母'] = counts['老太太'] + counts['贾母']
counts['凤姐'] = counts['凤姐'] + counts['王熙凤'] + counts['凤姐儿']
# Delete irrelevant words
for word in excludes:
    del counts[word]
# 4. Sort into a list of (word, count) tuples
items = list(counts.items())
print(items)
def sort_by_count(x):
    return x[1]
# items.sort(key=sort_by_count,reverse=True)
items.sort(key=lambda i: i[1], reverse=True)
li = []
# Iterate over the top 10
for i in range(10):
    # sequence unpacking
    role, count = items[i]
    print(role, count)
    # _ tells readers that the loop variable is not used inside the body
    for _ in range(count):
        li.append(role)
# 5. Draw conclusions: render the word cloud
text = ' '.join(li)
WordCloud(
    font_path='msyh.ttc',
    background_color='white',
    width=880,
    height=600,
    # disable collocations so adjacent repeated word pairs are not counted as extra bigrams
    collocations=False
).generate(text).to_file('honglou_top10.png')  # output filename assumed; it was truncated in the original
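# The list that repeats each name count times only exists so that generate() sees the right
# relative frequencies. The wordcloud package also accepts the counts directly through
# generate_from_frequencies, which skips the intermediate list. A sketch under that
# assumption; the output filename here is illustrative:
top10_counts = dict(items[:10])  # {role: count} for the ten most frequent characters
WordCloud(
    font_path='msyh.ttc',
    background_color='white',
    width=880,
    height=600,
).generate_from_frequencies(top10_counts).to_file('honglou_top10_freq.png')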