三国T10分析
- 读取小说内容
- 将小说内容进行分词
- 词语过滤,删除分词后的无关词和重复词
- 将词语出现次数进行排序
- 得出结论,绘制词云
- _:该下划线的作用在于告诉别人,里面的循环不需要使用临时变量
- collocations=False:关闭相邻词组合(二元词组)的统计,避免重复词在词云中成对出现
import jieba
from wordcloud import WordCloud

# Words to drop from the frequency table: generic filler terms, plus alias
# forms ("孔明曰", "玄德曰", "刘备", "云长") whose counts are merged into the
# canonical character names below before being deleted.
exclude = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
           "如何", "主公", "军士", "军马", "左右", "次日", "引兵", "大喜", "天下",
           "东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知", "孔明曰", "玄德曰", "刘备", "云长"}

# 1. Read the novel text.
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    novel = f.read()

# 2. Segment with jieba and count occurrences, skipping single-character
#    tokens (mostly particles and punctuation).
count = {}
for word in jieba.lcut(novel):
    if len(word) > 1:
        count[word] = count.get(word, 0) + 1

# 3. Merge alias spellings into the canonical names, then drop the excluded
#    words.  .get()/.pop() keep this safe even when a word never occurred
#    (the original direct indexing raised KeyError in that case).
count['孔明'] = count.get('孔明', 0) + count.get('孔明曰', 0)
count['玄德'] = count.get('玄德', 0) + count.get('玄德曰', 0) + count.get('刘备', 0)
count['关公'] = count.get('关公', 0) + count.get('云长', 0)
for word in exclude:
    count.pop(word, None)

# 4. Sort (word, frequency) pairs by frequency, descending.
items = sorted(count.items(), key=lambda pair: pair[1], reverse=True)
print(items)

# 5. Build the word-cloud input: repeat each top-10 name by its frequency
#    so WordCloud sizes the names proportionally.
#    `freq` (not `count`) as the loop target avoids shadowing the dict above.
li = []
for role, freq in items[:10]:
    li.extend([role] * freq)
text = ' '.join(li)
WordCloud(
    font_path='msyh.ttc',
    background_color='lightgreen',
    width=800,
    height=600,
    # collocations=False: do not count adjacent-word bigrams, which would
    # otherwise duplicate repeated neighboring words in the cloud.
    collocations=False
).generate(text).to_file('top10.png')
匿名函数
# Anonymous functions: a lambda bound to a name, and lambdas as sort keys.
sum_num = lambda a, b: a + b
print(sum_num(2, 3))

# Sort (name, salary) tuples by the salary field, highest first.
name_info_list = [
    ('张三', 4500),
    ('张yi', 2500),
    ('张er', 1500),
    ('张qi', 6500),
]
name_info_list.sort(key=lambda pair: pair[1], reverse=True)
print(name_info_list)

# Sort student dicts by their 'age' value, ascending.
stu_info = [
    {"name": '张三', "age": 18},
    {"name": '张yi', "age": 33},
    {"name": '张er', "age": 8},
    {"name": '张qq', "age": 25}
]
stu_info.sort(key=lambda stu: stu['age'])
print(stu_info)
列表推导式
- 列表推导式—[表达式 for 临时变量 in 可迭代对象 可以跟追加条件]
# Build 0..9 with an explicit for loop...
li = []
for n in range(10):
    li.append(n)
print(li)
# ...and the same list with a comprehension.
print([i for i in range(10)])

# Keep the even numbers: loop version...
li = []
for n in range(10):
    if n % 2 == 0:
        li.append(n)
print(li)
# ...and the comprehension version with a trailing condition.
print([i for i in range(10) if i % 2 == 0])

# Keep the positive values out of 10 random ints in [-10, 10].
from random import randint
num_list = [randint(-10, 10) for _ in range(10)]
print(num_list)
print([v for v in num_list if v > 0])

# Dict comprehension: random grades for 100 students...
stu_grades = {"student{}".format(i): randint(50, 100) for i in range(1, 101)}
print(stu_grades)
# ...then keep only the entries scoring above 60.
print({name: score for name, score in stu_grades.items() if score > 60})
matplotlib
1. 曲线图
- .linspace()—左闭右闭区间的等差数列
- .legend()—图例
from matplotlib import pyplot as plt
import numpy as np

# Use SimHei so the Chinese labels render, and keep the minus sign readable.
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# 100 evenly spaced samples over one full period [0, 2π] (endpoint included).
x = np.linspace(0, 2 * np.pi, num=100)
sin_y = np.sin(x)
cos_y = np.cos(x)

# Draw sine and cosine on the same axes, each with its own legend label.
plt.plot(x, sin_y, color='g', linestyle='--', label='sin(x)')
plt.plot(x, cos_y, color='r', label='cos(x)')
plt.xlabel('时间(s)')
plt.ylabel('电压(V)')
plt.title('欢迎来到python世界')
plt.legend()   # show the per-curve labels
plt.show()
柱状图
- string.ascii_uppercase—由全部大写英文字母组成的字符串,切片[:5]取前五个字母
from random import randint
import string  # fix: `string` was used below but only imported in a later cell

# Five brand labels 口红A..口红E and a random price for each.
x = ['口红{}'.format(c) for c in string.ascii_uppercase[:5]]
y = [randint(200, 500) for _ in range(5)]
print(x)
print(y)

# Bar chart of price per brand (plt comes from the matplotlib cell above).
plt.xlabel('口红品牌')
plt.ylabel('价格(元)')
plt.bar(x, y)
plt.show()
饼图
- axis('equal')—使横纵坐标刻度等比例,饼图才是正圆
- explode属性—各扇区偏离圆心的距离
from random import randint
import string

# Six random salaries and matching labels 员工a..员工f.
counts = [randint(3500, 9000) for _ in range(6)]
labels = ['员工{}'.format(ch) for ch in string.ascii_lowercase[:6]]

explode = [0.1, 0, 0, 0, 0, 0]   # pull only the first wedge away from the center
colors = ['red', 'purple', 'blue', 'yellow', 'gray', 'green']
plt.pie(counts, explode=explode, shadow=True, labels=labels,
        autopct='%1.1f%%', colors=colors)
plt.legend(loc=2)    # legend in the upper-left corner
plt.axis('equal')    # equal axis scales so the pie is a true circle
plt.show()
散点图
# Scatter plot: 1000 points, both coordinates drawn from the standard
# normal distribution (mean 0, std 1).
xs = np.random.normal(0, 1, 1000)
ys = np.random.normal(0, 1, 1000)
plt.scatter(xs, ys, alpha=0.1)   # low alpha reveals point density
三国top10饼图
import jieba
from wordcloud import WordCloud
from matplotlib import pyplot as plt
import numpy as np

# Chinese-capable font and proper minus-sign rendering for the chart.
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Generic filler words to drop, plus alias forms whose counts are merged
# into the canonical character names before deletion.
exclude = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
           "如何", "主公", "军士", "军马", "左右", "次日", "引兵", "大喜", "天下",
           "东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知", "孔明曰", "玄德曰", "刘备", "云长"}

# Read and segment the novel, counting words longer than one character.
with open('./novel/threekingdom.txt', 'r', encoding='utf-8') as f:
    novel = f.read()
count = {}
for word in jieba.lcut(novel):
    if len(word) > 1:
        count[word] = count.get(word, 0) + 1

# Merge aliases into canonical names; .get()/.pop() tolerate absent keys
# (the original direct indexing raised KeyError in that case).
count['孔明'] = count.get('孔明', 0) + count.get('孔明曰', 0)
count['玄德'] = count.get('玄德', 0) + count.get('玄德曰', 0) + count.get('刘备', 0)
count['关公'] = count.get('关公', 0) + count.get('云长', 0)
for word in exclude:
    count.pop(word, None)

# Collect the top-10 characters by frequency.
items = sorted(count.items(), key=lambda pair: pair[1], reverse=True)
li = []        # frequencies of the top 10
mingzi = []    # matching character names
for role, freq in items[:10]:   # `freq` avoids shadowing the `count` dict
    li.append(freq)
    mingzi.append(role)

# Pie chart of each character's share of the top-10 mentions.
plt.pie(li, labels=mingzi, autopct='%1.1f%%')
plt.show()
红楼梦top10词云
import jieba
from wordcloud import WordCloud

# Filler words to remove; the alias "凤姐儿" is folded into "凤姐" first.
exclude = {"什么", "一个", "我们", "你们", "如今", "说道", "老太太", "知道", "姑娘",
           "起来", "这里", "出来", "众人", "那里", "奶奶", "自己", "太太", "一面",
           "只见", "两个", "没有", "怎么", "不是", "不知", "这个", "听见", "这样", "进来", "咱们",
           "就是", "东西", "告诉", "回来", "只是", "大家", "老爷", "只得", "丫头", "这些", "他们", "不敢", "出去", "所以", "凤姐儿"}

# 1. Read the novel.
with open('./novel/all.txt', 'r', encoding='utf-8') as f:
    words = f.read()

# 2. Segment and count multi-character words only.
count = {}
for token in jieba.lcut(words):
    if len(token) > 1:
        count[token] = count.get(token, 0) + 1

# 3. Merge the alias count, then delete the excluded words.
count['凤姐'] = count['凤姐'] + count['凤姐儿']
for token in exclude:
    del count[token]

# 4. Order (word, frequency) pairs by frequency, highest first.
items = sorted(count.items(), key=lambda pair: pair[1], reverse=True)
print(items)

# 5. Repeat each top-10 name by its frequency so the cloud scales it.
li = []
for role, freq in items[:10]:
    li.extend([role] * freq)
text = ' '.join(li)
WordCloud(
    font_path='msyh.ttc',
    background_color='lightgreen',
    width=800,
    height=600,
    # collocations=False: do not pair adjacent repeated words into bigrams
    collocations=False
).generate(text).to_file('红楼梦.png')