Calling a function from another file:
# caculate.py:
def caculateNum(num):
    '''
    Compute the cumulative sum of 1 through num
    :param num: upper bound of the sum
    :return: the cumulative sum
    '''
    # equivalent loop version:
    # res = 0
    # for i in range(1, num + 1):
    #     res += i
    # return res
    return sum(range(1, num + 1))
# text1.py:
import caculate
res = caculate.caculateNum(100)
print(res)
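As a variation on the same two files, the function can also be imported by name so the module prefix is not needed at the call site (a minimal sketch reusing the caculate module above; the file name text2.py is just illustrative):
# text2.py (hypothetical second caller file)
from caculate import caculateNum
print(caculateNum(100))  # 5050, same result without the caculate. prefix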
Required (positional) arguments and keyword arguments
Required arguments: they must be passed in exactly the declared order; the call has to match the function declaration.
def f(name, age):
    print('I am %s , I am %d years old .' % (name, age))
    # omitting pass here causes no problem
f('eric', 18)
Keyword arguments: keyword arguments allow the call order to differ from the declaration order.
# the Python interpreter matches argument values to parameters by name
# f(age=18, name='eric')
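Positional and keyword styles can also be mixed in one call, as long as all positional values come first (a small sketch using the f defined above):
f('eric', age=18)       # positional first, then keyword: valid
# f(name='eric', 18)    # keyword before positional: SyntaxError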
Default arguments
When a parameter with a default value is not passed, the default takes effect.
# similar in spirit to dict.get() with a fallback value
def f(name, age, sex='male'):
    print('I am %s , I am %d years old .' % (name, age))
    print('Sex is %s' % sex)
f(name='李四', age=19)
f('张三', 88, 'female')
# whether to pass arguments explicitly by keyword should be decided by what reads most clearly later on
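One caveat worth keeping in mind about defaults (standard Python behavior, not specific to the notes above): a default value is evaluated only once, when the function is defined, so a mutable default such as a list is shared across calls. A common workaround, sketched with a hypothetical helper:
def append_item(item, bucket=None):
    # None as the sentinel default avoids sharing a single list between calls
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket
print(append_item(1))  # [1]
print(append_item(2))  # [2], a fresh list on each call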
Anonymous functions (lambda)
Syntax:
lambda parameters: expression
lambda arg1, arg2, ...  (there can be more than one parameter before the colon)
What follows the colon must be a single expression; no return is written, and the return value is the value of that expression.
Advantages:
Less code, and the code looks "elegant".
def rect(x, y):
    return x * y
area = rect(3, 5)
print(area)
# the same thing with a lambda expression
res = lambda x, y: x * y
print(res(4, 5))
store = ['33', 88]
# conditional expression: fall back to "当当自营" when the list is empty
s = "当当自营" if len(store) == 0 else store[0]
print(s)
def cal(x, y):
    if x > y:
        return x * y
    else:
        return x / y
# the same logic with a lambda and a conditional expression
calc = lambda x, y: x * y if x > y else x / y
print('with lambda: ', calc(5, 4))
print('with lambda: ', calc(2, 4))
- Using a lambda expression as a sort key
stus = [
    {'name': 'zhangsan', 'age': 33},
    {'name': 'lisi', 'age': 22},
    {'name': 'wangwu', 'age': 43},
    {'name': 'zhaolui', 'age': 18},
    {'name': 'tangqi', 'age': 9},
]
print('before sorting', stus)
# key selects which element to sort by; reverse=True sorts from largest to smallest
res = sorted(stus, key=lambda x: x['age'], reverse=True)
print('sorted by age', res)
res = sorted(stus, key=lambda x: x['name'])
print('sorted by name', res)
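The key function can also return a tuple to sort by more than one field; a short sketch on the same stus list (the tie-break on name is purely illustrative, since the sample ages are all distinct):
res = sorted(stus, key=lambda x: (x['age'], x['name']))
print('sorted by age, then name', res)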
Case study: word-frequency statistics for character appearances in Romance of the Three Kingdoms
- The code requires the jieba and wordcloud packages
import jieba                      # word segmentation
from collections import Counter   # frequency counting
from wordcloud import WordCloud   # word-cloud generation
# jieba segmentation
txt = '我来到北京清华大学'
# split the string into a list of Chinese words
seg_list = jieba.lcut(txt)
print(seg_list)
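For comparison, jieba also offers a full mode that lists every word it can recognize in the text, which may produce overlapping segments (a small sketch; cut_all is a parameter of jieba.lcut):
print(jieba.lcut(txt, cut_all=True))  # full mode
print(jieba.lcut(txt))                # default precise mode, as used above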
def parse():
    """Count how often each character appears in Romance of the Three Kingdoms"""
    # set of irrelevant words to exclude from the ranking
    excludes = {"将军", "却说", "丞相", "二人", "不可", "荆州", "不能", "如此", "商议",
                "如何", "主公", "军士", "军马", "左右", "次曰", "引兵", "大喜", "天下",
                "东吴", "于是", "今日", "不敢", "魏兵", "陛下", "都督", "人马", "不知",
                "玄德曰", "孔明曰", "刘备", "关公"}
    with open('threekingdom.txt', 'r', encoding='utf-8') as f:
        txt = f.read()
        # print(txt)
    words = jieba.lcut(txt)
    print(words)
    # target dictionary shape: {'曹操': 555, ...}
    counts = {}
    for word in words:
        if len(word) == 1:
            # skip single characters (punctuation, particles, etc.)
            continue
        else:
            # add the word to the dictionary:
            # counts[word] = previous count + 1
            counts[word] = counts.get(word, 0) + 1
            # if the key already exists its count is incremented; if not, get() supplies the default 0 first
    print(counts)
    # merge alias spellings into the canonical character names
    counts['孔明'] = counts.get('孔明', 0) + counts.get('孔明曰', 0)
    counts['玄德'] = counts.get('玄德', 0) + counts.get('玄德曰', 0) + counts.get('刘备', 0)
    # merge 关公/云长 into 关羽: 关公 is in excludes and removed below, 云长 is popped here
    counts['关羽'] = counts.get('关羽', 0) + counts.get('关公', 0) + counts.pop('云长', 0)
    # delete the irrelevant and alias words
    for word in excludes:
        del counts[word]
    # top 10 most frequent words (method 1: sort the items with a lambda key)
    items = list(counts.items())
    # print('before sorting', items)
    items.sort(key=lambda x: x[1], reverse=True)
    # print('after sorting', items)
    for i in range(10):
        character, count = items[i]
        print(character, count)
    # top 10 most frequent words (method 2: collections.Counter, imported above)
    # roles = Counter(counts)
    # role = roles.most_common(10)
    # build the word-cloud input string: each name repeated once per occurrence
    li = []
    for i in range(10):
        character, count = items[i]
        for _ in range(count):
            li.append(character)
    cloud_txt = ",".join(li)
    wc = WordCloud(
        background_color='white',   # background color
        font_path='msyh.ttc',       # path to a font that can render Chinese (Microsoft YaHei here)
        # whether to count bigram collocations; the default is True
        collocations=False
    ).generate(cloud_txt)
    wc.to_file('三国中出现前十的人物.png')
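To actually run the analysis, parse() still has to be called; a minimal entry point, assuming threekingdom.txt and the msyh.ttc font file are present in the working directory:
if __name__ == '__main__':
    parse()
If method 2 is preferred, Counter(counts).most_common(10) returns the ten most frequent (word, count) pairs directly, without the manual sort.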