# Time-consuming operations: network requests, I/O, file reads/writes
from time import sleep
def task1():
    """Generator task: yield 0..2, printing progress and pausing 2 s
    after each resumption so another task can be interleaved."""
    for i in range(3):
        print("1-------->{}".format(i))
        yield i          # hand control back to the scheduler loop
        sleep(2)         # simulated time-consuming work
def task2():
    """Generator task: yield 0..2, printing progress and pausing 1 s
    after each resumption (runs interleaved with task1)."""
    for i in range(3):
        print("2-------->{}".format(i))
        yield i          # hand control back to the scheduler loop
        sleep(1)         # simulated time-consuming work
if __name__ == '__main__':
    t1 = task1()
    t2 = task2()
    # Cooperatively alternate between the two generators until one of
    # them is exhausted, which raises StopIteration.
    while True:
        try:
            next(t1)
            next(t2)
        except StopIteration:  # narrowed from a bare `except:` that would also hide real bugs
            print("任务结束")
            break
import time
from greenlet import greenlet
def a():
    """Greenlet body: print 'A0'..'A4', switching to greenlet `gb`
    after each print (gb is a module-level global set in __main__)."""
    for i in range(5):
        print("A" + str(i))
        gb.switch()      # yield control to greenlet b
        time.sleep(2)
def b():
    """Greenlet body: print 'B0'..'B4', switching to greenlet `gc`
    after each print (gc is a module-level global set in __main__)."""
    for i in range(5):
        print("B" + str(i))
        gc.switch()      # yield control to greenlet c
        time.sleep(2)
def c():
    """Greenlet body: print 'C0'..'C4', switching back to greenlet `ga`
    after each print, closing the a -> b -> c -> a cycle."""
    for i in range(5):
        print("C" + str(i))
        ga.switch()      # yield control back to greenlet a
        time.sleep(2)
if __name__ == '__main__':
    # NOTE: ga/gb/gc are read as globals inside a(), b() and c(), so the
    # names must not change; `gc` also shadows the stdlib gc module here.
    ga = greenlet(a)
    gb = greenlet(b)
    gc = greenlet(c)
    ga.switch()  # start the cycle in greenlet a
import time
import gevent
from gevent import monkey
# Patch blocking stdlib calls (time.sleep, socket I/O, ...) so gevent can
# switch greenlets while a task waits instead of blocking the whole process.
monkey.patch_all()
def a():
    """Gevent task: print 'A0'..'A4', sleeping 1 s between prints.
    With monkey.patch_all() applied, time.sleep yields to other greenlets."""
    for i in range(5):
        print("A" + str(i))
        time.sleep(1)
def b():
    """Gevent task: print 'B0'..'B4', sleeping 1 s between prints.
    Runs interleaved with a() and c() thanks to the monkey patch."""
    for i in range(5):
        print("B" + str(i))
        time.sleep(1)
def c():
    """Gevent task: print 'C0'..'C4', sleeping 1 s between prints.
    Runs interleaved with a() and b() thanks to the monkey patch."""
    for i in range(5):
        print("C" + str(i))
        time.sleep(1)
if __name__ == '__main__':
    # Spawn the three tasks as greenlets and wait for all of them at once;
    # gevent.joinall replaces the three individual .join() calls.
    tasks = [gevent.spawn(a), gevent.spawn(b), gevent.spawn(c)]
    gevent.joinall(tasks)
import urllib.request
import gevent
from gevent import monkey
# Patch blocking stdlib calls (sockets used by urllib, ...) so the
# downloads below can run concurrently in greenlets.
monkey.patch_all()
def download(url):
    """Fetch *url* and print a completion message.

    The response is opened in a with-statement so the underlying socket is
    always closed (the original left it open). The body is read to completion
    but its content is not used further.
    """
    # Time-consuming network request.
    with urllib.request.urlopen(url) as response:
        response.read()
    print("下载了{}网站的数据".format(url))
if __name__ == '__main__':
    urls = ['http://www.163.com', 'http://www.qq.com', 'http://www.baidu.com']
    try:
        # Download all sites concurrently and wait for every greenlet.
        jobs = [gevent.spawn(download, url) for url in urls]
        gevent.joinall(jobs)
    except Exception:  # narrowed from a bare `except:` (kept best-effort behavior)
        print("报错了")