12. Hands-On: Downloading the Full Set of 王者荣耀 (Honor of Kings) HD Wallpapers at High Speed (5)
Continuing from the previous part: with the producer-consumer threads in place, the multi-threaded download now succeeds.
Sample code
import os
import queue
import threading
from urllib import parse
from urllib import request

import requests

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
    "Referer": "http://pvp.qq.com/web201605/wallpaper.shtml",
    "Cookie": "tvfe_boss_uuid=488a8e48c1abea6d; pgv_pvi=7952056320; pgv_pvid=8762704384;pac_uid=0_5df3abeb9d26a; _ga=GA1.2.307520198.1578669164;Qs_lvt_323937=1578669166; Qs_pv_323937=3014627143395298000; isHostDate=18287;isOsSysDate=18287; isOsDate=18287; PTTuserFirstTime=1579996800000;PTTosSysFirstTime=1579996800000; PTTosFirstTime=1579996800000;pgv_info=ssid=s4291900342; ts_last=pvp.qq.com/web201605/wallpaper.shtml; ts_uid=5989698568;weekloop=0-0-0-5; eas_sid=h1h5v8R0g0W4z763z3Q9p4z3d4;gpmtips_cfg=%7B%22iSendApi%22%3A0%2C%22iShowCount%22%3A0%2C%22iOnlineCount%22%3A0%2C%22iSendOneCount%22%3A0%2C%22iShowAllCount%22%3A0%2C%22iHomeCount%22%3A0%7D;25ccfec4f8bd9940e1abeafb17ed5209=1;pvpqqcomrouteLine=wallpaper_wallpaper_wallpaper_wallpaper;PTTDate=1580048552956"
}


class Producer(threading.Thread):
    # Reads page URLs from page_queue, parses the JSON and feeds image tasks into image_queue

    def __init__(self, page_queue, image_queue, *args, **kwargs):
        # *args collects any positional arguments, **kwargs any keyword arguments
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self) -> None:
        while not self.page_queue.empty():
            page_url = self.page_queue.get()
            resp = requests.get(page_url, headers=headers)
            result = resp.json()
            datas = result['List']
            for data in datas:
                image_urls = extract_images(data)
                # The raw name broke directory creation: replace() removes the "1:1"
                # suffix and strip() removes the surrounding spaces
                name = parse.unquote(data['sProdName']).replace("1:1", "").strip()
                # e.g. images/猪八戒-年年有余/1.jpg
                dir_path = os.path.join("images", name)
                if not os.path.exists(dir_path):
                    # makedirs also creates the parent "images" directory if missing;
                    # exist_ok avoids a race between producer threads
                    os.makedirs(dir_path, exist_ok=True)
                for index, image_url in enumerate(image_urls):
                    self.image_queue.put({
                        "image_url": image_url,
                        "image_path": os.path.join(dir_path, "%d.jpg" % (index + 1))
                    })


class Consumer(threading.Thread):
    # Takes image tasks from image_queue and downloads them to disk

    def __init__(self, image_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.image_queue = image_queue

    def run(self) -> None:
        while True:
            try:
                image_obj = self.image_queue.get(timeout=10)
                image_url = image_obj.get("image_url")
                image_path = image_obj.get("image_path")
                try:
                    request.urlretrieve(image_url, image_path)
                    print(image_path + " downloaded!")
                except Exception:
                    print(image_path + " download failed!")
            except queue.Empty:
                # Nothing arrived for 10 seconds: the producers are done, so exit
                break


def extract_images(data):
    # sProdImgNo_1 .. sProdImgNo_8 hold URL-encoded thumbnail URLs; replacing
    # the "200" size marker with "0" gives the full-resolution image
    image_urls = []
    for x in range(1, 9):
        image_url = parse.unquote(data['sProdImgNo_%d' % x]).replace("200", "0")
        image_urls.append(image_url)
    return image_urls


def main():
    page_queue = queue.Queue(18)
    image_queue = queue.Queue(1000)
    for x in range(1, 18):
        # Copied from the workList request in the browser's network panel; the page
        # fires two workList requests, so try the other one if this one is wrong
        page_url = "http://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1&iAMSActivityId=51991&_everyRead=true&iTypeId=2&iFlowId=267733&iActId=2735&iModuleId=2735&_=1580125769997".format(page=x)
        page_queue.put(page_url)
    for x in range(3):
        th = Producer(page_queue, image_queue, name="Producer-%d" % x)
        th.start()
    for x in range(5):
        th = Consumer(image_queue, name="Consumer-%d" % x)
        th.start()


if __name__ == '__main__':
    main()
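Note that main() returns as soon as the threads are started; the consumers only stop after the image queue has stayed empty for 10 seconds. If you want the script to block until every download has finished (for example, to print a summary at the end), you can keep references to the threads and join() them. The snippet below is a minimal sketch of that idea, reusing the Producer and Consumer classes above; run_all is a helper name introduced here for illustration, not part of the original code.

def run_all(page_queue, image_queue):
    # Hypothetical helper: start the same worker threads as main(), but keep references to them
    threads = [Producer(page_queue, image_queue, name="Producer-%d" % x) for x in range(3)]
    threads += [Consumer(image_queue, name="Consumer-%d" % x) for x in range(5)]
    for th in threads:
        th.start()
    # join() blocks until each thread's run() returns: producers finish once the page
    # queue is drained, consumers exit after 10 idle seconds on the image queue
    for th in threads:
        th.join()
    print("All downloads finished.")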
Previous article: Chapter 5, Advanced Crawlers (11), 2020-01-28:
https://www.jianshu.com/p/49e11ba54a3b
Next article: Chapter 5, Advanced Crawlers (13), 2020-01-30:
https://www.jianshu.com/p/42f852ebdcaa
The material above was gathered from the internet and is for learning and exchange only; if it infringes on your rights, please message me privately and I will remove it. Thank you.