第五章 爬虫进阶(十一) 2020-01-28

十一、 实战-高速下载全套王者荣耀高清壁纸(4)


多线程示例代码:


import requests

from urllib import parse

import os

from urllib import request

import threading

import queue

 

# Request headers for the pvp.qq.com wallpaper API.
# - Referer is required: the server rejects requests without it.
# - Cookie is a captured browser session and is session-specific;
#   refresh it from your own browser's dev tools if requests start failing.
# NOTE(review): the User-Agent string is missing spaces ("5.0(Windows",
# "Gecko)Chrome") — presumably a copy/paste artifact; confirm the server
# still accepts it, or replace with a well-formed UA.
headers= {

    "User-Agent": "Mozilla/5.0(Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/79.0.3945.130 Safari/537.36",

    "Referer":"http://pvp.qq.com/web201605/wallpaper.shtml",

    "Cookie":"tvfe_boss_uuid=488a8e48c1abea6d; pgv_pvi=7952056320; pgv_pvid=8762704384;pac_uid=0_5df3abeb9d26a; _ga=GA1.2.307520198.1578669164;Qs_lvt_323937=1578669166; Qs_pv_323937=3014627143395298000; isHostDate=18287;isOsSysDate=18287; isOsDate=18287; PTTuserFirstTime=1579996800000;PTTosSysFirstTime=1579996800000; PTTosFirstTime=1579996800000;pgv_info=ssid=s4291900342; ts_last=pvp.qq.com/web201605/wallpaper.shtml;ts_uid=5989698568; weekloop=0-0-0-5; eas_sid=h1h5v8R0g0W4z763z3Q9p4z3d4;gpmtips_cfg=%7B%22iSendApi%22%3A0%2C%22iShowCount%22%3A0%2C%22iOnlineCount%22%3A0%2C%22iSendOneCount%22%3A0%2C%22iShowAllCount%22%3A0%2C%22iHomeCount%22%3A0%7D;25ccfec4f8bd9940e1abeafb17ed5209=1;pvpqqcomrouteLine=wallpaper_wallpaper_wallpaper_wallpaper;PTTDate=1580048552956"

}

 

 

class Producer(threading.Thread):
    """Producer thread: drains wallpaper-list page URLs from page_queue,
    fetches each page's JSON, and pushes one download job per image
    ({"image_url": ..., "image_path": ...}) onto image_queue for the
    Consumer threads.
    """

    def __init__(self, page_queue, image_queue, *args, **kwargs):
        # *args / **kwargs are forwarded unchanged so callers can still pass
        # threading.Thread options such as name=...
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue    # queue.Queue of list-page URLs to crawl
        self.image_queue = image_queue  # queue.Queue of per-image download jobs

    def run(self) -> None:
        # BUG FIX: original read `while self.page_queue.empty():`, which
        # exits immediately as soon as the queue HAS work — condition was
        # inverted.
        while not self.page_queue.empty():
            page_url = self.page_queue.get()
            resp = requests.get(page_url, headers=headers)
            result = resp.json()
            datas = result['List']
            for data in datas:
                image_urls = extract_images(data)
                # sProdName is URL-encoded; drop the "1:1" aspect-ratio tag
                # and surrounding whitespace so it is a clean directory name.
                name = parse.unquote(data['sProdName']).replace("1:1", "").strip()
                # e.g. images/猪八戒-年年有余/1.jpg
                dir_path = os.path.join("images", name)
                if not os.path.exists(dir_path):
                    # makedirs (not mkdir): also creates the parent "images"
                    # directory on the first run instead of raising.
                    os.makedirs(dir_path)
                for index, image_url in enumerate(image_urls):
                    self.image_queue.put({
                        "image_url": image_url,
                        "image_path": os.path.join(dir_path, "%d.jpg" % (index + 1)),
                    })

 

 

class Consumer(threading.Thread):
    """Consumer thread: takes download jobs off image_queue and saves each
    image to disk with urllib.request.urlretrieve.
    """

    def __init__(self, image_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.image_queue = image_queue  # queue.Queue of {"image_url", "image_path"}

    def run(self) -> None:
        while True:
            try:
                # Block with a timeout instead of testing empty() first:
                # the original empty()-then-get() races with the producers —
                # a consumer could exit before any job was queued, or block
                # forever on get() after losing the race to another consumer.
                image_obj = self.image_queue.get(timeout=20)
            except queue.Empty:
                break  # no work arrived for a while; assume producers are done
            image_url = image_obj.get("image_url")
            image_path = image_obj.get("image_path")
            try:
                request.urlretrieve(image_url, image_path)
                print(image_path + "下载完成!")
            except OSError as e:
                # Keep the thread alive when a single download fails
                # (network hiccup, bad URL) instead of killing the worker.
                print("下载失败: %s (%s)" % (image_url, e))

 

 

def extract_images(data):
    """Return the 8 full-resolution image URLs for one wallpaper record.

    Each of the fields sProdImgNo_1 .. sProdImgNo_8 holds a URL-encoded
    thumbnail URL ending in the size code "200"; rewriting that suffix to
    "0" yields the original-resolution image.

    :param data: one wallpaper dict from the API's "List" array
    :return: list of 8 decoded full-resolution URLs
    """
    image_urls = []
    for x in range(1, 9):
        image_url = parse.unquote(data['sProdImgNo_%d' % x])
        # BUG FIX: only rewrite the trailing size code. The original blanket
        # replace("200", "0") also corrupted any other "200" in the URL
        # (e.g. a year such as 2003 in the path).
        if image_url.endswith("200"):
            image_url = image_url[:-3] + "0"
        image_urls.append(image_url)
    return image_urls

 

 

def main():
    """Queue up all wallpaper list pages, then start 3 producer threads
    (page JSON -> image download jobs) and 5 consumer threads (jobs ->
    files on disk)."""
    page_queue = queue.Queue(18)
    image_queue = queue.Queue(1000)
    # The API's page parameter is 0-based: range(18) covers pages 0..17.
    # BUG FIX: the original range(1, 18) skipped page 0 and only queued
    # 17 of the 18 pages.
    for x in range(18):
        # Captured from the site's workList request; if it stops working,
        # re-capture the URL from the browser's network tab.
        page_url = "http://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1&iAMSActivityId=51991&_everyRead=true&iTypeId=2&iFlowId=267733&iActId=2735&iModuleId=2735&_=1580125769997".format(page=x)
        page_queue.put(page_url)

    for x in range(3):
        th = Producer(page_queue, image_queue, name="生产者%d号" % x)
        th.start()

    for x in range(5):
        th = Consumer(image_queue, name="消费者%d号" % x)
        th.start()

 

 

# BUG FIX: original read `if__name__` (missing space) — a SyntaxError.
if __name__ == '__main__':
    main()



上一篇文章 第五章 爬虫进阶(十) 2020-01-27 地址:

https://www.jianshu.com/p/2b5aa471058c

下一篇文章 第五章 爬虫进阶(十二) 2020-01-29 地址:

https://www.jianshu.com/p/87c35f2266e0



以上资料内容来源网络,仅供学习交流,侵删请私信我,谢谢。

你可能感兴趣的:(第五章 爬虫进阶(十一) 2020-01-28)