Crawling HD Wallpaper Images with Multiple Threads

Straight to the source code: the script walks the Pexels search pages for nature wallpapers, strips the size parameters from each thumbnail URL to get the full-resolution image, and downloads every image in its own thread.

import requests
from lxml import etree
import os
import re
import time
from threading import Thread
def gaoqing(src):
    # Keep everything before the first '?': the thumbnail URLs carry resize and
    # compression parameters in the query string, so stripping it yields the
    # full-resolution ("HD") image URL.
    pattern = re.compile(r'(.*?)\?', re.S)
    gqsrc = re.findall(pattern, src)
    # Fall back to the original URL if there is no query string, so the caller's
    # gqsrc[0] never raises an IndexError.
    return gqsrc if gqsrc else [src]
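# For example, with a hypothetical thumbnail URL of the usual Pexels shape:
#   gaoqing('https://images.pexels.com/photos/123/nature.jpeg?auto=compress&h=350')
# returns ['https://images.pexels.com/photos/123/nature.jpeg'], and down() below then
# fetches that bare URL, which is the full-size file.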
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
# Output directory for the downloaded wallpapers (created on the first run).
os.makedirs('E:\\自然高清图片', exist_ok=True)
path = 'E:\\自然高清图片\\'
count = 0  # number of images handed off for download so far
urls = ['https://www.pexels.com/search/nature%20wallpaper/?page={}'.format(i) for i in range(1, 100)]
def crawl():  # renamed from list() so it no longer shadows the built-in list type
    global count
    for url in urls:
        res = requests.get(url, headers=headers, timeout=10)
        html = etree.HTML(res.text)
        infos = html.xpath('//div[@class="photos__column"]/div')
        for info in infos:
            img = info.xpath('article/a[1]/img/@src')
            if len(img) == 1:
                img = str(img[0])
                count += 1
                print('downloading image %s: %s' % (count, img))
                src = gaoqing(img)
                name = str(count) + '线程'  # label used for the saved file name
                # Key point: target takes the function object itself (no parentheses,
                # which would call it right here), and args is a tuple of positional
                # arguments, so a single argument needs the trailing comma.
                # A stand-alone illustration of this pattern follows the script.
                th = Thread(target=down, args=(src, name,))
                th.start()
                # time.sleep(5)  # uncomment to throttle requests to the site
def down(src, name):
    # Download one full-resolution image and save it under the given name.
    data = requests.get(src[0], headers=headers, timeout=10)
    with open(path + str(name) + '.jpeg', 'wb') as f:
        f.write(data.content)

if __name__ == '__main__':
    crawl()
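
The Thread(...) call in crawl() is the crux of the post, so here is the same pattern in isolation, using a throwaway greet function that is not part of the scraper: target receives the function object without parentheses (otherwise it would run immediately in the current thread), and args must be a tuple, which is why a single argument needs a trailing comma.

from threading import Thread

def greet(name):
    print('hello,', name)

# greet (no parentheses) defers the call to the worker thread;
# ('world',) is a one-element tuple thanks to the trailing comma.
t = Thread(target=greet, args=('world',))
t.start()
t.join()  # wait for the worker to finish before the script exits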
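
One caveat with the script as written: it spawns a brand-new thread for every image with no upper bound, so a long run can create hundreds of threads. Below is a minimal sketch of a bounded variant. It reuses the urls, headers, gaoqing and down defined above; the crawl_pooled name, the max_workers default and the 'pool_' file label are only illustrative choices, not part of the original script.

from concurrent.futures import ThreadPoolExecutor

def crawl_pooled(max_workers=8):
    # Same crawl loop, but downloads are queued onto a fixed pool of worker
    # threads instead of starting one Thread per image.
    n = 0
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for url in urls:
            res = requests.get(url, headers=headers, timeout=10)
            html = etree.HTML(res.text)
            for info in html.xpath('//div[@class="photos__column"]/div'):
                img = info.xpath('article/a[1]/img/@src')
                if len(img) == 1:
                    n += 1
                    pool.submit(down, gaoqing(str(img[0])), 'pool_' + str(n))
    # Leaving the with-block waits for every queued download to finish.

Capping max_workers keeps the number of simultaneous downloads predictable, which is gentler on both the site and the local connection than an unbounded flood of threads.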


