# Crawler: multi-threaded image downloader (meizitu.org listing pages)
import requests
import os
from lxml import etree
from multiprocessing.dummy import Pool
def save_img(url):
    """Download the image at *url* into the module-level directory ``c``.

    The file name is taken from the last 14 characters of the URL
    (a site-specific assumption about meizitu.org image URLs — TODO confirm).
    Raises requests.HTTPError on a non-2xx response instead of silently
    saving an HTML error page as an image.
    """
    name = url[-14:]
    print(name)
    # Timeout so one stalled download cannot hang a worker thread forever.
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    # NOTE(review): ``c`` is a module-level global set in the __main__ block.
    with open(os.path.join(c, name), 'wb') as f:
        f.write(resp.content)
if __name__ == '__main__':
    # Page range and target directory come from the user interactively.
    a = int(input('输入开始页: '))
    b = int(input('输入结束页: '))
    c = input('输入目录名: ')
    # exist_ok: rerunning the script into the same directory is not an error.
    os.makedirs(c, exist_ok=True)
    for i in range(a, b + 1):
        target = 'http://www.meizitu.org/page/' + str(i) + '/'
        # Timeout so a dead server does not hang the whole run.
        req = requests.get(url=target, timeout=30)
        soup = etree.HTML(req.text)
        # Thumbnail <img src> URLs on this listing page.
        url_list = soup.xpath('//*[@class="thumb"]/img/@src')
        # 12 worker threads download this page's images concurrently.
        pool = Pool(12)
        pool.map(save_img, url_list)
        pool.close()
        pool.join()
    print('\n' + '....下载完成....')
# Dynamically loaded images — alternative crawling approach (huaban.com boards)
import requests
import os
import re
from multiprocessing.dummy import Pool
def get_img_url(a, b, c):
    """Fetch the pin-id list for board *a* and download every pin's image.

    a: board id string; b: a known pin id used as the ``max`` pagination
    cursor (and downloaded as well); c: how many additional pins to request.
    Downloads run on a 14-thread pool via ``get_date``.
    """
    list_url = ('https://huaban.com/boards/' + a + '/?jxy8h0iv&max=' + b
                + '&limit=' + str(c) + '&wfl=1')
    list_req = requests.get(list_url).text
    # Narrow the page to the JSON fragment holding the pin list before
    # extracting ids (presumably between these two markers — site-specific).
    list_req = re.findall(r'category_name([\W\w]*?)app._csr', list_req)[0]
    url_pins = re.findall(r'pin_id":(.*?), "user_id', list_req)
    url_pins += [b]  # also download the cursor pin itself
    pool = Pool(14)
    # BUG FIX: imap() returns a lazy iterator whose results were never
    # consumed; map() blocks until every download task has finished.
    pool.map(get_date, url_pins)
    pool.close()
    pool.join()
def get_date(url):
    """Resolve a pin id to its full-size image URL and download it.

    *url* is a huaban pin id string; the image key is scraped from the
    pin's detail page and handed to ``save_img``.
    """
    pin_page = requests.get(url='https://huaban.com/pins/' + url + '/').text
    # Cut the page down to the fragment that holds the image metadata.
    fragment = re.findall('page(.*?)type', pin_page)[0]
    image_key = re.findall('"key":"(.*?)"', fragment)[0]
    save_img('http://hbimg.huabanimg.com/' + image_key)
def save_img(url):
    """Download the image at *url* into the module-level directory ``d``.

    The file name is sliced from characters [-30:-13] of the URL
    (site-specific assumption about huaban image keys — TODO confirm).
    Raises requests.HTTPError on a non-2xx response instead of silently
    saving an HTML error page as a .jpg.
    """
    name = url[-30:-13]
    print(url + '\n')
    # Timeout so one stalled download cannot hang a worker thread forever.
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    # NOTE(review): ``d`` is a module-level global set in the __main__ block.
    with open(os.path.join(d, name + '.jpg'), 'wb') as f:
        f.write(resp.content)
if __name__ == '__main__':
    print('仅支持:https://huaban.com/boards/*/' + '\n')
    a = input('输入要下载的链接码:')
    target = 'https://huaban.com/boards/' + a + '/'
    # Timeout so a dead server does not hang the whole run.
    req = requests.get(url=target, timeout=30).text
    # Narrow the board page to the fragment containing its pin list,
    # then grab the newest pin id to use as the pagination cursor.
    req = re.findall(r'category_name([\W\w]*?)app._csr', req)[0]
    b = re.findall(r'pin_id":(.*?), "user_id', req)[0]
    c = int(input('须要下载前多少张图片:')) - 1
    d = input('创建目录名: ')
    # exist_ok: rerunning the script into the same directory is not an error.
    os.makedirs(d, exist_ok=True)
    get_img_url(a, b, c)
    print('\n' + '....下载完成....')