# Scrape hotlink-protected (anti-leech) images
import urllib.request
from lxml import etree
import time
import os
# Request headers shared by every request in this script.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
# For anti-hotlink images the Referer must point at the site's home page.
'Referer': 'http://www.mmjpg.com/',
}
#随机名字函数
def random_string(length=5):
    """Return a random file-name stem of *length* characters.

    Characters are drawn (with replacement) from a fixed lowercase
    letter/digit alphabet, so the result is always filesystem-safe.
    """
    import random
    alphabet = 'zxcvbnmasdfghjkqwertyuio123456789'
    picked = []
    for _ in range(length):
        picked.append(random.choice(alphabet))
    return ''.join(picked)
def parse_img(img, page):
    """Parse one listing page and download every image it references.

    Args:
        img: raw HTML (bytes or str) of the listing page.
        page: page number being processed (kept for interface
            compatibility; not read in the body).

    Returns:
        None. Images are written to the ``meitu`` directory as a
        side effect, each under a random 5-character name plus the
        extension taken from the image URL.
    """
    tree = etree.HTML(img)
    img_urls = tree.xpath('//div[@class="main"]/div[@class="pic"]/ul/li/a/img/@src')
    # Target folder for the downloads. Create it up front so the
    # open() below cannot fail with FileNotFoundError on a fresh run.
    dirname = 'meitu'
    os.makedirs(dirname, exist_ok=True)
    for url in img_urls:
        print("开始爬取...")
        # Build the request with the shared headers; the Referer header
        # is what gets us past the site's anti-hotlink check.
        request = urllib.request.Request(url=url, headers=headers)
        # Use a context manager so the HTTP response is always closed
        # (the original leaked the connection).
        with urllib.request.urlopen(request) as response:
            data = response.read()
        # Random name + the original extension from the URL.
        filename = random_string() + '.' + url.split('.')[-1]
        path = os.path.join(dirname, filename)
        with open(path, 'wb') as fp:
            fp.write(data)
        print("结束爬取", filename)
        # Pause 2 seconds between images; going faster risks an IP ban.
        time.sleep(2)
    time.sleep(1)
#构建请求对象
def handle_request(url, page):
    """Build a Request for one page of the listing.

    Args:
        url: URL template containing a ``{}`` placeholder.
        page: page number substituted into the template.

    Returns:
        urllib.request.Request carrying the module-level headers.
    """
    page_url = url.format(page)
    return urllib.request.Request(url=page_url, headers=headers)
#主函数
def main():
    """Prompt for a page range and download every image on each page.

    Reads the start and end page numbers from stdin, then fetches each
    listing page and hands its HTML to parse_img, which performs the
    actual downloads.
    """
    start_page = int(input("起始页输入2"))
    end_page = int(input('结束页,每页15张图片,目前不知有多少页'))
    # Listing URL template; {} is filled with the page number.
    url = 'http://www.mmjpg.com/home/{}'
    for page in range(start_page, end_page + 1):
        request = handle_request(url, page)
        html = urllib.request.urlopen(request).read()
        # parse_img downloads as a side effect and returns None, so the
        # original's `img_urls = ...` assignment was dead code — just call it.
        parse_img(html, page)

if __name__ == '__main__':
    main()