Duitang (duitang.com) hosts a huge number of photos; let's use Python to download them.
Open the search bar and search for book: there are plenty of photos in the results. Open one of them and then click the photo itself, and you end up at a URL like https://b-ssl.duitang.com/uploads/item/201205/02/20120502002005_Aja53.jpeg, which is the real address of the image.
So the page can be found, but the search results contain far too many images to click into one by one just to dig out each real address; that would be hopelessly inefficient.
Open the browser's DevTools (Inspect), switch to the Network tab, and refresh the page.
Many files appear under Name; these are all the files that make up the page.
Open one of the image entries and click Preview: the image can be previewed, but both its size and its URL format differ from the real address we are looking for.
Scroll the page and new files keep appearing under Name. Click a JSON request that looks like
?kw=book&type=feed&include_fields=top_comments%2Cis_root%2Csource_link%2Citem%2Cbuyable%2Croot_id%2Cstatus%2Clike_count%2Clike_id%2Csender%2Calbum%2Creply_count%2Cfavorite_blog_id&type=&start=24&=1549346071315 and you will find fields like path: "https://b-ssl.duitang.com/uploads/item/201801/02/20180102151225_twrmN.jpeg". From this we can conclude that the page content is loaded via AJAX.
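Before writing any real code, it is worth confirming that this endpoint alone is enough to get the full-size image addresses. A minimal check (a sketch only; kw=book and start=0 are example values, and the exact response layout is not guaranteed):

import requests

resp = requests.get(
    'https://www.duitang.com/napi/blog/list/by_search/',
    params={'kw': 'book', 'type': 'feed', 'start': 0},
    headers={'User-Agent': 'Mozilla/5.0'},
)
for blog in resp.json()['data']['object_list'][:3]:
    photo = blog.get('photo') or {}
    print(photo.get('path'))  # full-size URLs like the .jpeg address above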
Construct the request for the JSON data:
def get_html(self):
    url = 'https://www.duitang.com/napi/blog/list/by_search/?'
    params = {
        'kw': self.kw,
        'type': 'feed',
        # plain commas; requests URL-encodes them (a pre-encoded '%2C' would get double-encoded)
        'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
        '_type': '',
        'start': self.start
    }
    headers = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    try:
        response = requests.get(url, params=params, headers=headers)
        if response.status_code == 200:
            return response.text
    except requests.ConnectionError as e:
        print(e)
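To sanity-check this method on its own, a quick sketch (it assumes the Spider class wrapper from the final script, and 'book' is just an example keyword):

spider = Spider('book', start=0)
text = spider.get_html()
print(text[:300] if text else 'request failed')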
Validate the fields and extract the image paths:
def test(self, response):
    # Walk data -> object_list -> photo -> path and yield each image URL.
    result = json.loads(response)
    data = result.get('data')
    if data:
        object_list = data.get('object_list')
        if object_list:
            for i in object_list:
                items = {}
                photo = i.get('photo')
                if photo:
                    path = photo.get('path')
                    if path:
                        items['path'] = path
                        yield items
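To make the nesting concrete, here is what test() yields for a trimmed, made-up response of the same shape (only the data -> object_list -> photo -> path chain the method actually reads; again this assumes the Spider class from the final script):

import json

fake = json.dumps({
    'data': {
        'object_list': [
            {'photo': {'path': 'https://b-ssl.duitang.com/uploads/item/example.jpeg'}},  # made-up path
            {'photo': None},  # entries without a photo are skipped
        ]
    }
})
for item in Spider('book').test(fake):
    print(item)  # {'path': 'https://b-ssl.duitang.com/uploads/item/example.jpeg'}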
Use requests again, this time to download the photos themselves:
def get_html_2(self, items):
    try:
        url = items.get('path')
        headers = {
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
        if 'gif_jpeg' in url:
            # drop the trailing '_jpeg' to get the real .gif address
            response = requests.get(url[:-5], headers=headers)
            if response.status_code == 200:
                return ('gif', response)
        elif 'png' in url:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return ('png', response)
        elif 'jpg' in url or 'jpeg' in url:  # test each substring separately
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return ('jpg', response)
        else:
            print('Unknown format.')
    except requests.ConnectionError as e:
        print(e)
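One detail worth spelling out: Duitang serves GIFs under addresses ending in gif_jpeg, and slicing off the last five characters ('_jpeg') leaves the real .gif URL, which is why the gif branch requests url[:-5]. A tiny illustration (the filename is made up):

url = 'https://b-ssl.duitang.com/uploads/item/example.gif_jpeg'  # made-up name
print(url[:-5])  # https://b-ssl.duitang.com/uploads/item/example.gif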
Finally, save the photos locally:
def write_into_file(self, format, response):
    if not os.path.exists(os.path.join(DIST_DIR, self.kw)):
        os.makedirs(os.path.join(DIST_DIR, self.kw))
    if format == 'gif':
        file_path = '{0}/{1}/{2}.{3}'.format(
            DIST_DIR, self.kw,
            md5(response.content).hexdigest(), 'gif')
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.content)
        else:
            print('Already Downloaded {0}.gif'.format(
                md5(response.content).hexdigest()))
    elif format == 'png':
        file_path = '{0}/{1}/{2}.{3}'.format(
            DIST_DIR, self.kw,
            md5(response.content).hexdigest(), 'png')
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.content)
        else:
            print('Already Downloaded {0}.png'.format(
                md5(response.content).hexdigest()))
    elif format == 'jpg':
        file_path = '{0}/{1}/{2}.{3}'.format(
            DIST_DIR, self.kw,
            md5(response.content).hexdigest(), 'jpg')
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.content)
        else:
            print('Already Downloaded {0}.jpg'.format(
                md5(response.content).hexdigest()))
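Since the three branches differ only in the extension string, the same behavior can also be written once. A compact drop-in sketch of the same method (equivalent logic, not the version kept in the final script below):

from hashlib import md5
import os

def write_into_file(self, format, response):
    # Name each file after the MD5 of its bytes, so identical images are stored only once.
    target_dir = os.path.join(DIST_DIR, self.kw)
    os.makedirs(target_dir, exist_ok=True)
    name = '{0}.{1}'.format(md5(response.content).hexdigest(), format)
    file_path = os.path.join(target_dir, name)
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(response.content)
    else:
        print('Already Downloaded {0}'.format(name))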
Now put everything together. The main loop pages through the search results 24 at a time, matching the start parameter seen in the Network tab:
import json
import os
import time
from hashlib import md5

import requests

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DIST_DIR = os.path.join(BASE_DIR, 'dist')


class Spider:

    def __init__(self, kw, start=0):
        self.kw = kw
        self.start = start

    def get_html(self):
        url = 'https://www.duitang.com/napi/blog/list/by_search/?'
        params = {
            'kw': self.kw,
            'type': 'feed',
            # plain commas; requests URL-encodes them (a pre-encoded '%2C' would get double-encoded)
            'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
            '_type': '',
            'start': self.start
        }
        headers = {
            'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
        try:
            response = requests.get(url, params=params, headers=headers)
            if response.status_code == 200:
                return response.text
        except requests.ConnectionError as e:
            print(e)

    def test(self, response):
        # Walk data -> object_list -> photo -> path and yield each image URL.
        result = json.loads(response)
        data = result.get('data')
        if data:
            object_list = data.get('object_list')
            if object_list:
                for i in object_list:
                    items = {}
                    photo = i.get('photo')
                    if photo:
                        path = photo.get('path')
                        if path:
                            items['path'] = path
                            yield items

    def get_html_2(self, items):
        try:
            url = items.get('path')
            headers = {
                'User-Agent':
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
            }
            if 'gif_jpeg' in url:
                # drop the trailing '_jpeg' to get the real .gif address
                response = requests.get(url[:-5], headers=headers)
                if response.status_code == 200:
                    return ('gif', response)
            elif 'png' in url:
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    return ('png', response)
            elif 'jpg' in url or 'jpeg' in url:  # test each substring separately
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    return ('jpg', response)
            else:
                print('Unknown format.')
        except requests.ConnectionError as e:
            print(e)

    def write_into_file(self, format, response):
        if not os.path.exists(os.path.join(DIST_DIR, self.kw)):
            os.makedirs(os.path.join(DIST_DIR, self.kw))
        if format == 'gif':
            file_path = '{0}/{1}/{2}.{3}'.format(
                DIST_DIR, self.kw,
                md5(response.content).hexdigest(), 'gif')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(response.content)
            else:
                print('Already Downloaded {0}.gif'.format(
                    md5(response.content).hexdigest()))
        elif format == 'png':
            file_path = '{0}/{1}/{2}.{3}'.format(
                DIST_DIR, self.kw,
                md5(response.content).hexdigest(), 'png')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(response.content)
            else:
                print('Already Downloaded {0}.png'.format(
                    md5(response.content).hexdigest()))
        elif format == 'jpg':
            file_path = '{0}/{1}/{2}.{3}'.format(
                DIST_DIR, self.kw,
                md5(response.content).hexdigest(), 'jpg')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(response.content)
            else:
                print('Already Downloaded {0}.jpg'.format(
                    md5(response.content).hexdigest()))


def main():
    print('Enter the keyword: ', end='')
    kw = input()
    # kw = 'book'
    start_time = time.time()
    counter = 0
    for start in range(0, 3600, 24):
        spider = Spider(kw, start=start)
        response = spider.get_html()
        if not response:
            break
        # materialize the generator so the emptiness check below actually works
        items = list(spider.test(response))
        if items:
            for item in items:
                result = spider.get_html_2(item)
                if not result:  # unknown format or failed request
                    continue
                format, response = result
                if format == 'gif':
                    print('Downloading: {0} ({1:.2f}s elapsed)'.format(
                        item['path'][:-5], time.time() - start_time))
                else:
                    print('Downloading: {0} ({1:.2f}s elapsed)'.format(
                        item['path'], time.time() - start_time))
                counter += 1
                spider.write_into_file(format, response)
        else:
            break
    print('Got {0} images in {1:.2f}s.'.format(counter, time.time() - start_time))


if __name__ == '__main__':
    main()
And with that, the photos on Duitang can be downloaded.