I didn't write this program myself; I only added some comments.
This crawler scrapes the links for 7,000+ books from allitebooks.com; you can batch-download them all with Thunder (Xunlei), or pick just the ones you like. The flow: get_list() walks the paginated book list, getBookUrl() pulls the download link from each detail page, savelink() appends it to data/link.txt, and download() optionally fetches the PDF with a progress bar.
The comments are fairly detailed, so it should be readable even if your fundamentals are shaky.
from requests_html import HTMLSession  # used to build a browser-like session
import requests
import random  # randomly pick a User-Agent
import sys  # progress output while downloading
import os

session = HTMLSession()  # build a browser-like session
list_url = 'http://www.allitebooks.com/page/'  # paginated list URL to crawl

# a list of User-Agent strings used to impersonate different browsers
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
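# Note: this list only feeds download() below; the requests-html session
# used by get_list() and getBookUrl() still sends its own default User-Agent.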
# collect the detail-page links from one list page
def get_list(url):
    response = session.get(url)  # open the list page
    all_link = response.html.find('.entry-title a')  # every book-title link on the page
    for link in all_link:
        details_link = link.attrs['href']
        getBookUrl(details_link)
# fetch the download link from one detail page
def getBookUrl(details_link):
    response = session.get(details_link)  # open the detail page
    dll = response.html.find('.download-links a', first=True)  # first download link only
    if dll is not None:  # skip pages without a download link
        dll = dll.attrs['href']
        print(dll)
        # save this book's download link
        savelink(dll)
        # download the book
        download(dll)  # you can skip the download at first; it is slow
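
# Note: savelink() and download() each check for '.pdf' themselves,
# so non-PDF download links are silently ignored.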
# append a book's download link to the link list file
def savelink(url):
    # the file that collects all the links
    filename = 'data/' + "link.txt"
    if ".pdf" in url:
        with open(filename, 'a') as f:
            f.write(url + '\n')
# download one file
def download(url):
    # pick a random browser User-Agent
    headers = {"User-Agent": random.choice(USER_AGENTS)}
    # take the file name from the last path segment of the URL
    filename = url.split('/')[-1]
    # only handle direct .pdf links
    if ".pdf" in url:
        file = 'data/' + filename
        with open(file, 'wb') as f:
            print("Downloading {}".format(filename))
            response = requests.get(url, stream=True, headers=headers)
            # total file size, if the server reports it
            total_length = response.headers.get('content-length')
            # if the size is unknown, write the whole body at once
            if total_length is None:
                f.write(response.content)
            else:
                # download chunk by chunk with a progress bar
                dl = 0
                total_length = int(total_length)  # file size in bytes
                fsize = total_length / 1024
                print("File size: {:.1f} KB, downloading...".format(fsize))
                for data in response.iter_content(chunk_size=4096):  # read 4096 bytes per chunk
                    dl += len(data)
                    f.write(data)
                    done = int(100 * dl / total_length)
                    sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (100 - done)))  # redraw the progress bar
                    sys.stdout.write(" downloaded: {:.1f} KB".format(dl / 1024))
                    sys.stdout.flush()
        print('\n' + filename + ' download complete!')
if __name__ == '__main__':
    # create a folder to hold the links and files
    if not os.path.exists('data'):
        os.makedirs('data')
    # start crawling; the list pages are numbered from 1
    for x in range(1, 756):
        print('Current page: ' + str(x))
        get_list(list_url + str(x))
    print("All done")