Scraping the Novel 《放开那个女巫》
As before, we use highly concurrent coroutines (gevent) to drive the download.
Frankly, by this point, once you have concurrency under your belt, what separates one crawler from another is no longer which library you use.
It is the crawling strategy.
For example, my strategy here is: every chapter must be fetched, and on failure we simply keep retrying until it succeeds. (Each round downloads 200 chapters, and we must wait until all of them have finished before merging and moving on to the next 200. This strategy guarantees robustness, but its speed leaves a lot to be desired. In the next step we will improve on it!) A minimal sketch of this batch-and-join pattern follows; the full script comes after it.
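Here is the pattern in isolation, stripped of the scraping details. This is only an illustration: fetch() is a stand-in for the real downloader, and the chapter count of 450 is made up.

from gevent import monkey
monkey.patch_all()
import gevent

def fetch(i):
    gevent.sleep(0.1)           # stands in for one chapter download
    print('chapter %d done' % i)

chapters = list(range(450))     # pretend the book has 450 chapters
steps = 200
for start in range(0, len(chapters), steps):
    batch = [gevent.spawn(fetch, i) for i in chapters[start:start + steps]]
    gevent.joinall(batch)       # the barrier: nothing proceeds until the whole batch is done
    print('batch starting at %d merged' % start)

The full script: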
# Monkey-patch the standard library before requests is imported,
# so its sockets become cooperative.
from gevent import monkey
monkey.patch_all(select=False)

import os
import random
import re
import time
from urllib import parse

import gevent
import requests
from lxml import etree
# A few sample proxies (long since expired; substitute live ones).
# requests expects lowercase scheme keys in the proxies dict.
IPs = [{'https': 'https://182.114.221.180:61202'},
       {'https': 'https://60.162.73.45:61202'},
       {'https': 'https://113.13.36.227:61202'},
       {'https': 'https://1.197.88.101:61202'}]
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': '__cfduid=d820fcba1e8cf74caa407d320e0af6b5d1518500755; UM_distinctid=1618db2bfbb140-060057ff473277-4323461-e1000-1618db2bfbc1e4; CNZZDATA1272873873=2070014299-1518497311-https%253A%252F%252Fwww.baidu.com%252F%7C1520689081; yjs_id=5a4200a91c8aa5629ae0651227ea7fa2; ctrl_time=1; jieqiVisitTime=jieqiArticlesearchTime%3D1520693103'
}
def setDir():
    # Create the working directory for the per-chapter files if it is missing.
    if 'Noval' not in os.listdir('./'):
        os.mkdir('./Noval')
def getNoval(url, index):
    # Retry forever until this chapter downloads successfully -- this is the
    # "every chapter must be fetched" half of the strategy.
    while True:
        try:
            IP = random.choice(IPs)
            res = requests.get(url, headers=HEADERS, proxies=IP)
            res.encoding = 'GB18030'
            html = res.text.replace('\xa0', ' ')  # swap the &nbsp; character for a plain space
            page = etree.HTML(html)
            content = page.xpath('//div[@id="content"]')
            ps = page.xpath('//div[@class="bookname"]/h1')
            if len(ps) != 0:
                s = ps[0].text + '\n'
                s = s + content[0].xpath("string(.)")
                with open('./Noval/%d.txt' % index, 'w', encoding='gb18030', errors='ignore') as f:
                    f.write(s)
        except Exception:
            continue
        else:
            break
def getContentFile(url):
    # Fetch the table of contents; return the chapter URLs and the book title.
    IP = random.choice(IPs)
    res = requests.get(url, headers=HEADERS, proxies=IP)
    res.encoding = 'GB18030'
    page = etree.HTML(res.text)
    bookname = page.xpath('//div[@id="info"]/h1')[0].xpath('string(.)')
    dl = page.xpath('//div[@id="list"]/dl/dd/a')
    # Chapter hrefs are site-relative, so prepend scheme://netloc.
    splitHTTP = parse.urlsplit(url)
    base = splitHTTP.scheme + '://' + splitHTTP.netloc
    return list(map(lambda x: base + x.get('href'), dl)), bookname
def BuildGevent(baseurl):
    content, bookname = getContentFile(baseurl)
    steps = 200
    length = len(content)
    count = 0
    name = "%s.txt" % bookname
    while count * steps < length:
        # Spawn one coroutine per chapter in this batch of `steps` chapters,
        # then block until every one of them has finished.
        WaitingList = [gevent.spawn(getNoval, content[i + count * steps], i + count * steps)
                       for i in range(steps) if i + count * steps < length]
        gevent.joinall(WaitingList)
        # Merge this batch's per-chapter files in numeric order,
        # deleting each one after it has been appended.
        NovalFile = list(filter(lambda x: x[:x.index('.')].isdigit(), os.listdir('./Noval')))
        NovalFile.sort(key=lambda x: int(re.match(r'\d+', x).group()))
        String = ''
        for dirFile in NovalFile:
            with open('./Noval/' + dirFile, 'r', encoding='gb18030', errors='ignore') as f:
                String = String + '\n' + f.read()
            os.remove('./Noval/%s' % dirFile)
        mode = 'w' if count == 0 else 'a'  # create the book file on the first batch, append after
        with open('./Noval/' + name, mode, encoding='gb18030', errors='ignore') as ff:
            ff.write(String)
        count += 1
if __name__ == '__main__':
starttime = time.time()
setDir()
url = 'http://www.biquge.com.tw/16_16588/'
BuildGevent(url)
endtime = time.time()
print("Total use time: %.6f" % (endtime - starttime))