Crawler Day 4: Scraping the Novels I Want to Read

Preface: I usually read novels in a free app on my phone, but today's apps either give you a few free days and then start charging, or stay free but bombard you with ads until it becomes unbearable. Free novels can be found on the PC, but sitting in front of a computer is inconvenient, and the sites keep popping open ad pages; reading in a mobile browser is no better.
So I decided to write a program myself and scrape the novels I want to read.
Without further ado, the code is below.

Update, 2019-03-27: biquge (笔趣阁) changed its URL and shuffled some pages between http and https, which broke the old version, so I have updated the code and added support for resuming the crawl from a breakpoint. Next I plan to add a proxy pool, distributed crawling, and so on (or maybe just switch to a crawling framework such as Scrapy?).
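The proxy pool and distributed crawling are still only plans. As a minimal sketch of the proxy idea with requests (the proxy addresses below are placeholders, not a real pool), something like this could be put in front of the requests.get calls:

import random
import requests

# Placeholder addresses -- a real proxy pool would supply live proxies
proxy_pool = ['http://127.0.0.1:8080', 'http://127.0.0.1:8081']
proxy = random.choice(proxy_pool)
r = requests.get('http://www.biquyun.com/',
                 proxies={'http': proxy, 'https': proxy},
                 timeout=10, verify=False)
print(r.status_code)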
Update, 2018-11-10: added the Selenium code, so the script can automatically scrape the chapters I need after I type in the book title and the name of the chapter I read most recently.
Note: Selenium requires the matching browser driver. I use Chrome here, so you must install the Chrome browser first and then download a chromedriver whose version matches it.
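If chromedriver is not on your PATH, Selenium can be pointed at it explicitly. A minimal sketch, assuming Selenium 3.x and a made-up driver path:

from selenium import webdriver

# Hypothetical path -- adjust to wherever the matching chromedriver was saved
browser = webdriver.Chrome(executable_path=r'C:\tools\chromedriver.exe')
browser.get('http://www.biquyun.com/')
print(browser.title)
browser.quit()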

import requests
from bs4 import BeautifulSoup
import re
import datetime
from selenium import webdriver
import time
import os

# First grab the request headers from Chrome DevTools (Network tab), including the User-Agent and Cookie
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Cookie': '__jsluid=2e1d5b068aad24f7ba11f9a858d4e6e9; jieqiVisitTime=jieqiArticlesearchTime%3D1553649546',
    'Host': 'www.biquyun.com'
}

def get_bookurl(book_name):
    '''Use selenium to find the book id for the given book title'''
    browser = webdriver.Chrome()
    url = "http://www.biquyun.com/"
    browser.get(url)
    search_box = browser.find_element_by_xpath('//*[@id="wd"]')  # locate the search box
    search_box.send_keys(book_name)
    button = browser.find_element_by_xpath('//*[@id="sss"]')  # locate the search button
    button.click()
    time.sleep(1)
    # The site opens the search result in a new window, so switch to it before reading the URL
    current_window = browser.current_window_handle
    all_handles = browser.window_handles
    for handle in all_handles:
        if handle != current_window:
            browser.switch_to.window(handle)
    time.sleep(1)
    current_url = browser.current_url
    browser.quit()
    print(current_url)
    book_id = re.search('^https?://www.biquyun.com/(.*?)/', current_url).group(1)
    print(book_id)
    return book_id

def get_novel(href, book_id):
    '''Fetch a chapter's title and body text'''
    url = r"http://www.biquyun.com/" + book_id + "/" + href + ".html"
    r = requests.get(url=url, headers=headers, verify=False)
    r.encoding = 'gbk'
    # time.sleep(1)
    soup = BeautifulSoup(r.text, 'lxml')
    # Chapter title
    novel_title = soup.find(name='div', attrs={'class': 'bookname'}).h1.text.replace(u'\xa0', u' ').replace(u'\ufffd', u' ')
    # print(novel_title)
    # Chapter body
    novel_content = soup.find(name='div', attrs={'id': 'content'}).text.replace(u'\xa0', u' ').replace(u'\ufffd', u' ').strip()
    # print(novel_content)
    return novel_title, novel_content
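
To sanity-check get_novel on its own, it can be called with a single chapter; the book id and chapter number below are made-up placeholders:

# Hypothetical ids, for illustration only
title, content = get_novel('8549292', '15_15766')
print(title)
print(content[:100])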

def make_novel(novel_title, novel_content, book_name):
    '''Append one chapter to the output text file'''
    with open(book_name + '.txt', 'a', encoding='gbk') as file:
        file.write(novel_title)
        file.write('\r\n')
        file.write(novel_content)
        file.write('\r\n')
        file.write('\r\n')

def get_noveltitle(novel_title):
    '''Record the title of each crawled chapter so the crawl can later resume from a breakpoint'''
    with open('已爬取小说章节.txt', 'a', encoding='gbk') as file:
        file.write('\n')
        file.write(novel_title)

def get_last_line(filename):
    '''Return the last line of a text file (used to find where to resume)'''
    try:
        filesize = os.path.getsize(filename)
        if filesize == 0:
            return None
        else:
            with open(filename, 'rb') as fp:  # to seek from the end, the file must be opened in 'rb' mode
                offset = -50  # initial backwards offset from the end of the file
                while True:
                    if -offset > filesize:  # the offset must never exceed the file size
                        offset = -filesize
                    fp.seek(offset, 2)  # seek `offset` bytes back from EOF (whence=2)
                    lines = fp.readlines()  # read from the current position to EOF
                    if len(lines) >= 2 or -offset >= filesize:  # the last line is now fully included
                        return lines[-1].decode('gbk')
                    else:
                        offset *= 2  # not far enough back yet, enlarge the offset
    except FileNotFoundError:
        print(filename + ' not found!')
        return None

# print(get_last_line('已爬取小说章节.txt'))
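
The resume logic simply reads that file back. A short illustration of how the last crawled chapter is recovered (the file name is the same one written by get_noveltitle above):

last_chapter = get_last_line('已爬取小说章节.txt')
if last_chapter is None:
    print('Nothing recorded yet -- start from the chapter the user typed in')
else:
    print('Resuming after: {}'.format(last_chapter))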

def crawler(last_chapter, n):
    '''Crawl the novel, with support for resuming from a breakpoint'''
    if n <= 5:
        n = n + 1
        # Every <dd> element after the last-read chapter link is a chapter still to crawl
        dd = soup.find(name='a', text=re.compile(last_chapter)).find_all_next(name='dd')
        for i in range(len(dd)):
            href_temp = str(dd[i])
            # print(href_temp)
            # Pull the chapter id out of the <dd> element's href
            href = re.search(r'^<dd><a href="/.*?/(.*?)\.html">.*?</a></dd>', href_temp).group(1)
            # print(href)
            try:
                novel_title = str(get_novel(href, book_id)[0]).strip()
                novel_content = get_novel(href, book_id)[1]
                get_noveltitle(novel_title)
                if novel_title.startswith('第'):
                    make_novel(novel_title, novel_content, book_name)
                    print('已爬取小说:《{}》'.format(novel_title))
                else:
                    print('《{}》不是小说正文,已忽略'.format(novel_title))
            except:
                print('第{}次出现错误,开始断点续爬!'.format(n))
                last_chapter = get_last_line('已爬取小说章节.txt')
                print('从《{}》继续开始爬'.format(last_chapter))
                time.sleep(3)
                crawler(last_chapter, n)
    else:
        print('已经迭代5次了,可能被ban ip了,请检查!')
        return None

if __name__ == '__main__':
    starttime = datetime.datetime.now()
    book_name = input("请输入书名:")
    last_chapter = input('请输入最近看的一章的名称(如果新书直接回车):')
    book_id = get_bookurl(book_name)
    url = r"https://www.biquyun.com/" + book_id + "/"
    requests.packages.urllib3.disable_warnings()  # suppress the InsecureRequestWarning caused by verify=False
    r = requests.get(url=url, headers=headers, verify=False)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, 'lxml')
    n = 1
    crawler(last_chapter, n)
    endtime = datetime.datetime.now()
    total_time = (endtime - starttime).seconds
    print("小说爬取完毕,总共耗时{}秒".format(total_time))

When the crawl finishes you get a single txt file; import it into a decent reader app on your phone and you are all set.
