Python crawler - source code
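Full source of a small crawler that downloads novel chapters from xinshuhaige.org. It rotates through a pool of spoofed User-Agent headers, sleeps a random 1-3 seconds between requests, and saves each chapter as a numbered .txt file under ./text/<book_code>/.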

import os
import requests
from bs4 import BeautifulSoup as bs
import time, random  # random pauses between requests keep the load on the site low and reduce the risk of an IP ban


class worm:
    """A small crawler that downloads novel chapters from xinshuhaige.org."""

    def __init__(self):
        self.book_code = 782              # ID of the book currently being crawled
        self.book_code_list = [782, 801]  # book IDs handled by auto_update()
        self.testFlag = True              # when True, only fetch the chapters in testRange
        self.testRange = range(0, 10)
        self.basic_url = "https://www.xinshuhaige.org/"  # the scheme (https://) must be included
        self.headers = [
            'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.76 Mobile Safari/537.36',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
            'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
            'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36",
            "Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36",
            "Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
            "Opera/9.80 (Windows NT 5.1; U; cs) Presto/2.7.62 Version/11.01",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
            "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
        ]  # an ad-hoc pool of spoofed User-Agent strings to rotate through

    def open_url(self, url):
        # fetch a page with a randomly chosen User-Agent and decode it as UTF-8
        response = requests.get(url, headers={
            "user-agent": random.choice(self.headers)}).content
        return response.decode("utf-8")
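
    # A more defensive variant (a sketch, not part of the original post):
    # add a request timeout and fail fast on HTTP errors instead of decoding
    # an error page. The 10-second timeout is an assumption, not a site limit.
    def open_url_safe(self, url):
        response = requests.get(
            url,
            headers={"user-agent": random.choice(self.headers)},
            timeout=10,
        )
        response.raise_for_status()  # raises on 4xx/5xx responses
        return response.content.decode("utf-8")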

    def get_chapter_list(self, response):
        # the chapter links live inside <div class="novel_list" id="novel<book_code>">
        soup = bs(response, 'html.parser')
        chapter_list = soup.find('div', class_="novel_list", id="novel" + str(self.book_code)).find_all('a')
        return [chapter['href'] for chapter in chapter_list]

    def get_chapter_page(self, basic_response):
        # the chapter index is paginated; collect the href of every index page
        soup = bs(basic_response, "html.parser")
        pagelinks = soup.find("div", class_="pagelink", id="pagelink").find_all('a')
        return [pagelink['href'] for pagelink in pagelinks]

    def get_content(self, response):
        soup = bs(response, "html.parser")
        title = soup.find("div", class_="read_title").text
        # the site separates paragraphs with spaces; rebuild them as indented lines
        content = '\n\t'.join(soup.find("div", class_="content").text.split(' '))
        return title, content

    def open_file_and_write(self, name, content, mode='w', encoding='utf-8', retry=False):
        # write one chapter to ./text/<book_code>/<name>.txt, creating the
        # book's folder on the first FileNotFoundError
        try:
            with open("./text/%s/%s.txt" % (self.book_code, name), mode, encoding=encoding) as f:
                f.write(str(content))
        except FileNotFoundError:
            if retry:
                raise  # the folder exists but writing still failed; give up
            os.makedirs("./text/%s/" % self.book_code, exist_ok=True)
            if self.testFlag:
                print("Created new folder %s" % self.book_code)
            self.open_file_and_write(name, content, retry=True)

    def file_walker(self):
        # chapter numbers already saved on disk, parsed from "<num>_<title>.txt"
        name_list = []
        for root, dirs, files in os.walk("./text/%s/" % self.book_code):
            for file in files:
                name_list.append(file.split('.')[0].split('_')[0])
        return name_list

    def manual_access(self):
        if self.testFlag:
            print("fetching the index page for book %s" % self.book_code)
        response = self.open_url(self.basic_url + str(self.book_code) + "/")
        pagelinks = self.get_chapter_page(response)

        # walk every index page and collect all chapter links
        chapter_list = []
        for pagelink in pagelinks:
            if self.testFlag:
                print("getting page list and chapters")
            time.sleep(random.randint(1, 2) + random.random())  # polite random pause
            chapter_list.extend(self.get_chapter_list(self.open_url(self.basic_url[:-1] + pagelink)))

        # chapters already on disk, identified by their leading number
        chapter_name = self.file_walker()

        if self.testFlag:
            index = self.testRange  # test mode: only the first few chapters
        else:
            index = range(len(chapter_list))
        for i in index:
            if str(i + 1) in chapter_name:
                continue  # already downloaded
            print("getting chapter " + str(i + 1))
            title, content = self.get_content(self.open_url(self.basic_url[:-1] + chapter_list[i]))
            self.open_file_and_write(str(i + 1) + "_" + title, title + content)
            time.sleep(random.randint(1, 2) + random.random())  # polite random pause

    def auto_update(self):
        # crawl every book in book_code_list, fetching the full chapter range
        self.testFlag = False
        for book_code in self.book_code_list:
            self.book_code = book_code
            self.manual_access()

if __name__ == "__main__":
    worm().manual_access()
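
# Alternative entry point (a sketch): auto_update() turns off testFlag and
# downloads the complete chapter list for every ID in book_code_list.
#
#     w = worm()
#     w.book_code_list = [782, 801]  # book IDs from the site's chapter-index URLs
#     w.auto_update()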
    
