Crawling 100 Baidu Baike entries with Python 3

# -*- coding: utf-8 -*-

# Import the modules; each module defines one class used by the spider
from baike_spider import url_manage, html_downloader, html_parse, html_outputer

class SpiderMain(object):

    def __init__(self):
        # Initialize each component of the spider
        self.urls = url_manage.UrlManage()                   # URL manager
        self.downloader = html_downloader.HtmlDownLoader()   # downloader
        self.parse = html_parse.HtmlParse()                  # HTML parser
        self.outputer = html_outputer.HtmlOutputer()         # outputer

    def craw(self, root_url):  # spider scheduler
        count = 1
        self.urls.add_new_url(root_url)  # seed the URL manager with the entry URL
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()  # take one uncrawled URL
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parse.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)  # add the newly found URLs in bulk
                print(new_urls)
                self.outputer.collect_data(new_data)
                if count >= 100:  # stop after 100 entries
                    break
                count += 1
            except Exception as e:
                print(str(e))  # report the error and move on to the next URL
        self.outputer.html_output()  # write everything collected to output.html

if __name__ == "__main__":  # entry point
    root_url = "https://baike.baidu.com/item/Python"  # entry URL
    obj_spider = SpiderMain()   # create the scheduler
    obj_spider.craw(root_url)   # start crawling from the entry URL
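The import at the top assumes the other four classes live in a baike_spider package. A minimal layout that matches it could look like the sketch below; the file names are an assumption inferred from the import statement, not stated in the post:

baike_spider/
    __init__.py
    spider_main.py        # SpiderMain, the scheduler above
    url_manage.py         # UrlManage
    html_downloader.py    # HtmlDownLoader
    html_parse.py         # HtmlParse
    html_outputer.py      # HtmlOutputer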

class UrlManage(object):

    def __init__(self):  # initialize the two URL sets
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):  # add a single new URL
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):  # add new URLs in bulk
        if urls is None or len(urls) == 0:  # nothing to add
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):  # is there any URL left to crawl?
        return len(self.new_urls) != 0

    def get_new_url(self):  # take one URL to crawl
        new_url = self.new_urls.pop()   # remove it from the pending set
        self.old_urls.add(new_url)      # mark it as already crawled
        return new_url                  # hand it back to the scheduler
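A quick standalone check of the deduplication behaviour (illustrative only, not part of the original post):

manager = UrlManage()
manager.add_new_url("https://baike.baidu.com/item/Python")
manager.add_new_url("https://baike.baidu.com/item/Python")  # duplicate, ignored
print(manager.has_new_url())   # True
url = manager.get_new_url()    # moves the URL into old_urls
manager.add_new_url(url)       # already crawled, ignored
print(manager.has_new_url())   # False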


import urllib.request

class HtmlDownLoader(object):

    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:  # treat anything other than HTTP 200 as a failure
            return None
        return response.read()
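Baidu Baike may serve an empty or blocked page to clients that do not send a browser-like User-Agent. If download keeps returning nothing, one common tweak is to build the request with urllib.request.Request and an explicit header. A minimal sketch of a drop-in variant of download (the header value is only an example):

import urllib.request

class HtmlDownLoader(object):

    def download(self, url):
        if url is None:
            return None
        # send a browser-like User-Agent (example value) to reduce the chance of being blocked
        req = urllib.request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
        response = urllib.request.urlopen(req)
        if response.getcode() != 200:
            return None
        return response.read()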


import urllib.parse  # in Python 3, urljoin lives in the urllib.parse module
import re
from bs4 import BeautifulSoup

class HtmlParse(object):

    def _get_new_urls(self, page_url, soup):
        # entry links look like /item/xxx
        new_urls = set()
        links = soup.find_all("a", href=re.compile(r"/item/"))
        for link in links:
            new_url = link["href"]
            new_full_url = urllib.parse.urljoin(page_url, new_url)  # make the link absolute
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data["url"] = page_url
        # title node: <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find("dd", class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data["title"] = title_node.get_text()
        # summary node: <div class="lemma-summary">
        summary_node = soup.find("div", class_="lemma-summary")
        if summary_node is None:
            return
        res_data["summary"] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, "html.parser", from_encoding="utf-8")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
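A small self-contained check of the parser against a hand-written snippet (the HTML below is a simplified stand-in for the real page markup):

html = b'''
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/item/Guido">Guido</a>
</body></html>
'''
parser = HtmlParse()
urls, data = parser.parse("https://baike.baidu.com/item/Python", html)
print(urls)             # {'https://baike.baidu.com/item/Guido'}
print(data["title"])    # Python
print(data["summary"])  # Python is a programming language.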

class HtmlOutputer(object):

    def __init__(self):
        self.datas = []  # the collected entry dictionaries

    def collect_data(self, data):  # store one parsed entry
        if data is None:
            return
        self.datas.append(data)

    def html_output(self):
        # write the collected entries into a simple HTML table
        export = open("output.html", "w", encoding="utf-8")
        export.write("<html>")
        export.write("<body>")
        export.write("<table>")
        for data in self.datas:
            export.write("<tr>")
            export.write("<td>%s</td>" % data["url"])
            export.write("<td>%s</td>" % data["title"])
            export.write("<td>%s</td>" % data["summary"])
            export.write("</tr>")
        export.write("</table>")
        export.write("</body>")
        export.write("</html>")
        export.close()
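The summary text can contain characters such as < or &, which would break the generated table. An optional refinement (not in the original code) is to escape the values with the standard-library html module before writing them:

import html  # standard library

for data in self.datas:
    export.write("<tr>")
    export.write("<td>%s</td>" % html.escape(data["url"]))
    export.write("<td>%s</td>" % html.escape(data["title"]))
    export.write("<td>%s</td>" % html.escape(data["summary"]))
    export.write("</tr>")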
