Python Crawler Learning, Part 1

Course goals

  • Crawl the titles and briefs of the Baidu Baike Python entry and several related entry pages
  • Save the data as an HTML table

The program consists of five modules

1. spider_main.py

Main crawler scheduler: coordinates the other modules so the whole program runs end to end.
The code:

import url_manager
import html_downloader
import html_parser
import html_outputer

class SpiderMain(object):
    """爬虫的主调度器"""
    def __init__(self):
        self.urlManager = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, url):
        # seed the manager with root_url, the first URL to crawl
        count = 1
        self.urlManager.add_new_url(url)
        while self.urlManager.has_new_url():
            try:
                new_url = self.urlManager.get_new_url()
                print('{0}: craw {1}'.format(count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                self.urlManager.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)

                if count == 100:
                    break

                count += 1
            except Exception:
                print('{0}: craw {1} failed'.format(count, new_url))

        self.outputer.output()

if __name__ == '__main__':
    root_url = "http://baike.baidu.com/item/Python"
    spiderMain = SpiderMain()
    spiderMain.craw(root_url)
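The 100-page cap is hard-coded inside craw(). As a small variation (my own sketch, not part of the original code), the cap can be made a parameter so short test runs are easy; this is a drop-in replacement for SpiderMain.craw:

def craw(self, url, max_pages=100):
    """Same loop as above, but with the page cap as a parameter."""
    count = 1
    self.urlManager.add_new_url(url)
    while self.urlManager.has_new_url() and count <= max_pages:
        try:
            new_url = self.urlManager.get_new_url()
            print('{0}: craw {1}'.format(count, new_url))
            html_cont = self.downloader.download(new_url)
            new_urls, new_data = self.parser.parser(new_url, html_cont)
            self.urlManager.add_new_urls(new_urls)
            self.outputer.collect_data(new_data)
            count += 1
        except Exception:
            print('{0}: craw {1} failed'.format(count, new_url))
    self.outputer.output()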

2. url_manager.py

URL manager: tracks the URLs found by the parser and prevents pages from being crawled twice.
The code:

class UrlManager(object):
    """url 管理器,避免重复爬取"""
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def get_new_url(self):
        """Return a URL that is waiting to be crawled"""
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)      # once fetched, move the URL into old_urls
        return new_url

    def add_new_url(self, url):
        """添加解析的新url"""
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """添加解析到的新urls列表"""
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        """Report whether any URLs are still waiting to be crawled"""
        return len(self.new_urls) != 0
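A quick sanity check of the manager's dedup behavior (a minimal usage sketch; the URL is just the root URL from above):

from url_manager import UrlManager

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/item/Python')
manager.add_new_url('http://baike.baidu.com/item/Python')  # duplicate, ignored
print(manager.has_new_url())   # True: one URL is waiting

url = manager.get_new_url()    # pops the URL and records it in old_urls
manager.add_new_url(url)       # already crawled, ignored
print(manager.has_new_url())   # False: nothing left to crawl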

3. html_downloader.py

HTML downloader: fetches the HTML document behind a given URL.
The code:

import urllib.request

class HtmlDownloader(object):
    """html下载器"""

    def download(self, url):
        """下载url对应的html文档"""
        if url is None:
            return
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return
        return response.read()
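urllib sends a default User-Agent (something like 'Python-urllib/3.x'), which some sites reject. If downloads start failing, one option is to attach a browser-like User-Agent through urllib.request.Request. This is a sketch of a variant, not part of the original code, and the header value is just an example string:

import urllib.request

def download_with_user_agent(url):
    """Variant of HtmlDownloader.download() that sets an explicit User-Agent."""
    if url is None:
        return None
    # 'Mozilla/5.0' is an arbitrary example value, not a requirement
    request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib.request.urlopen(request)
    if response.getcode() != 200:
        return None
    return response.read()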

4. html_parser.py

HTML parser: parses an HTML document and extracts the data we need.
Third-party dependency: beautifulsoup4
The code:

import re
from bs4 import BeautifulSoup

class HtmlParser(object):
    """html解析器"""

    def parser(self, url, html):
        """解析html,返回new_urls集合和new_data字典"""
        if html is None or url is None:
            return

        soup = BeautifulSoup(html, 'html.parser')
        new_urls = self._get_new_urls(soup)
        new_data = self._get_new_data(url, soup)

        return new_urls, new_data

    def _get_new_urls(self, soup):
        # example href: '/item/Guido%20van%20Rossum'
        # r'/item/[\w %]+' matches '/item/' followed by one or more word characters, spaces, or '%'
        links = soup.find_all('a', href=re.compile(r'/item/[\w %]+'))

        new_urls = set()
        for link in links:
            new_url = link['href']
            new_full_url = 'http://baike.baidu.com' + new_url
            new_urls.add(new_full_url)

        return new_urls

    def _get_new_data(self, url, soup):
        # title node:   <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>...
        # summary node: <div class="lemma-summary">...</div>
        new_data = {}
        new_data['url'] = url

        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title')
        if title_node is not None:
            new_data['title'] = title_node.get_text()
        else:
            new_data['title'] = 'Oops'

        brief_node = soup.find('div', class_='lemma-summary')
        if brief_node is not None:
            new_data['brief'] = brief_node.get_text()
        else:
            new_data['brief'] = 'Oops'
        return new_data
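_get_new_urls builds absolute URLs by string concatenation, which works only because every matched href starts with '/item/'. urllib.parse.urljoin from the standard library handles relative and absolute hrefs alike; a possible variant (a sketch that assumes the page URL is passed in alongside the soup):

from urllib.parse import urljoin

def resolve_item_links(page_url, links):
    """Resolve the matched <a> hrefs against the page they came from."""
    new_urls = set()
    for link in links:
        # urljoin handles '/item/...' paths as well as full URLs
        new_urls.add(urljoin(page_url, link['href']))
    return new_urls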

5. html_outputer.py

HTML outputer: collects the data and renders it as a table viewable in a browser.
The code:

class HtmlOutputer(object):
    """将爬取到数据包装成html"""
    def __init__(self):
        self.datas = []

    def collect_data(self, new_data):
        if new_data is None:
            return
        self.datas.append(new_data)

    def output(self):
        # use utf-8 so Chinese text is not garbled
        fout = open('output.html', 'w', encoding='utf-8')

        fout.write('<html>')
        fout.write('<head>')
        fout.write('<meta charset="utf-8">')
        fout.write('</head>')
        fout.write('<body>')
        fout.write('<table>')
        fout.write('<tr><th>Entry</th><th>Brief</th></tr>')

        for data in self.datas:
            fout.write('<tr><td>{0}</td><td>{1}</td></tr>'.format(data['title'], data['brief']))

        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')

        # don't forget to close()
        fout.close()
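One caveat about output(): the get_text() results are written into the table unescaped, so a title or brief containing '<' or '&' would break the markup. html.escape from the standard library guards against this; a sketch of an escaped row formatter (not in the original code):

import html

def format_row(data):
    """Build one escaped <tr> row from a collected data dict."""
    return '<tr><td>{0}</td><td>{1}</td></tr>'.format(
        html.escape(data['title']), html.escape(data['brief']))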

Next steps

  1. Get comfortable with basic Python syntax
  2. Learn how to use the urllib and beautifulsoup4 libraries
