Learning a Simple Python Crawler

This article is based on my notes from the imooc web-crawler course: http://www.imooc.com/learn/563

A crawler is a program that automatically fetches information from the internet.

Main components: the crawler scheduler, the URL manager, the page downloader, and the page parser.

Crawler scheduler: the program entry point, responsible for controlling the whole crawl.

URL manager: maintains the set of URLs waiting to be crawled and the set of URLs that have already been crawled.

The URL manager provides these operations:

1. Add a new URL to the to-crawl set.

2. Check whether a URL about to be added already exists in either set.

3. Check whether any URLs are still waiting to be crawled, and move each URL from the to-crawl set to the crawled set once it is fetched.

URL storage options: in Python memory as a set(), in a relational database, or in a cache database.
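
An in-memory set() (which is what the UrlManager below uses) is fine for a single run, but a cache database keeps the crawl state across restarts. Here is a minimal sketch of the same interface on top of Redis, assuming the redis-py package is installed; the class and key names (RedisUrlManager, new_urls, old_urls) are hypothetical:

import redis

class RedisUrlManager(object):
    # Sketch only: same interface as UrlManager below, backed by two Redis sets.
    def __init__(self, host='localhost', port=6379):
        self.r = redis.StrictRedis(host=host, port=port)

    def add_new_url(self, url):
        if url is None:
            return
        # queue the URL only if it appears in neither set
        if not self.r.sismember('new_urls', url) and not self.r.sismember('old_urls', url):
            self.r.sadd('new_urls', url)

    def has_new_url(self):
        return self.r.scard('new_urls') != 0

    def get_new_url(self):
        new_url = self.r.spop('new_urls')  # pop an arbitrary member, like set.pop()
        self.r.sadd('old_urls', new_url)
        return new_url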

Page downloader: fetches the content of the page a URL points to; available implementations include urllib2 and requests.
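
The downloader implemented later in this article uses urllib2. For comparison, a sketch of the same download() contract written with the requests library (requests is not used anywhere else here, and the class name RequestsDownloader is hypothetical):

import requests

class RequestsDownloader(object):
    # Sketch only: equivalent of HtmlDownloader.download() below, using requests.
    def download(self, url):
        if url is None:
            return None
        response = requests.get(url, timeout=10)  # a timeout keeps a dead server from hanging the crawl
        if response.status_code != 200:
            return None
        return response.content  # raw bytes, like urllib2's response.read()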

Page parser: extracts the valuable data from a page. It can be implemented with regular expressions, html.parser, BeautifulSoup, or lxml.
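
Regular expressions do fuzzy, string-level matching, while html.parser, BeautifulSoup and lxml do structured DOM parsing. As a sketch of the regex route, the /view/NNN.htm links that the parser below extracts with BeautifulSoup could also be pulled straight out of the raw HTML (fragile against markup changes such as single-quoted attributes; the helper name find_view_links is hypothetical):

import re

def find_view_links(html_cont):
    # string-level match: the href of every <a ... href="/view/NNN.htm" ...> anchor
    return set(re.findall(r'<a[^>]+href="(/view/\d+\.htm)"', html_cont))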


Program flow: starting from the root URL, the main program first hands it to the URL manager, which puts it into the to-crawl set. The program then loops: it takes a URL from the to-crawl set, fetches the page it points to with the downloader, and parses the page to obtain the valuable data plus any linked URLs. After a duplicate check, the linked URLs are stored back into the to-crawl set by the URL manager, and the loop continues until an exit condition is met or the to-crawl set is empty.


URL manager code:

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):  # add a single new URL to the to-crawl set
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:  # skip URLs already queued or crawled
            self.new_urls.add(url)

    def add_new_urls(self, urls):  # bulk-add the URLs harvested by the parser
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):  # is there still a URL waiting to be crawled?
        return len(self.new_urls) != 0

    def get_new_url(self):  # pop one URL for the downloader and mark it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
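
A quick sanity check of the manager (a hypothetical session, shown only to illustrate the deduplication and the move from new_urls to old_urls):

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')  # duplicate, silently ignored
print manager.has_new_url()  # True
print manager.get_new_url()  # http://baike.baidu.com/view/21087.htm
print manager.has_new_url()  # False -- the URL is now in old_urls
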
Page downloader:

import urllib2

class HtmlDownloader(object):  # returns the content of the page the URL points to

    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # treat anything other than HTTP 200 as a failure
            return None

        return response.read()
Page parser:

from bs4 import BeautifulSoup
import re
import urlparse

class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):  # collect the URLs contained in the page
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)  # resolve relative links against the page URL
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):  # extract the valuable data from the page
        res_data = {}

        # url
        res_data['url'] = page_url

        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title")
        res_data['title'] = title_node.get_text()

        # <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parser(self, page_url, html_cont):  # run the two helpers above to parse a page
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
        
Data output:

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):  # buffer one parsed record
        if data is None:
            return
        self.datas.append(data)

    def outputer_html(self):  # write all collected records into an HTML table
        fout = open('output.html', 'w')

        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")

        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()  # close the file so the buffered output is flushed to disk

Crawler scheduler (main program):

from baike_spider import html_downloader, url_manager, html_outputer, html_parser


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parser(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)

                if count == 1000:  # stop after 1000 pages
                    break

                count = count + 1
            except:
                print 'craw failed'

        self.outputer.outputer_html()

if __name__ == "__main__":
    # the URL to start crawling from
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)



