Python Learning Notes - Day 16

**Scraping Qiushibaike jokes**
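This example fetches one page of the Qiushibaike hot list with urllib2 (Python 2) and pulls the author, joke text, and vote count out of the HTML with a regular expression.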
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib2
import re

page = 1
url = 'http://www.qiushibaike.com/hot/page/' + str(page)
# Disguise the request as a browser so the site does not reject it.
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
try:
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request)
    content = response.read().decode('utf-8')
    # Qiushibaike's markup changes over time, so this pattern is only an
    # approximation: it grabs the author name, the joke text, and the
    # vote count from each item on the hot page.
    pattern = re.compile('<div class="author clearfix">.*?href.*?<h2>(.*?)</h2>'
                         '.*?<div class="content">.*?<span>(.*?)</span>'
                         '.*?<i class="number">(.*?)</i>', re.S)
    items = re.findall(pattern, content)
    for item in items:
        for it in item:
            print it.encode('utf-8')
except urllib2.URLError, e:
    if hasattr(e, 'code'):
        print e.code
    if hasattr(e, 'reason'):
        print e.reason
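For reference, a minimal sketch of the same fetch step in Python 3, where urllib2 has been merged into urllib.request (same URL and headers as above):

# Python 3 equivalent of the fetch step above.
import urllib.request

url = 'http://www.qiushibaike.com/hot/page/1'
headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
request = urllib.request.Request(url, headers=headers)
content = urllib.request.urlopen(request).read().decode('utf-8')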
**Scraping the Maoyan movie board**
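This example crawls the Maoyan top-100 board (board/4) with requests, parses each movie entry with one large regular expression, and uses a multiprocessing Pool to fetch the ten pages in parallel.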
import json
import codecs
from multiprocessing import Pool
import requests
from requests.exceptions import RequestException
import re


def get_one_page(url):
    # Return the page HTML, or None if the request fails or raises.
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_one_page(html):
    # Each movie lives in a <dd> block; the groups capture ranking index,
    # poster URL, title, star list, release time, and the two halves of
    # the score.
    pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                         + '.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         + '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # drop the "主演:" prefix
            'time': item[4].strip()[5:],   # drop the "上映时间:" prefix
            'score': item[5] + item[6]     # integer part + fraction part
        }


def write_to_file(content):
    # json.dumps returns a unicode string when ensure_ascii=False, so open
    # the file through codecs to write UTF-8 under Python 2; the with
    # statement closes the file, so no explicit close() is needed.
    with codecs.open('result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def main(offset):
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        return
    for item in parse_one_page(html):
        print item['title'].encode('utf-8'), item['actor'].encode('utf-8'), \
            item['score'].encode('utf-8'), item['time'].encode('utf-8')
        # write_to_file(item)


if __name__ == '__main__':
    # Crawl the ten board pages (offset 0, 10, ..., 90) in parallel.
    pool = Pool()
    pool.map(main, [i * 10 for i in range(10)])
    pool.close()
    pool.join()
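If write_to_file is enabled, each movie is appended to result.txt as one JSON object per line. A minimal sketch (assuming that file exists) for loading the results back:

# Read the saved JSON lines back into a list of dicts (Python 2).
import json
import codecs

with codecs.open('result.txt', 'r', encoding='utf-8') as f:
    movies = [json.loads(line) for line in f if line.strip()]
print len(movies)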
