# 爬虫实战代码(Requests+Xpath)

import requests
from lxml import etree
import time
import json


def get_one_page(url):
    """Fetch one board page and return its HTML text, or None on failure.

    Args:
        url: URL of the page to fetch.

    Returns:
        str: the response body when the server answers with HTTP 200,
        otherwise None (non-200 status or any network-level error).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/74.0.3729.169 Safari/537.36'
    }
    try:
        # timeout so a stalled connection cannot hang the whole crawl
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Network-level failure (DNS, timeout, refused, ...) — the original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit; this
        # catches only requests' own errors.
        return None
    return response.text if response.status_code == 200 else None


def parse_one_page(text):
    """Parse one Maoyan Top-100 board page into a list of movie dicts.

    Args:
        text: HTML source of one board page.

    Returns:
        list[dict]: one dict per movie with keys 'index', 'title',
        'time' (release date), 'score' and 'actor'; empty list when
        nothing matched.
    """
    html = etree.HTML(text)
    titles = html.xpath('//dd//div//a[@data-act="boarditem-click"]/text()')
    # NOTE: renamed from `time` — the original shadowed the imported
    # `time` module inside this function.
    release_times = html.xpath('//dd//p[@class="releasetime"]/text()')
    # Each movie's score is split across two text nodes
    # (integer part and fractional part), hence the 2*i pairing below.
    scores = html.xpath('//dd//div//p[@class="score"]//text()')
    actors = html.xpath('//dd//div//p[@class="star"]/text()')
    indexes = html.xpath('//dd//i[contains(@class,"board-index")]/text()')

    content = []
    # zip() stops at the shortest list, so a partially-matched page can no
    # longer raise IndexError as the index-based loop could.
    for i, (idx, title, release, actor) in enumerate(
            zip(indexes, titles, release_times, actors)):
        content.append({
            'index': idx,
            'title': title,
            # drop the '上映时间:' prefix, keep the 10-char date that follows
            'time': release[5:15],
            'score': scores[2 * i] + scores[2 * i + 1],
            # strip whitespace, then drop the 3-char '主演:' prefix
            'actor': actor.strip()[3:],
        })
    return content


def write_to_file(content):
    """Append each movie dict to content.json, one JSON object per line.

    Args:
        content: list of movie dicts as produced by parse_one_page.
    """
    # Open the file once for the whole batch instead of re-opening it
    # for every single record as the original loop did.
    with open('content.json', 'a+', encoding='utf-8') as fp:
        for item in content:
            # ensure_ascii=False keeps Chinese titles human-readable
            fp.write(json.dumps(item, ensure_ascii=False) + '\n')


if __name__ == '__main__':
    # Offsets 0, 10, ..., 90 together cover the Top-100 board.
    for page in range(10):
        url = f'https://maoyan.com/board/4?offset={10 * page}'
        write_to_file(parse_one_page(get_one_page(url)))
        time.sleep(1)  # pause one second per page to avoid an IP ban

# PS:我在爬取电影图片时遇到了问题,有兴趣的朋友可以尝试一下,如果成功了,可以交流一下。

# 你可能感兴趣的:(爬虫实战代码(Requests+Xpath))