python3抓取微博信息

爬取自己微博账号上之前转发或发表的内容。

import requests

from urllib.parse import urlencode

from pyquery import PyQuery as pq

import json

# Endpoint of the mobile Weibo card-list API; url-encoded query params are appended.
base_url = 'https://m.weibo.cn/api/container/getIndex?'

# Request headers that make us look like the mobile web client.
headers = {

    'Host': 'm.weibo.cn',

    # Referer points at the target user's mobile profile page.
    'Referer': 'https://m.weibo.cn/u/5537970095',

    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:59.0) Gecko/20100101 Firefox/59.0',

    # Mimic an AJAX request so the endpoint answers with JSON.
    'X-Requested-With' : 'XMLHttpRequest',

}

def get_page(page):
    """Fetch one page of the user's Weibo timeline from m.weibo.cn.

    Args:
        page: 1-based page number of the timeline to request.

    Returns:
        The decoded JSON response as a dict, or ``None`` when the request
        raises a connection error or the server answers with a non-200
        status (callers must handle ``None``).
    """
    params = {
        'type': 'uid',
        'value': '5537970095',              # target user's uid
        'containerid': '1076035537970095',  # timeline container for that uid
        'page': page,
    }
    url = base_url + urlencode(params)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.json()
        # Previously a non-200 reply fell through silently; report it so
        # failures are visible while still returning None for callers.
        print('Error: unexpected status code', response.status_code, 'for page', page)
    except requests.ConnectionError as e:
        print('Error', e.args)

def parse_page(json):
    """Yield one summary dict per weibo post found in an API response.

    Args:
        json: decoded response dict from ``get_page()``; may be ``None``
            when the fetch failed.  (The name shadows the ``json`` module
            but is kept so keyword callers keep working.)

    Yields:
        Dicts with keys 'id', 'text', 'attitudes', 'comments', 'reposts'.
    """
    if not json:
        return
    # Tolerate responses where 'data' or 'cards' is missing or null.
    cards = (json.get('data') or {}).get('cards') or []
    for card in cards:
        item = card.get('mblog')
        # Some cards (ads, section headers) carry no 'mblog' payload;
        # skip them instead of crashing on ``None.get(...)``.
        if not item:
            continue
        weibo = {}
        weibo['id'] = item.get('id')
        # pq(...).text() strips the HTML markup Weibo embeds in the text.
        weibo['text'] = pq(item.get('text')).text()
        weibo['attitudes'] = item.get('attitudes_count')
        weibo['comments'] = item.get('comments_count')
        weibo['reposts'] = item.get('reposts_count')
        yield weibo


if __name__ == '__main__':
    # Fetch pages 1-9 of the timeline and print every parsed post.
    for page in range(1, 10):
        # Renamed from ``json`` so the local no longer shadows the
        # ``json`` module imported at the top of the file.
        data = get_page(page)
        results = parse_page(data)
        for result in results:
            print(result)


仅供参考

你可能感兴趣的:(python3抓取微博信息)