Python Crawler Study Notes (Ajax in the Browser Developer Tools + Example: Crawling a Personal Weibo Homepage)

Ajax requests show up with Type xhr: click the XHR filter under the Network tab of the browser's developer tools to filter out all Ajax requests. Their request headers contain X-Requested-With: XMLHttpRequest, which marks the request as an Ajax request.
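For a quick standalone illustration (a minimal sketch, separate from the crawler below; the URL is only a placeholder), you can send an Ajax-style request yourself by attaching that header:

import requests

# The X-Requested-With header is what marks this as an Ajax-style request;
# https://example.com/api/data is a hypothetical endpoint for illustration.
response = requests.get('https://example.com/api/data',
                        headers={'X-Requested-With': 'XMLHttpRequest'})
print(response.status_code)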

 

Goal: crawl the first 10 pages of posts from a personal Weibo homepage, collecting each post's
ID, text, like count, comment count, and repost count,
and store them in MongoDB.
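Each saved MongoDB document will look roughly like this (the values are placeholders; the keys come from the parse_page function below):

{
    'id': '...',        # post ID
    'text': '...',      # post body, HTML tags stripped
    'attitudes': 0,     # like count
    'comments': 0,      # comment count
    'reposts': 0        # repost count
}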


from urllib.parse import urlencode
import requests
from pyquery import PyQuery as pq
from pymongo import MongoClient

base_url = 'https://m.weibo.cn/api/container/getIndex?' # first half of the request URL; getIndex returns the index of posted Weibo entries
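# A complete request URL then looks like this (the parameters are filled in
# by get_page below):
# https://m.weibo.cn/api/container/getIndex?type=uid&value=2830678474&containerid=1076032830678474&page=1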

# NOTE: replace the headers and params below with the ones your own browser shows!!
# (the x-xsrf-token here matches the XSRF-TOKEN value inside the cookie)
headers = {
    'authority': 'm.weibo.cn',
    'pragma': 'no-cache',
    'cache-control': 'no-cache',
    'accept': 'application/json, text/plain, */*',
    'mweibo-pwa': '1',
    'x-xsrf-token': '972c17',
    'x-requested-with': 'XMLHttpRequest',
    'sec-fetch-dest': 'empty',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'referer': 'https://m.weibo.cn/u/2830678474',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cookie': '_T_WM=23688964088; WEIBOCN_FROM=1110006030; MLOGIN=0; SL_GWPT_Show_Hide_tmp=1; SL_wptGlobTipTmp=1; M_WEIBOCN_PARAMS=luicode%3D10000011%26lfid%3D1076032830678474%26fid%3D1076032830678474%26uicode%3D10000011; XSRF-TOKEN=972c17',
}

client = MongoClient() # defaults to mongodb://localhost:27017
db = client['weibo'] # select the database
collection = db['weibo'] # select the collection


def get_page(page):
    params = {
        'type': 'uid',
        'value': '2830678474',
        'containerid': '1076032830678474',
        'page': page, # paginate by page number so the loop in __main__ actually advances
    }
    url = base_url + urlencode(params) # encode the params as a GET query string and append to base_url
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            return response.json() # parse the response body as JSON and return it
    except requests.ConnectionError as e:
        print('ERROR', e.args)

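# For reference, parse_page below expects the returned JSON to look roughly
# like this (only the keys this script actually reads are shown; the real
# response carries many more fields):
#
# {
#     "data": {
#         "cards": [
#             {"mblog": {"id": "...", "text": "...", "attitudes_count": 0,
#                        "comments_count": 0, "reposts_count": 0}},
#             ...
#         ]
#     }
# }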

def parse_page(json):
    if json:
        items = json.get('data').get('cards') # walk through cards and pull the fields we want from each mblog into a new dict
        for item in items:
            item = item.get('mblog')
            if not item: # skip cards that carry no mblog (not every card is a post)
                continue
            weibo = {}
            weibo['id'] = item.get('id')
            weibo['text'] = pq(item.get('text')).text() # strip the HTML tags from the post body
            weibo['attitudes'] = item.get('attitudes_count')
            weibo['comments'] = item.get('comments_count')
            weibo['reposts'] = item.get('reposts_count')
            yield weibo

def save_to_mongo(result):
    collection.insert_one(result) # store one post as one document
    print('Saved to Mongo')

if __name__ == '__main__':
    for page in range(1, 11): # fetch pages 1 through 10
        json = get_page(page)
        results = parse_page(json)
        for result in results:
            print(result)
            save_to_mongo(result)
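To check what was saved, you can query the collection afterwards. A minimal sketch reusing the same default localhost connection as above:

from pymongo import MongoClient

collection = MongoClient()['weibo']['weibo'] # same database/collection as the crawler
print(collection.count_documents({}))        # number of stored posts
for doc in collection.find().limit(3):       # peek at a few documents
    print(doc)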
