Using the Fiddler packet-capture tool to capture the Douyu API and scrape its data

Use Fiddler to capture and analyze Douyu's network traffic, locate the API URL the live-list page calls, and then pull the data directly from that API. Because the live-stream listings and the total page count are updated in real time as streamers go on and off air, the crawler has to read the current page count from each response instead of hard-coding it.
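Before writing the full crawler, it helps to fire a single request at the endpoint Fiddler exposed and confirm the response shape the script below relies on: a data object holding the room list rl and the total page count pgcnt. A minimal check, assuming the endpoint still returns JSON in that layout (the field names come from the Fiddler capture and may change if Douyu updates the API):

import requests

# One request to page 1 of the 0_0 (all-categories) directory.
resp = requests.get(
    'https://www.douyu.com/gapi/rkc/directory/0_0/1',
    headers={'User-Agent': 'Mozilla/5.0'},
    timeout=10,
).json()
print(len(resp['data']['rl']))      # rooms on this page (normally 120)
print(resp['data']['pgcnt'])        # total number of pages right now
print(resp['data']['rl'][0]['rn'])  # title of the first room

The full crawler below builds on exactly that structure.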

import requests
import random

'''
API endpoint captured with Fiddler:
url = https://www.douyu.com/gapi/rkc/directory/0_0/list
'''
def get_douyu_api(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    }
    # Free public proxies like these expire quickly; swap in proxies that
    # currently work, or drop the proxies argument to request directly.
    proxies_list = [
        {
            'http': 'http://218.95.37.252:3128',
            'https': 'https://182.61.162.160:3128'
        },
        {
            'http': 'http://163.125.19.53:8888',
            'https': 'https://221.229.252.98:8080'
        },
    ]
    proxies = random.choice(proxies_list)
    print(proxies)
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10).json()
    # Each page of the API carries up to 120 rooms under data['rl'].
    with open('douyu.txt', 'a', encoding='utf-8') as fp:
        for room in response['data']['rl']:
            title = room.get('rn', 'no title')          # stream title
            type1 = room.get('c2name', 'no category')   # stream category
            name = room.get('nn', 'no streamer')        # streamer name
            hot = room.get('ol', 'no heat')             # heat (online viewer count)
            info = f"category: {type1}, title: {title}, streamer: {name}, heat: {hot}"
            print(info)
            fp.write(info + '\n')
    # Total page count right now; it changes as streams start and stop.
    pgcnt = response['data']['pgcnt']
    return pgcnt

if __name__ == '__main__':
    # Walk the pages one by one; pgcnt is re-read from every response
    # because the total page count changes while streams start and stop.
    i = 1
    while True:
        url = f'https://www.douyu.com/gapi/rkc/directory/0_0/{i}'
        pgcnt = get_douyu_api(url)
        i += 1
        if i > pgcnt:
            break

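Free public proxies like the ones in proxies_list drop out constantly, so in practice the requests.get call is the part that fails first. One way to harden it is a small retry wrapper that tries a different random proxy on each failure. This is only a sketch; fetch_json is a helper name introduced here, not something from the original script:

import random
import requests

def fetch_json(url, headers, proxies_list, retries=3):
    # Try up to `retries` randomly chosen proxies before giving up.
    for _ in range(retries):
        proxies = random.choice(proxies_list)
        try:
            resp = requests.get(url, headers=headers, proxies=proxies, timeout=10)
            resp.raise_for_status()
            return resp.json()
        except requests.RequestException as exc:
            print(f'proxy {proxies} failed: {exc}')
    raise RuntimeError(f'all {retries} attempts failed for {url}')

get_douyu_api could then call fetch_json(url, headers, proxies_list) in place of the direct requests.get call, keeping the rest of the parsing logic unchanged.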