爬虫——爬取疫情数据

此次疫情数据来源于网易:
https://news.163.com/special/epidemic/

爬取当天疫情数据。

import requests
import pandas as pd
import json

def get_page(url):    # fetch and decode the API response
    """Fetch *url* and return its JSON payload as a dict.

    Sends a desktop-browser User-Agent (the NetEase endpoint rejects
    the default python-requests UA).

    Returns:
        dict parsed from the response body on success, or None on any
        failure (network error, non-2xx status, malformed JSON).
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
    }
    try:
        # timeout so a stalled connection cannot hang the script forever
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        # Response.json() decodes from raw bytes itself, so no manual
        # encoding fix-up is needed here.
        return r.json()
    except Exception as e:
        # best-effort: report and let the caller deal with the absence
        print("error", e)
        return None
def _extract_record(node):
    """Map one areaTree node to a flat record of today's figures.

    *node* is expected to carry 'name' and a 'today' dict with
    'confirm'/'suspect'/'dead'/'heal' counts (NetEase API shape).
    """
    today = node['today']
    return {
        'city': node['name'],
        'confirmNum': today['confirm'],
        'suspectNum': today['suspect'],
        'deadNum': today['dead'],
        'healNum': today['heal'],
    }

def parse_page(data):    # extract the case records
    """Flatten the province/city tree into a DataFrame of daily cases.

    Walks data['data']['areaTree'][0]['children'] (the provinces of the
    first country node), emitting one record per province and one per
    city beneath it. Nodes named '未明确地区' ("unspecified area") are
    skipped at both levels.

    Returns:
        pandas.DataFrame with columns city/confirmNum/suspectNum/
        deadNum/healNum.
    """
    provinces = data['data']['areaTree'][0]['children']
    caseslist = []
    for province in provinces:
        if province['name'] == '未明确地区':
            continue
        caseslist.append(_extract_record(province))   # province-level row
        # .get(): some nodes have no 'children' key; original code
        # raised KeyError on them
        for city in province.get('children', []):
            if city['name'] == '未明确地区':
                continue
            caseslist.append(_extract_record(city))   # city-level row
    result = pd.DataFrame(caseslist)
    print(caseslist)
    return result
def save_file(info, path='casesdata.csv'):   # persist to CSV
    """Write the case records to a CSV file.

    Args:
        info: records accepted by the pandas.DataFrame constructor
            (a DataFrame is passed through unchanged).
        path: output CSV filename; defaults to 'casesdata.csv' to keep
            the original call sites working.
    """
    df = pd.DataFrame(info)
    print(df)
    # gb2312 keeps the Chinese city names readable when the CSV is
    # opened in Excel on zh-CN Windows (utf-8 shows mojibake there)
    df.to_csv(path, index=False, encoding='gb2312')

if __name__ == '__main__':
    # Pull today's figures from the NetEase epidemic API, flatten the
    # province/city tree, and persist the result as CSV.
    api_url = 'https://c.m.163.com/ug/api/wuhan/app/data/list-total?t=316578012887'
    payload = get_page(api_url)
    table = parse_page(payload)
    save_file(table)

你可能感兴趣的:(爬虫——爬取疫情数据)