[Python in Practice] Scraping the 1997-2019 Education Statistics and Exporting Them to Excel

Building on earlier projects, this one uses Python to scrape the 1997-2019 education statistics from the Ministry of Education website and save them to Excel.

Data source: the Ministry of Education website (moe.gov.cn) → 文献 (Documents) → 教育统计数据 (Education Statistics)

For example:

[Screenshots: the Education Statistics listing pages on the MOE site]

Compared with earlier scrapers, the only new ingredient here is pandas.read_html, which parses an HTML table into a DataFrame that can then be written out to Excel.
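To see what that call does in isolation, here is a minimal sketch with a toy table (the demo HTML and output file name are made up for illustration; pandas needs lxml or html5lib installed to do the parsing):

from io import StringIO
import pandas as pd

# read_html returns a list of DataFrames, one per <table> element found
# in the HTML; the statistics pages each carry a single data table.
html = '<table><tr><th>地区</th><th>学校数</th></tr><tr><td>北京</td><td>92</td></tr></table>'
df = pd.read_html(StringIO(html))[0]
df.to_excel('demo.xlsx', index=False)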

Folders are created following the site's own hierarchy. Example results (a sketch of the layout follows the screenshots):

[Screenshots: the resulting folder tree and exported Excel files]
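Roughly, the layout looks like this (the folder and file names here are illustrative; the real ones are taken from the link titles on the site):

data\
├── 2019年教育统计数据\
│   ├── 高等教育\
│   │   ├── some_table.xlsx
│   │   └── ...
│   └── ...
├── ...
└── 2010年教育统计数据\
    ├── some_table.xlsx      (2010-2012 have no category level)
    └── ...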

As for the process, it basically starts with analyzing the page structure. Of all the years, only 2010-2012 lack a category level; every other year has two levels of folders (and 2004 additionally has a third level for everything except 附表, handled in the main block below).

Because I worried about requesting too frequently, I added a random sleep between requests. The waits add up, so the full crawl takes quite a while; you can go do something else while it runs.
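The pause itself is a one-liner. get_page below sleeps up to 3 seconds per request; if you want longer, jittered waits, something like this works (the 5-15 second range is just an example, not what the code below uses):

import time, random

# Sleep a random 5-15 seconds between requests so the crawl
# doesn't hit the server at a fixed, detectable rate.
time.sleep(random.uniform(5, 15))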

The full code:

import pandas as pd
import urllib.request
import time, random, re, os
from lxml import etree


# Randomly pick a User-Agent header
def getheaders():
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36"
    ]
    UserAgent = random.choice(user_agent_list)
    header = {'User-Agent': UserAgent}
    return header

# Fetch a page's HTML
def get_page(url):
    headers = getheaders()
    req = urllib.request.Request(url = url, headers = headers)
    html = urllib.request.urlopen(req).read().decode('utf-8')
    # Random pause (0-3 s) between requests to be polite to the server
    time.sleep(random.random()*3)
    return html

# Get the link and title for each year
def get_every_year_title_url_ls():
    url = 'http://www.moe.gov.cn/s78/A03/moe_560/jytjsj_2019/'
    html = get_page(url)
    selector = etree.HTML(html)
    every_year_title_url_li = selector.xpath('/html/body/div[1]/div/div[5]/div[1]/ul/li')
    every_year_title_url_ls = []
    for li in every_year_title_url_li:
        every_year_title_url = []
        # Link title
        every_year_title = li.xpath('a/text()')[0]
        every_year_title_url.append(every_year_title)

        # Link URL: other years are linked relatively ('../jytjsj_XXXX/'),
        # so strip the dots and rebuild an absolute URL
        every_year_url = li.xpath('a/@href')[0]
        if '../' in every_year_url:
            final_url = 'http://www.moe.gov.cn/s78/A03/moe_560/' + every_year_url.replace('./','').replace('.','')
        else:
            final_url = url
        every_year_title_url.append(final_url)

        every_year_title_url_ls.append(every_year_title_url)

    return every_year_title_url_ls

# Get the category links and titles for a given year (2010-2012 have none)
def get_category_title_url_ls(url):
    html = get_page(url)
    selector = etree.HTML(html)
    category_title_url_li = selector.xpath('//*[@id="list"]/li')
    category_title_url_ls = []
    for li in category_title_url_li:
        category_title_url = []
        # Link title
        category_title = li.xpath('a/text()')[0]
        category_title_url.append(category_title)

        # Link URL (relative './xxx', appended to the year URL)
        category_url = li.xpath('a/@href')[0].replace('./','')
        category_title_url.append(url + category_url)

        category_title_url_ls.append(category_title_url)

    return category_title_url_ls

# Get the number of listing pages under a category link
def get_page_num(url):
    html = get_page(url)
    item_num = int(re.findall(r'var recordCount = (.+?);',html)[0])
    print(str(item_num) + ' items in total')
    # The site shows 20 items per listing page
    if item_num < 20:
        page_num = 1
    elif item_num%20 == 0:
        page_num = item_num // 20
    else:
        page_num = item_num // 20 + 1

    return page_num

# Build the URL of every listing page under a category
# (page 2 onward are named index_1.html, index_2.html, ...)
def get_page_url_ls(url):
    page_num = get_page_num(url)
    page_url_ls = [url]
    for i in range(page_num - 1):
        page_url = url + 'index_'+ str(i + 1) + '.html'
        page_url_ls.append(page_url)
    return page_url_ls

# Get all item links and titles on one listing page
def get_item_title_url_ls(url):
    html = get_page(url)
    selector = etree.HTML(html)
    item_title_url_li = selector.xpath('//*[@id="list"]/li')
    item_title_url_ls = []
    for li in item_title_url_li:
        item_title_url = []
        # Link title
        item_title = li.xpath('a/text()')[0]
        item_title_url.append(item_title)

        # Link URL: on index_N.html pages the hrefs are relative to the
        # category root, so strip the page file name from the URL first
        item_url = li.xpath('a/@href')[0].replace('./','')
        if 'index_' in url:
            new_url = '/'.join(url.split('/')[:-1]) + '/'
            final_url = new_url + item_url
        else:
            final_url = url + item_url
        item_title_url.append(final_url)

        item_title_url_ls.append(item_title_url)
    return item_title_url_ls

# Get the DataFrame for one item page
def get_df(url):
    html = get_page(url)
    # Skip the first 6 rows and drop the last 6: on these pages they are
    # title/notes decoration rather than data
    df = pd.read_html(html,skiprows = 6, header = 0)[0]
    df = df.drop(df.tail(6).index)
    return df

# Write a DataFrame to Excel
def to_excel(excel_path, title_url):
    # Strip whitespace characters from the title so it works as a file name
    title = title_url[0].replace('\r','').replace('\n','').replace('\u3000','').replace('\t','')
    item_url = title_url[1]
    df = get_df(item_url)
    if not os.path.exists(excel_path):
        os.makedirs(excel_path)
    excel_name = excel_path + title + '.xlsx'
    df.to_excel(excel_name, index = False)


if __name__ == "__main__":
    every_year_title_url_ls = get_every_year_title_url_ls()
    for n,i in enumerate(every_year_title_url_ls):
        print('[Folder]', str(2019 - n))
        fold1 = i[0]
        # 2010, 2011 and 2012 have only one folder level
        if 2019 - n in (2010, 2011, 2012):
            # use a fresh loop variable here; reusing n would clobber the year counter
            for p in get_page_url_ls(i[1]):
                for l in get_item_title_url_ls(p):
                    print(l)
                    excel_path = 'D:\\2_study\\4_实战\\python\\jyb_sta\\data\\' + fold1 + '\\'
                    to_excel(excel_path, l)

        else:
            category_title_url_ls = get_category_title_url_ls(i[1])
            for j in category_title_url_ls:
                print('[Folder]', j)
                fold2 = j[0]
                # In 2004, categories other than "附表" have a third folder level
                if 2019 - n == 2004 and '附表' != j[0]:
                    for z in get_category_title_url_ls(j[1]):
                        print('[Folder]', z)
                        fold3 = z[0]
                        for m in get_page_url_ls(z[1]):
                            for k in get_item_title_url_ls(m):
                                print(k)
                                excel_path = 'D:\\2_study\\4_实战\\python\\jyb_sta\\data\\' + fold1 + '\\' + fold2 + '\\' + fold3 + '\\'
                                to_excel(excel_path, k)
                else:
                    for m in get_page_url_ls(j[1]):
                        for k in get_item_title_url_ls(m):
                            print(k)
                            excel_path = 'D:\\2_study\\4_实战\\python\\jyb_sta\\data\\' + fold1 + '\\' + fold2 + '\\'
                            to_excel(excel_path, k)
            

Of course, the scraped data still needs to be organized for whatever you actually want to do with it, e.g. merging one type of table across years, and that can also be done in Python.
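For instance, a minimal sketch of such a merge (the glob pattern, the table file name, and the 年份 column are assumptions for illustration; the real names depend on what the scraper saved):

import glob, re
import pandas as pd

# Hypothetical example: collect one table type across all year folders,
# tag each chunk with the year pulled from its path, and write one sheet.
frames = []
for path in glob.glob(r'D:\2_study\4_实战\python\jyb_sta\data\**\高等教育学校数.xlsx', recursive=True):
    df = pd.read_excel(path)
    year = re.search(r'(\d{4})', path).group(1)   # year from the folder name
    df['年份'] = year
    frames.append(df)
merged = pd.concat(frames, ignore_index=True)
merged.to_excel('merged_by_year.xlsx', index=False)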


WeChat official account: amazingdata (数据格子铺)
Reply with 教育统计数据 in the backend to download all the Excel files.
