Scraping website table data into a CSV file

from bs4 import BeautifulSoup
import requests
import csv

num = 0  # tracks whether the header row has already been written to the CSV

# Fetch the page and return its text, or None on failure
def check_link(url):
    try:
        r = requests.get(url)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # guess the encoding from the response body
        return r.text
    except requests.RequestException:
        print('Could not reach the server!')
        return None


# Scrape every table row into a list of cell texts
def get_contents(ulist, rurl):
    soup = BeautifulSoup(rurl, 'lxml')
    trs = soup.find_all('tr')
    for tr in trs:
        ui = []
        for td in tr.find_all(['td', 'th']):  # cells only, skipping whitespace text nodes
            ui.append(td.get_text(strip=True))
        if ui:  # ignore rows that contain no cells
            ulist.append(ui)


# Append the scraped rows to the CSV, writing the header row only once
def save_contents(urlist):
    global num
    start = 0 if num == 0 else 1  # later pages repeat the header row, so skip it
    # newline='' stops csv.writer from adding a blank line after every row on Windows;
    # utf-8-sig lets Excel open the Chinese text correctly
    with open(r"C:\Users\hero\Desktop\table.csv", 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        for row in urlist[start:]:
            writer.writerow(row)
    num += 1


def main(p):
    urli = []
    # url = "http://www.maigoo.com/news/463071.html"
    url = "http://192.168.91.43:8000/table/?p={}".format(p)
    rs = check_link(url)
    if rs is None:  # skip this page if the request failed
        return
    get_contents(urli, rs)
    save_contents(urli)


# Crawl pages 1 and 2 of the paginated table
for x in range(1, 3):
    main(x)
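
To spot-check the result, the file can be read back with the same csv module. This is a minimal sketch, assuming the script above has already run and written C:\Users\hero\Desktop\table.csv:

import csv

# Print the header plus the first few data rows to verify the scrape
with open(r"C:\Users\hero\Desktop\table.csv", newline='', encoding='utf-8-sig') as f:
    for i, row in enumerate(csv.reader(f)):
        print(row)
        if i >= 4:
            break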
