import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup
# Baidu COVID-19 statistics page (JS-rendered, hence Selenium rather than requests).
url = 'https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_3'


def fetch_page_source(target_url):
    """Load *target_url* in a local Chrome session and return the rendered HTML.

    The browser is always closed, even if navigation fails (the original
    script leaked the Chrome process by never calling quit()).
    """
    # NOTE(review): executable_path is deprecated/removed in Selenium 4 —
    # switch to webdriver.Chrome(service=Service('chromedriver.exe')) there.
    driver = webdriver.Chrome(executable_path='chromedriver.exe')
    try:
        driver.get(target_url)
        return driver.page_source
    finally:
        driver.quit()  # release the browser process unconditionally


def parse_table(html):
    """Parse the virus-statistics table out of *html*.

    Returns a list of 5-element lists:
    [area, new confirmed, new asymptomatic, total confirmed, risk areas].
    Raises AttributeError if the table is missing and IndexError if a data
    row has fewer than 5 cells (same failure modes as the original code).
    """
    soup = BeautifulSoup(html, 'lxml')
    # The class name is a build-generated hash and may change when the
    # site is redeployed — re-inspect the page if this returns None.
    table = soup.find('table', {'class': 'VirusTable_1-1-348_38pQEh'})
    rows = []
    for tr in table.find_all('tr'):
        cells = tr.find_all('td')
        if not cells:
            # Header rows use <th> and have no <td> — skip them.
            continue
        # The area name sits in the second <span> of the first cell.
        area = cells[0].find_all('span')[1].getText()
        rows.append([area] + [cells[i].getText() for i in range(1, 5)])
    return rows


def main():
    """Scrape the table, print it, and export it to outputSum.xlsx."""
    rows = parse_table(fetch_page_source(url))
    df = pd.DataFrame(rows)
    # 设置列名 (set the column names)
    df.columns = ["省市地区", "新增确诊", "新增无症状", "累计确诊", "风险地区"]
    print(df)
    # 输出到excel (export to Excel) — index=False is the explicit form of
    # the original index=None, which only worked because None is falsy.
    df.to_excel('outputSum.xlsx', index=False)


if __name__ == "__main__":
    main()
# 这种方法需要去访问url,所以速度较慢,适用于数据不多的场景