import requests
from bs4 import BeautifulSoup
if __name__ == '__main__':
    # Request headers: mimic a desktop browser so the site does not reject us.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.0.10191 SLBChan/128'}
    url = 'https://www.tianqi.com/air/'
    # Collected rows: [ranking, city, province, AQI index, condition].
    result = []
    # The ranking is paginated in steps of 25 entries (offsets 0 and 25).
    for i in range(0, 50, 25):
        # Build the URL for this page offset.
        url_page = url + str(i)
        # BUG FIX: request url_page, not url — the original fetched the same
        # first page on every iteration. Also add a timeout and fail loudly
        # on HTTP errors instead of parsing an error page.
        response = requests.get(url_page, headers=headers, timeout=10)
        response.raise_for_status()
        # Parse the returned HTML.
        soup = BeautifulSoup(response.text, 'html.parser')
        city_list = soup.find('ul', class_='aqi_ranklist').find_all('li', class_="clearfix")
        # Walk the air-quality ranking rows.
        for city in city_list:
            # BUG FIX: the original called city.find('span') four times, so
            # ranking, province, and index all received the FIRST span's text.
            # Pull the distinct spans instead.
            # NOTE(review): assumes each <li> lays out its spans in the order
            # rank / province / AQI — confirm against the live page markup.
            spans = city.find_all('span')
            ranking = spans[0].text
            citys = city.find('a').text
            province = spans[1].text
            index = spans[2].text
            condition = city.find('i').text
            # Store one row per city.
            result.append([ranking, citys, province, index, condition])
    # Print the collected results (labels intentionally kept in Chinese).
    for item in result:
        print('排名:', item[0])
        print('城市:', item[1])
        print('省份:', item[2])
        print('空气质量指数:', item[3])
        print('空气质量状况:', item[4])
        print('-----------------------------')
# 爬取效果 (sample crawl output) — commented out so the file remains valid Python: