Fetching weather data with a Python web crawler

#encoding:utf-8
from bs4 import BeautifulSoup
import urllib.request
import random

# A pool of User-Agent headers, to avoid 403 Forbidden responses
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]

# Fetch the page content
def get_content(url, headers):
    '''
    Fetch a page that answers bare requests with 403 Forbidden,
    by sending browser-like request headers.
    '''
    random_header = random.choice(headers)
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "lishi.tianqi.com")
    req.add_header("Referer", "http://lishi.tianqi.com/")

    content = urllib.request.urlopen(req).read()
    return content
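For comparison, the same headers can be sent with the requests library; a minimal sketch (the helper name get_content_requests and the 10-second timeout are illustrative choices, not part of the original script):

import random
import requests

def get_content_requests(url, headers):
    '''Variant of get_content using the requests library.'''
    resp = requests.get(
        url,
        headers={
            "User-Agent": random.choice(headers),  # rotate headers as above
            "Referer": "http://lishi.tianqi.com/",
        },
        timeout=10,  # illustrative timeout, not in the original
    )
    resp.raise_for_status()  # raise instead of silently parsing a 403 page
    return resp.content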

# Links to three months of weather history
urls = ["http://lishi.tianqi.com/wuhan/201707.html",
        "http://lishi.tianqi.com/wuhan/201706.html",
        "http://lishi.tianqi.com/wuhan/201705.html"]

with open('wuhan_weather.csv', 'w', encoding='utf-8') as file:
    for url in urls:
        response = get_content(url, my_headers)
        soup = BeautifulSoup(response, 'html.parser')
        weather_list = soup.select('ul[class="thrui"]')

        for weather in weather_list:
            day_list = weather.select('li')    # one <li> per day
            for day in day_list:
                cell_list = day.select('div')  # the fields of one day
                row = ""
                for cell in cell_list:
                    row += (cell.string or '') + ','  # guard against empty cells
                file.write(row + '\n')
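One caveat with hand-joined commas: if a field ever contains a comma itself, the CSV breaks. Python's standard csv module quotes such fields automatically; a minimal variant of the write loop using it:

import csv

with open('wuhan_weather.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)  # handles quoting of embedded commas
    for url in urls:
        soup = BeautifulSoup(get_content(url, my_headers), 'html.parser')
        for weather in soup.select('ul[class="thrui"]'):
            for day in weather.select('li'):
                writer.writerow(cell.get_text(strip=True)
                                for cell in day.select('div'))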

DOM structure of the crawled page:
[Figure 1: screenshot of the page's DOM structure]
From the structure above we can see that BeautifulSoup's select method is all we need to pull content out of a specific DOM subtree. Three nested selects suffice here: first ul[class="thrui"], then li, then div, which yields exactly the fields we want.
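The same three-level walk can also be collapsed into a single CSS selector, which reads a little more directly; a quick sketch:

# Each <li> under ul.thrui is one day; its <div> children are the fields
for day in soup.select('ul.thrui li'):
    fields = [cell.get_text(strip=True) for cell in day.select('div')]
    print(fields)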

The crawl results are shown below:
[Figure 2: screenshot of the crawl output]
