Learning Python, Day 4

Web Scraping

Reading data from a local HTML file

# Use lxml to extract the content of the h1 tag
from lxml import html
# Read the HTML file
with open('./index.html', 'r', encoding='utf-8') as f:
    html_data = f.read()
    # print(html_data)
    # Parse the HTML and get a selector object
    selector = html.fromstring(html_data)
    # Call the xpath method on the selector
    # To get the text inside a tag, append text() to the path
    h1 = selector.xpath('/html/body/h1/text()')
    print(h1[0])

    # // means "start from anywhere in the document"
    # //tag1[@attribute=value]/tag2[@attribute=value].../text()
    a = selector.xpath('//div[@id="container"]/a/text()')
    print(a)
    # Get the content of the p tags
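    # A small example of that last step, assuming index.html also contains <p> tags
    # (the tag name is an assumption, not taken from the original file):
    p = selector.xpath('//p/text()')
    print(p)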

1. Import lxml

from lxml import html

2. Read the HTML file

with open('./index.html', 'r', encoding='utf-8') as f:
    html_data = f.read()

3. Parse the HTML to get a selector object, then call its xpath method

selector = html.fromstring(html_data)
h1 = selector.xpath('/html/body/h1/text()')  # to get the text inside a tag, append text() to the path
# // means "start from anywhere in the document"
# //tag1[@attribute=value]/tag2[@attribute=value].../text()
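The //tag[@attribute=value] pattern above works with any attribute, not just id; a minimal sketch, assuming index.html had a <p class="intro"> element (the class name is made up for illustration):

intro = selector.xpath('//p[@class="intro"]/text()')
print(intro)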

Reading data from a website

import requests

# Status codes: 200 OK, 404 Not Found, 500 Internal Server Error
# Request to Zhihu without a request header
# resp = requests.get('https://www.zhihu.com/')
# print(resp.status_code)
# Define the request headers in a dict
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}
resp = requests.get('https://www.zhihu.com/', headers=headers)
print(resp.status_code)

1. Add a request header

headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}

2. Send the request

resp = requests.get('https://www.zhihu.com/', headers=headers)

3. A status code of 200 means the request succeeded.

4. Scrape book information from Dangdang; the HTML page can also be written to a local file:

import requests

def spider_dangdang(isbn):
    # Target site URL
    url = 'http://search.dangdang.com/?key={}&act=input'.format(isbn)
    # print(url)
    # Get the response from the site as a str
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}

    resp = requests.get(url, headers=headers)
    html_data = resp.text
    # Write the HTML page to a local file
    # with open('dangdang.html', 'w', encoding='utf-8') as f:
    #     f.write(html_data)

    # Extract the target information from the page

spider_dangdang('9787115428028')
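The extraction step above is left as a bare comment. A minimal sketch of how it might be filled in with lxml, following the same pattern as the local-file example; the XPath expressions are assumptions about Dangdang's search-result markup and would need to be checked against the saved dangdang.html:

from lxml import html

def parse_dangdang(html_data):
    # Parse the search-result page into a selector
    selector = html.fromstring(html_data)
    # Assumed structure: one <li> per book under <ul class="bigimg">
    li_list = selector.xpath('//ul[@class="bigimg"]/li')
    print('Found {} books'.format(len(li_list)))
    for li in li_list:
        # Assumed attribute/class names; adjust after inspecting the real page
        title = li.xpath('./a/@title')
        price = li.xpath('./p[@class="price"]/span[1]/text()')
        print(title, price)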

E.g.: read the upcoming-movie information from https://movie.douban.com/cinema/later/chongqing/, draw a pie chart of the share of movies from each country and a bar chart of the number of people who want to see each movie.

import requests
from lxml import html
import pandas as pd
from matplotlib import pyplot as plt
# Use the SimHei font so Chinese labels display correctly
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def movie():
    movie_list = []
    # Target site URL
    url = 'https://movie.douban.com/cinema/later/chongqing/'
    # print(url)
    # Get the response from the site as a str
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36"}

    resp = requests.get(url, headers=headers)
    html_data = resp.text
    # Write the HTML page to a local file
    # with open('douban.html', 'w', encoding='utf-8') as f:
    #     f.write(html_data)

    # Extract the target information from the page
    selector = html.fromstring(html_data)
    ul_list = selector.xpath('//div[@id="showing-soon"]/div')
    print('There are {} movies in total'.format(len(ul_list)))

    # Iterate over ul_list
    # Note: the XPaths below start with //, so each call returns the values
    # for every movie on the page, not just the current item
    for li in ul_list:
        # Movie title
        title = li.xpath('//div[@id="showing-soon"]/div/div[@class="intro"]/h3/a/text()')
        # print(title)
        # Release date
        link = li.xpath('//div[@id="showing-soon"]/div/div[@class="intro"]/ul/li[1]/text()')
        # print(link)
        # Genre
        price = li.xpath('//div[@id="showing-soon"]/div/div[@class="intro"]/ul/li[2]/text()')

        # Country
        country = li.xpath('//div[@id="showing-soon"]/div/div[@class="intro"]/ul/li[3]/text()')
        coNum = country
        # Number of people who want to see each movie
        store = li.xpath('//div[@id="showing-soon"]/div/div[@class="intro"]/ul/li[@class="dt last"]/span/text()')
        want = []
        for i in store:
            # print(i)
            # Strip the "人想看" ("people want to see") suffix and convert to int
            i = int(i.replace('人想看',''))
            want.append(i)

    # Build one record per movie (use len(ul_list) instead of a hard-coded 22)
    for i in range(len(ul_list)):
        movie_list.append({
            'name':title[i],
            'type':price[i],
            'date':link[i],
            'want':want[i],
            'country':country[i]
        })

    # Tally how many movies come from each country
    counts = {}
    lk = []   # country labels
    ll = []   # number of movies per country
    print(coNum)
    for word in coNum:
        counts[word] = counts.get(word, 0) + 1
    print(len(counts))
    # Split the (country, count) pairs into the two parallel lists
    count = list(counts.items())
    for i in range(len(counts)):
        countrys, cons = count[i]
        cons = int(cons)
        countrys = str(countrys)
        lk.append(countrys)
        ll.append(cons)
        print(countrys)
        print(cons)

    # Pull the first slice out slightly; the list length must match the number of countries
    explode = [0.1] + [0] * (len(lk) - 1)
    # colors = ['red', 'purple','blue', 'yellow','gray','green','bl']
    plt.pie(ll, shadow=True, explode=explode, labels=lk, autopct='%1.1f%%')
    plt.legend(loc=2)
    plt.axis('equal')
    plt.show()


    # Sort by the number of people who want to see each movie
    movie_list.sort(key=lambda x: x['want'], reverse=True)

    # # Print each movie
    # for movie in movie_list:
    #     print(movie)
    #
    # # Bar chart of the five most-wanted movies
    # Movie names
    top5_movie = [movie_list[i] for i in range(5)]
    # x = []
    # for store in top10_store:
    #     x.append(store['store'])
    x = [x['name'] for x in top5_movie]
    # print(x)
    # Number of people who want to see each movie
    y = [x['want'] for x in top5_movie]
    # print(y)
    # for i in range(5):
    #     print(top5_movie[i])
    plt.barh(x, y)
    plt.show()

    # # Save to a csv file

    df = pd.DataFrame(movie_list)
    df.to_csv('douban.csv')

movie()
1. Count how many times each country appears and draw the pie chart (pie and bar charts were covered in the previous article); a Counter-based alternative to this tally is sketched after this list.
    counts = {}
    lk = []
    ll =[]
    print(coNum)
    for word in coNum:
        counts[word] = counts.get(word, 0) + 1
    print(len(counts))
    count = list(counts.items())
    for i in range(len(counts)):
        countrys, cons = count[i]
        cons = int(cons)
        countrys = str(countrys)
        lk.append(countrys)
        ll.append(cons)
        print(countrys)
        print(cons)

    explode = [0.1] + [0] * (len(lk) - 1)
    # colors = ['red', 'purple','blue', 'yellow','gray','green','bl']
    plt.pie(ll, shadow=True, explode=explode, labels=lk, autopct='%1.1f%%')
    plt.legend(loc=2)
    plt.axis('equal')
    plt.show()
2. Draw a bar chart of the top five movies by the number of people who want to see them
    top5_movie = [movie_list[i] for i in range(5)]
    # x = []
    # for store in top10_store:
    #     x.append(store['store'])
    x = [x['name'] for x in top5_movie]
    # print(x)
    # Number of people who want to see each movie
    y = [x['want'] for x in top5_movie]
    # print(y)
    # for i in range(5):
    #     print(top5_movie[i])
    plt.barh(x, y)
    plt.show()

    # # Save to a csv file

    df = pd.DataFrame(movie_list)
    df.to_csv('douban.csv')
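
The country tally in step 1 can also be written with collections.Counter from the standard library; a minimal sketch, assuming coNum is the same list of country strings collected in the loop above:

from collections import Counter

counts = Counter(coNum)       # maps each country to the number of its movies
lk = list(counts.keys())      # country labels for the pie chart
ll = list(counts.values())    # one slice value per country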
