「Python」Parsing

xpath

  1. Using xpath
    1. Install the lxml library: pip install lxml -i https://pypi.douban.com/simple
    2. Import lxml.etree: from lxml import etree
    3. Parse a local file: html_tree = etree.parse('XX.html')
    4. Parse a server response: html_tree = etree.HTML(response.read().decode('utf-8'))
    5. Run a query: html_tree.xpath(xpath_expression)
  2. Basic xpath syntax
    1. Path queries
      1. //: selects all descendant nodes, regardless of nesting level
      2. /: selects direct child nodes
    2. Predicate queries
      1. //div[@id]
      2. //div[@id="maincontent"]
    3. Attribute queries
      1. //@class
    4. Fuzzy queries
      1. //div[contains(@id, "he")]
      2. //div[starts-with(@id, "he")]
    5. Content queries
      1. //div/h1/text()
    6. Logical operators
      1. //div[@id="head" and @class="s_down"]
      2. //title | //price
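
The first demo below parses a local file, demo1.html, which is not included here. A minimal file consistent with the queries used (only the ids l1/l2 and class c1 are implied by the code; the text values are placeholders) might look like:

<!DOCTYPE html>
<html lang="en">
<body>
    <ul>
        <li id="l1" class="c1">Beijing</li>
        <li id="l2">Shanghai</li>
        <li id="c3">Shenzhen</li>
        <li>Wuhan</li>
    </ul>
</body>
</html>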
"""
xpath

Author:binxin
Date:2023/11/26 11:12
"""
from lxml import etree

# Parse a local file
tree = etree.parse('demo1.html')
# Find li elements under ul
# li_list = tree.xpath('//body/ul/li')

# Find li tags that have an id attribute
# text() extracts the tag's text content
# li_list = tree.xpath('//ul/li[@id]/text()')

# Find the li tag whose id is "l1"
# li_list = tree.xpath('//ul/li[@id="l1"]/text()')

# Get the class attribute value of the li tag whose id is "l1"
# li = tree.xpath('//ul/li[@id="l1"]/@class')

# li tags whose id contains "l"
# li_list = tree.xpath('//ul/li[contains(@id,"l")]/text()')

# li tags whose id starts with "l"
# li_list = tree.xpath('//ul/li[starts-with(@id,"l")]/text()')

# li tags with id "l1" and class "c1"
# li_list = tree.xpath('//ul/li[@id="l1" and @class="c1"]/text()')

li_list = tree.xpath('//ul/li[@id="l1"]/text() | //ul/li[@id="l2"]/text()')

print(li_list)
# Check the length of the result list
print(len(li_list))

# For a server response, use: etree.HTML(response.read().decode('utf-8'))

"""
xpath in practice — the Baidu homepage

Author:binxin
Date:2023/11/26 19:52
"""
import urllib.request
from lxml import etree

url = 'https://www.baidu.com/'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
}

request = urllib.request.Request(url=url, headers=headers)

response = urllib.request.urlopen(request)

content = response.read().decode('utf-8')

tree = etree.HTML(content)

# The search button's label is the value attribute of the input with id="su"
result = tree.xpath('//input[@id="su"]/@value')[0]

print(result)

"""
Downloading images

Author:binxin
Date:2023/11/26 20:06
"""
import os
import urllib.request
from lxml import etree


# URL pattern — page 1 vs. later pages:
# https://sc.chinaz.com/tupian/renwutupian.html
# https://sc.chinaz.com/tupian/renwutupian_2.html

def create_request(page):
    base_url = "https://sc.chinaz.com/tupian/renwutupian"
    if page == 1:
        url = base_url + '.html'
    else:
        url = f'{base_url}_{page}.html'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
    }

    request = urllib.request.Request(url=url, headers=headers)

    return request


def get_content(request):
    response = urllib.request.urlopen(request)

    content = response.read().decode('utf-8')

    return content


def down_load(content):
    tree = etree.HTML(content)
    # The site lazy-loads images: the real URL is in data-original, not src
    src_list = tree.xpath("//img[@class='lazy']/@data-original")
    name_list = tree.xpath("//img[@class='lazy']/@alt")
    os.makedirs('./demo3', exist_ok=True)  # make sure the target directory exists
    for src, name in zip(src_list, name_list):
        # data-original holds a protocol-relative URL, so prepend https:
        urllib.request.urlretrieve(f'https:{src}', f'./demo3/{name}.jpg')


if __name__ == '__main__':
    start_page = int(input("Start page: "))
    end_page = int(input("End page: "))

    for page in range(start_page, end_page + 1):
        request = create_request(page)

        content = get_content(request)

        down_load(content)

JsonPath

Syntax reference

The jsonpath library used below can be installed with pip install jsonpath.
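
The demos below read a local demo4.json, which is not shown here. The queries line up with the classic JSONPath bookstore example, so a plausible stand-in (an assumption, not the author's actual file) is:

{
    "store": {
        "book": [
            {"category": "reference", "author": "Nigel Rees", "title": "Sayings of the Century", "price": 8.95},
            {"category": "fiction", "author": "Evelyn Waugh", "title": "Sword of Honour", "price": 12.99},
            {"category": "fiction", "author": "Herman Melville", "title": "Moby Dick", "isbn": "0-553-21311-3", "price": 8.99},
            {"category": "fiction", "author": "J. R. R. Tolkien", "title": "The Lord of the Rings", "isbn": "0-395-19395-8", "price": 22.99}
        ],
        "bicycle": {"color": "red", "price": 19.95}
    }
}

With a file like this, the isbn filter returns the last two books and the price<10 filter returns the first and third.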

"""
jsonpath

Author:binxin
Date:2023/11/27 15:07
"""
import json
import jsonpath

obj = json.load(open('demo4.json', 'r', encoding='utf-8'))

# Authors of all the books in the store
# author_list = jsonpath.jsonpath(obj, '$.store.book[*].author')
# print(author_list)

# All authors in the document
# author_list = jsonpath.jsonpath(obj, '$..author')
# print(author_list)

# All elements under store
# tag_list = jsonpath.jsonpath(obj, '$.store.*')
# print(tag_list)

# All price values under store
# price_list = jsonpath.jsonpath(obj, '$.store..price')
# print(price_list)

# The third book
# book_3 = jsonpath.jsonpath(obj, '$.store.book[2]')
# print(book_3)

# The last book
# book_end = jsonpath.jsonpath(obj, '$..book[(@.length-1)]')
# print(book_end)

# The first two books
# book_list = jsonpath.jsonpath(obj, '$..book[0,1]')
# book_list = jsonpath.jsonpath(obj, '$..book[:2]')
# print(book_list)

# Filter: all books that have an isbn
# book_list = jsonpath.jsonpath(obj, '$..book[?(@.isbn)]')
# print(book_list)

# Filter: all books priced under 10
# book_list = jsonpath.jsonpath(obj, '$..book[?(@.price<10)]')
# print(book_list)

# All elements
e_list = jsonpath.jsonpath(obj, '$..*')
print(e_list)

"""
jsonpath — parsing Taopiaopiao (Taobao Movie)

Author:binxin
Date:2023/11/27 15:26
"""
import urllib.request
import json
import jsonpath

url = 'https://dianying.taobao.com/cityAction.json?activityId&_ksTS=1701070258998_104&jsoncallback=jsonp105&action=cityAction&n_s=new&event_submit_doGetAllRegion=true'

headers = {
    # HTTP/2 pseudo-headers (keys starting with ":") must stay commented out; urllib rejects them
    # ':authority': 'dianying.taobao.com',
    # ':method': 'GET',
    # ':path': '/cityAction.json?activityId&_ksTS=1701071322520_104&jsoncallback=jsonp105&action=cityAction&n_s=new&event_submit_doGetAllRegion=true',
    # ':scheme': 'https',
    'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
    # Keep Accept-Encoding commented out, or the response bytes would be compressed and decode() would fail
    # 'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Bx-V': '2.5.5',
    'Cookie': '_m_h5_tk=5e975e3548c9d6e5d6463f91db59a4d9_1700975872462; _m_h5_tk_enc=7710ed77e01a9cbbd33dbdb7d9c6360e; v=0; _samesite_flag_=true; cookie2=1ad7f9fcf9cdcd8634cb10379c879543; t=27f6a6a05aebe181481642298dfaec21; _tb_token_=5d73533e7793; tfstk=dYHvnyfjjUYDU8BxYZdkbqWZ76Kktxn2mqoCIP4c143-8qK4sscm6NU-AnzD_IutwVgzirMtbOEsfV-4SnRo0myaCeYnWpmq01lw-ef7E98uQRTH-_X_gpeZJAmVTGKRNQYkutMjLoOLunMot1oYDSUOIz6tQY63Mys0PO6_PoBwWvDdhvb3JlfX23CN_SZyCgciV; l=fBEg8NVPPJmjWMJhBO5IFurza779nIRb4PVzaNbMiIEGa6I5tFGDCNCT2DwkSdtjgTCxVeKyMAhYGdLHR3AgCc0c07kqm0S-3xvtaQtJe; isg=BJ-foZ6wBjK0UgIolxPukyW5LvMpBPOmsIAYtDHsuc6VwL9CONQn9iWeglC-3cse',
    'Referer': 'https://dianying.taobao.com/',
    'Sec-Ch-Ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"Windows"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}

request = urllib.request.Request(url=url, headers=headers)

response = urllib.request.urlopen(request)

content = response.read().decode('utf-8')

# Strip the JSONP wrapper, e.g. jsonp105(...), leaving plain JSON
content = content.split('(')[1].split(')')[0]

with open('demo5.json', 'w', encoding='utf-8') as fp:
    fp.write(content)


obj = json.load(open('demo5.json', 'r', encoding='utf-8'))

city_list = jsonpath.jsonpath(obj, '$..regionName')

print(city_list)

BeautifulSoup

  1. Import: from bs4 import BeautifulSoup
  2. Create the object
    1. From a server response: soup = BeautifulSoup(response.read().decode(), 'lxml')
    2. From a local file: soup = BeautifulSoup(open('1.html'), 'lxml')

Note: open() defaults to the system encoding (gbk on Chinese Windows), so the encoding should be specified explicitly.

  1. Locating nodes
    1. Find a node by tag name
      1. soup.a: only finds the first a tag
      2. soup.a.name
      3. soup.a.attrs: gets the tag's attributes and their values
    2. Functions
      1. find: returns a single object
        1. find('a'): only finds the first a tag
        2. find('a', title='name')
        3. find('a', class_='name'): note that class needs a trailing underscore
      2. find_all: returns a list
        1. find_all('a'): finds all a tags
        2. find_all(['a', 'span']): returns all a and span tags
        3. find_all('a', limit=2): only finds the first two a tags
      3. select: returns node objects matched by a CSS selector
        1. element
          1. p
        2. .class
          1. .firstname
        3. #id
          1. #firstname
        4. Attribute selectors
          1. [attribute]
            1. li = soup.select('li[class]')
          2. [attribute=value]
            1. li = soup.select('li[class="hengheng1"]')
        5. Hierarchy selectors
          1. element element (descendant)
            1. div p
          2. element > element (direct child)
            1. div > p
          3. element,element (union)
            1. div,p
  2. Node information
    1. Getting a node's content
      1. obj.string: returns None when other tags are nested inside the tag
      2. obj.get_text(): works even when tags are nested
    2. Node properties
      1. tag.name: gets the tag name
         tag = find('li')
         print(tag.name)
      2. tag.attrs: returns the attributes as a dictionary
    3. Getting a node's attributes
      1. obj.attrs.get('title')
      2. obj.get('title')
      3. obj['title']
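
The demo below parses a local demo6.html, which is not included here. A minimal file consistent with every selector used (the text values and exact placement of the a1/a2 attributes are assumptions inferred from the code) could be:

<!DOCTYPE html>
<html lang="en">
<body>
    <div>
        <ul>
            <li id="l1">Zhang San</li>
            <li id="l2">Li Si</li>
            <li>Wang Wu</li>
            <a href="" class="a1" title="a2">google</a>
            <span>hahaha</span>
        </ul>
    </div>
    <a href="" class="a2">baidu</a>
    <div id="d1">
        <span>hello</span>world
    </div>
    <p id="p1" class="c1">hey</p>
</body>
</html>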
"""
Basic use of bs4

Author:binxin
Date:2023/11/27 18:23
"""
from bs4 import BeautifulSoup

# Parse a local file
# open() would default to gbk here, so the encoding is passed explicitly
soup = BeautifulSoup(open('demo6.html', encoding='utf-8'), 'lxml')

# Find nodes by tag name
# Returns the first matching element
# print(soup.a)
# Get the tag's attributes and their values
# print(soup.a.attrs)

# bs4 functions
# find
# Returns the first matching element
# print(soup.find('a'))

# Find the a tag whose title attribute is "a2"
# print(soup.find('a',title='a2'))

# Find the tag by its class value; note the trailing underscore in class_
# print(soup.find('a', class_="a1"))

# find_all
# Returns all a tags as a list
# print(soup.find_all('a'))

# Return all a and span tags
# print(soup.find_all(['a', 'span']))

# Only return the first few matches
# print(soup.find_all('li',limit=2))

# select
# Returns a list of all matches
# print(soup.select('a'))

# . selects by class
# print(soup.select('.a2'))

# # selects by id
# print(soup.select('#l1'))

# Attribute selectors
# li tags that have an id attribute
# print(soup.select('li[id]'))

# li tags with id="l2"
# print(soup.select('li[id="l2"]'))

# Hierarchy selectors
# Descendant selector
# li tags anywhere under div
# print(soup.select('div li'))

# Child selector: direct children only
# print(soup.select('div > ul > li'))

# Find both a tags and li tags
# print(soup.select('a,li'))

# Node information
# Getting a node's content
# obj = soup.select('#d1')[0]
# If the tag contains nested tags besides text, string returns None but get_text() still works
# print(obj.get_text())
# print(obj.string)

# Node properties
obj = soup.select('#p1')[0]
# name is the tag name
# print(obj.name)
# attrs returns the attributes as a dictionary
# print(obj.attrs)

# Getting a node's attributes
obj = soup.select('#p1')[0]

print(obj.attrs.get('class'))
print(obj.get('class'))
print(obj['class'])

"""
Parsing with bs4

Author:binxin
Date:2023/11/28 10:37
"""
import urllib.request
from bs4 import BeautifulSoup

url = 'https://ssr1.scrape.center/'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
}

request = urllib.request.Request(url=url, headers=headers)

response = urllib.request.urlopen(request)

content = response.read().decode('utf-8')

soup = BeautifulSoup(content, 'lxml')

# Equivalent xpath: //h2[@class='m-b-sm']/text()
name_list = soup.select('h2[class="m-b-sm"]')

for name in name_list:
    print(name.get_text())
