find_all 搜索的是全部满足条件的节点,find 搜索的是满足条件的第一个节点
思路如下
# Python
# Build a BeautifulSoup object from an HTML document string.
soup = BeautifulSoup(
html_doc, # the HTML document string
'html.parser', # which parser to use
from_encoding='utf8') # declared encoding of the HTML document
# Find every node whose tag is <a>.
soup.find_all('a')
# Find every <a> node whose href is exactly '/view/123.htm',
# then every <a> whose href matches the /view/<digits>.htm pattern.
soup.find_all('a', href='/view/123.htm')
soup.find_all('a', href=re.compile(r'/view/\d+\.htm'))
# Find every <div> node whose CSS class is "abc" and whose text is "Python".
soup.find_all('div', class_='abc', string='Python')
# Accessing a node returned by find / find_all:
# tag name of the found node
node.name
# value of the node's href attribute
node['href']
# text content (link text) of the node
node.get_text()
https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# Sample HTML: the "three little sisters" document from the official
# BeautifulSoup documentation. The original paste had every "</" stripped
# (e.g. "storytitle>head>"); the closing tags are restored here so the
# parser sees well-formed markup.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
运行示例
# Parse the document. html_doc is already a str (unicode), so from_encoding
# must not be passed: bs4 warns "Your from_encoding will be ignored" and
# discards it for unicode markup.
soup = BeautifulSoup(html_doc, "html.parser")
print("获取所有的链接")
links = soup.find_all('a')  # every <a> tag node in the document
for link in links:
    print(link.name, link['href'], link.get_text())
运行结果
获取所有的链接
a http://example.com/elsie Elsie
a http://example.com/lacie Lacie
a http://example.com/tillie Tillie
获取单一链接数据
# find() returns only the first node matching the tag/attribute filter.
print("获取 http://example.com/lacie 的链接")
target = soup.find('a', href="http://example.com/lacie")
print(target.name, target['href'], target.get_text())
运行示例
获取 http://example.com/lacie 的链接
a http://example.com/lacie Lacie
使用正则表达式
# An attribute filter may be a compiled regex; the first href containing
# "sie" wins (here: .../elsie).
print("正则表达式")
matched = soup.find('a', href=re.compile(r'sie'))
print(matched.name, matched['href'], matched.get_text())
运行结果
正则表达式
a http://example.com/elsie Elsie
根据 p 段落 class 的内容
print("根据 p 段落 class 的内容")
# "class" is a Python keyword, so BeautifulSoup spells the argument class_.
title_para = soup.find('p', class_="title")
print(title_para.name, title_para.get_text())
根据 p 段落 class 的内容
p The Dormouse's story
# coding:utf8
"""BeautifulSoup demo: find_all / find by tag, attribute, regex and CSS class."""
from bs4 import BeautifulSoup
import re

# The "three little sisters" sample document from the official bs4 docs.
# The original paste had ALL tags stripped, so find_all('a') found nothing
# and soup.find(...) returned None, crashing the prints below; the markup
# is restored here.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

# html_doc is a str, so from_encoding must not be passed
# (bs4 warns and ignores it for unicode markup).
soup = BeautifulSoup(html_doc, "html.parser")

print("获取所有的链接")
links = soup.find_all('a')          # every <a> node
for link in links:
    print(link.name, link['href'], link.get_text())

print("获取 http://example.com/lacie 的链接")
link_node = soup.find('a', href="http://example.com/lacie")  # first exact match
print(link_node.name, link_node['href'], link_node.get_text())

print("正则表达式")
link_node = soup.find('a', href=re.compile(r'sie'))  # first href containing "sie"
print(link_node.name, link_node['href'], link_node.get_text())

print("根据 p 段落 class 的内容")
# "class" is a Python keyword, so the keyword argument is class_
p_node = soup.find('p', class_="title")
print(p_node.name, p_node.get_text())
运行结果
获取所有的链接
a http://example.com/elsie Elsie
a http://example.com/lacie Lacie
a http://example.com/tillie Tillie
获取 http://example.com/lacie 的链接
a http://example.com/lacie Lacie
正则表达式
a http://example.com/elsie Elsie
根据 p 段落 class 的内容
p The Dormouse's story