Bs4 (beautifulsoup4): a library for extracting data from HTML or XML documents.
Official documentation
Differences between Bs4 and XPath
XPath: locates data by describing a path through the document
Bs4: retrieves data through ready-made wrapper methods
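To make the difference concrete, here is a minimal sketch (the html string and class name are made up for illustration) that extracts the same text both ways:
from lxml import etree
from bs4 import BeautifulSoup
html = '<html><body><p class="title">hello</p></body></html>'
# XPath: describe a path to the data
tree = etree.HTML(html)
print(tree.xpath('//p[@class="title"]/text()'))  # ['hello']
# Bs4: call a ready-made search method
soup = BeautifulSoup(html, 'lxml')
print(soup.find('p', class_='title').string)  # hello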
Installing the third-party libraries
pip install beautifulsoup4
pip install lxml
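The weather-report example at the end of this article also uses the html5lib parser, which is a separate install:
pip install html5lib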
# Import
from bs4 import BeautifulSoup
# Create the object: pass in the page source and a parser
soup = BeautifulSoup(html_doc, 'lxml')
# Import the library
from bs4 import BeautifulSoup
# Mock data
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
# features specifies the parser
soup = BeautifulSoup(html_doc, features='lxml')
# soup.title: looks up the title tag
print(soup.title)  # <title>The Dormouse's story</title>
print(type(soup.title))  # <class 'bs4.element.Tag'>
print(soup.p)  # <p class="title"><b>The Dormouse's story</b></p>
print(soup.a)  # <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>
# Text content inside the tag
title_tag = soup.title
print(title_tag.string)  # The Dormouse's story
print(type(title_tag.string))  # <class 'bs4.element.NavigableString'>
print(type(soup))  # <class 'bs4.BeautifulSoup'>
html = '<b>Keep on learning Python</b>'
soup2 = BeautifulSoup(html, "lxml")
print(soup2.b.string)  # Keep on learning Python
print(type(soup2.b.string))  # <class 'bs4.element.NavigableString'>
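One thing to note about .string: it only returns a value when a tag has a single string inside it; on a tag with several children it returns None. get_text() collects all the nested text instead. A minimal sketch with the soup object built above:
# html has many children, so .string gives None,
# but get_text() concatenates every piece of nested text
print(soup.html.string)  # None
print(soup.html.get_text())  # the full text of the document as one string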
# Import the library
from bs4 import BeautifulSoup
# Mock data
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
# features specifies the parser
soup = BeautifulSoup(html_doc, features='lxml')
1)contents: returns a list of all the direct children
print(soup.head.contents)  # [<title>The Dormouse's story</title>]
2)children: returns an iterator over the direct children
# Loop to pull the items out of the iterator
for i in soup.head.children:
    print(i)  # <title>The Dormouse's story</title>
3)descendants: returns a generator that walks every descendant, recursively
for i in soup.head.descendants:
    print(i)
# <title>The Dormouse's story</title>
# The Dormouse's story
1)string: gets the text inside a tag
# Only retrieves a single string
print(soup.title.string)  # The Dormouse's story
print(soup.head.string)  # The Dormouse's story
print(soup.html.string)  # None (html has more than one child)
2)strings: returns a generator object used to get the text of multiple tags
# It returns a generator
print(soup.html.strings)  # <generator object ...>
# Loop to pull the items out of the generator; with strings the extra newlines are kept
for i in soup.html.strings:
    print(i)
# The Dormouse's story
#
#
#
#
# The Dormouse's story
#
#
# Once upon a time there were three little sisters; and their names were
#
# Elsie
# ,
#
# Lacie
# and
#
# Tillie
# ;
# and they lived at the bottom of a well.
#
#
# ...
#
#
3)stripped_strings: like strings, but with the surrounding whitespace stripped
print(soup.html.stripped_strings)  # <generator object ...>
# Loop to pull the items out of the generator
for i in soup.html.stripped_strings:
    print(i)
# The Dormouse's story
# The Dormouse's story
# Once upon a time there were three little sisters; and their names were
# Elsie
# ,
# Lacie
# and
# Tillie
# ;
# and they lived at the bottom of a well.
# ...
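When one clean string is enough, get_text() can strip and join in a single call instead of looping over stripped_strings. A small sketch:
# strip=True trims each fragment, separator joins the fragments
print(soup.html.get_text(separator=' ', strip=True))
# The Dormouse's story The Dormouse's story Once upon a time ...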
1)parent: gets the direct parent node
print(soup.title.parent)  # <head><title>The Dormouse's story</title></head>
# In Bs4, the parent of the html tag is the BeautifulSoup object itself
print(soup.html.parent)
# <html><head><title>The Dormouse's story</title></head>
# <body>
# <p class="title"><b>The Dormouse's story</b></p>
# <p class="story">Once upon a time there were three little sisters; and their names were
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
# <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
# <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
# and they lived at the bottom of a well.</p>
# <p class="story">...</p>
# </body></html>
print(type(soup.html.parent))  # <class 'bs4.BeautifulSoup'>
2)parents: gets all the ancestor nodes
print(soup.a.parents)  # <generator object ...>
for i in soup.a.parents:
    print(i.name, soup.name)
# p [document]
# body [document]
# html [document]
# [document] [document]
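Besides the .parent and .parents attributes, bs4 also provides find_parent() and find_parents() for searching upwards for a specific tag. A short sketch with the same soup:
# Search upwards for the nearest enclosing p tag
print(soup.a.find_parent('p')['class'])  # ['story']
# find_parents() returns every matching ancestor as a list
print([tag.name for tag in soup.a.find_parents()])  # ['p', 'body', 'html', '[document]']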
# Import the library
from bs4 import BeautifulSoup
# Mock data
html_doc = '''
<a><b>bbb</b><c>ccc</c><d>ddd</d></a>
'''
# features specifies the parser
soup = BeautifulSoup(html_doc, features='lxml')
1)next_sibling: the next sibling node
# The node immediately adjacent
print(soup.b.next_sibling)  # <c>ccc</c>
2)previous_sibling: the previous sibling node
print(soup.c.previous_sibling)  # <b>bbb</b>
3)next_siblings: all the following sibling nodes
for i in soup.b.next_siblings:
    print(i)
# <c>ccc</c>
# <d>ddd</d>
4)previous_siblings: all the preceding sibling nodes
for i in soup.d.previous_siblings:
    print(i)
# <c>ccc</c>
# <b>bbb</b>
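One caveat: in real, pretty-printed HTML the whitespace between tags is itself a text node, so next_sibling is often a newline string rather than the next tag. find_next_sibling() skips straight to the next tag. A small sketch (soup3 is a throwaway example):
# With a newline between the tags, the direct sibling is the '\n' text node
soup3 = BeautifulSoup('<a><b>bbb</b>\n<c>ccc</c></a>', 'lxml')
print(repr(soup3.b.next_sibling))  # '\n'
# find_next_sibling() skips text nodes and returns the next tag
print(soup3.b.find_next_sibling())  # <c>ccc</c>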
# Import the library
from bs4 import BeautifulSoup
# Mock data (the markup was reconstructed; the href values are illustrative placeholders)
html_doc = """
<table class="tablelist" cellpadding="0" cellspacing="0">
    <tr class="h">
        <td>职位名称</td><td>职位类别</td><td>人数</td><td>地点</td><td>发布时间</td>
    </tr>
    <tr class="even">
        <td><a href="position_detail.php?id=1">22989-金融云区块链高级研发工程师(深圳)</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-25</td>
    </tr>
    <tr class="odd">
        <td><a href="position_detail.php?id=2">22989-金融云高级后台开发</a></td>
        <td>技术类</td><td>2</td><td>深圳</td><td>2017-11-25</td>
    </tr>
    <tr class="even">
        <td><a href="position_detail.php?id=3">SNG16-腾讯音乐运营开发工程师(深圳)</a></td>
        <td>技术类</td><td>2</td><td>深圳</td><td>2017-11-25</td>
    </tr>
    <tr class="odd">
        <td><a href="position_detail.php?id=4">SNG16-腾讯音乐业务运维工程师(深圳)</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-25</td>
    </tr>
    <tr class="even">
        <td><a href="position_detail.php?id=5">TEG03-高级研发工程师(深圳)</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-24</td>
    </tr>
    <tr class="odd">
        <td><a href="position_detail.php?id=6">TEG03-高级图像算法研发工程师(深圳)</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-24</td>
    </tr>
    <tr class="even">
        <td><a href="position_detail.php?id=7">TEG11-高级AI开发工程师(深圳)</a></td>
        <td>技术类</td><td>4</td><td>深圳</td><td>2017-11-24</td>
    </tr>
    <tr class="odd">
        <td><a href="position_detail.php?id=8">15851-后台开发工程师</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-24</td>
    </tr>
    <tr class="even">
        <td><a href="position_detail.php?id=9">15851-后台开发工程师</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-24</td>
    </tr>
    <tr class="odd">
        <td><a href="position_detail.php?id=10">SNG11-高级业务运维工程师(深圳)</a></td>
        <td>技术类</td><td>1</td><td>深圳</td><td>2017-11-24</td>
    </tr>
</table>
"""
# features specifies the parser
soup = BeautifulSoup(html_doc, features='lxml')
1、find_all()
Returns every matched tag as a list
2、find()
Returns only the first matched result
# 1. Get the first tr tag
tr = soup.find('tr')  # find returns only the first match
print(tr)
# 2. Get all the tr tags
trs = soup.find_all('tr')  # returns a list with one entry per tr
print(trs)
# 3. Get the second tr tag
tr2 = soup.find_all('tr')[1]  # it is a list, so just index into it
print(tr2)
# 4. Get the tags with class="odd"
# Method 1
odd = soup.find_all('tr', class_='odd')
for tr in odd:
    print(tr)
# Method 2
odd2 = soup.find_all('tr', attrs={'class': 'odd'})
for tr in odd2:
    print(tr)
# 5. Get the href attribute value of every a tag
lst = soup.find_all('a')
for a in lst:
    print(a['href'])
# 6. Get all the job titles
lst_data = soup.find_all('a')
for a in lst_data:
    print(a.string)
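find_all() accepts more than a plain tag name: compiled regular expressions, attribute filters, and a limit argument all work as well. A short sketch against the same soup:
import re
# A regular expression matches every tag whose name starts with "t"
print(sorted({tag.name for tag in soup.find_all(re.compile('^t'))}))  # ['table', 'td', 'tr']
# limit stops the search after the first N results
print(len(soup.find_all('tr', limit=3)))  # 3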
# Import the library
from bs4 import BeautifulSoup
# Mock data
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
# features specifies the parser
soup = BeautifulSoup(html_doc, features='lxml')
Selector syntax reference: http://www.w3cmap.com/cssref/css-selectors.html
# Get the tags whose class is sister
print(soup.select('.sister'))
# Get the tag whose id is link1
print(soup.select('#link1'))
# Get the text inside the title tag
print(soup.select('title')[0].string)
# Get the a tags nested under p tags
print(soup.select('p a'))
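select() always returns a list, while select_one() returns just the first match, which saves indexing. A couple more selector forms as a sketch:
# select_one() returns the first matching tag (or None)
print(soup.select_one('#link1'))
# Attribute selector: every a tag that has an href attribute
print(soup.select('a[href]'))
# Direct-child combinator
print(soup.select('head > title'))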
Target site: http://www.weather.com.cn/textFC/hb.shtml
Goal: scrape the weather for the whole country, including each city name and its minimum temperature, and save the data to a csv file.
# Imports
import requests
from bs4 import BeautifulSoup
import csv
# Collected rows for the table
lst = []
# Fetch the page source
def get_html(url):
    # Send the request
    html = requests.get(url)
    # The response comes back garbled, so fix the encoding
    html.encoding = 'utf-8'
    # Pull out the page source
    html = html.text
    # Return it to the caller
    return html
# Parse the page data
def parse_html(html):
    # Create the object (html5lib tolerates this site's messy markup)
    soup = BeautifulSoup(html, 'html5lib')
    # Locate the main container
    conMidtab = soup.find('div', class_='conMidtab')
    # print(conMidtab)
    tables = conMidtab.find_all('table')
    for table in tables:
        # Skip the two header rows
        trs = table.find_all('tr')[2:]
        for index, tr in enumerate(trs):
            dic = {}
            # Grab the matching td tags
            if index == 0:  # Is this the first city of the province?
                # First city: column 0 holds the province, so the city is in column 1
                city_td = tr.find_all('td')[1]
            else:
                # Other cities: the city is in column 0
                city_td = tr.find_all('td')[0]
            # The minimum temperature sits in the second-to-last column
            temp_td = tr.find_all('td')[-2]
            # print(city_td, temp_td)
            # Pull the text content out of those tags
            dic['city'] = list(city_td.stripped_strings)[0]
            dic['temp'] = temp_td.string
            lst.append(dic)
# Save the data
def save_data():
    # Define the header
    head = ('city', 'temp')
    # Write the csv file
    with open('weather.csv', 'w', encoding='utf-8-sig', newline='') as f:
        # Create the csv writer object
        writer = csv.DictWriter(f, fieldnames=head)
        # Write the header row
        writer.writeheader()
        # Write the data rows
        writer.writerows(lst)
# Collect the urls of the different regions
def area(link):
    # Fetch the page source
    link = get_html(link)
    # Create the object
    soup = BeautifulSoup(link, 'html5lib')
    # Locate the region tab list
    conMidtab = soup.find('ul', class_='lq_contentboxTab2')
    # Find the a links
    tags = conMidtab.find_all('a')
    # url list
    hrefs = []
    # Build the full url for each region
    for i in tags:
        hrefs.append('http://www.weather.com.cn' + i.get('href'))
    # Print the url list
    # print(hrefs)
    # Return the list
    return hrefs
# Main logic
def main():
    # Entry url
    link = 'http://www.weather.com.cn/textFC/hb.shtml'
    # Urls of the different regions
    urls = area(link)
    # print(urls)
    for url in urls:
        # Fetch the page source
        html = get_html(url)
        # Parse the data
        parse_html(html)
    # Save the content once every region has been parsed
    save_data()
# Run the main program
if __name__ == '__main__':
    main()
This is a record of my learning process; discussion and exchange are welcome. Please respect the original work and credit the source when reposting~