Review of yesterday:
I. Scraping the Douban Movie Top 250
1. Fetch the movie pages
2. Parse and extract the movie info
3. Save the data
II. The Selenium request library
Drives a real browser to send requests to the target site and fetch the response data.
- No need to analyze a complex communication flow
- Can execute JS code
- Can fetch dynamically rendered data
III. Using Selenium
driver = webdriver.Chrome()  # launch the driver-controlled browser
# implicit wait
driver.get('URL')  # send a request to a given site
# explicit wait
driver.close()
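The two waits in the comments above behave differently: `implicitly_wait` applies globally to every element lookup, while an explicit `WebDriverWait` polls for one specific condition. A minimal sketch of both; the id `kw` is assumed here to be Baidu's search box:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
try:
    # implicit wait: every find_element call retries for up to 10 seconds
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com')

    # explicit wait: block until this one element is present (up to 10 seconds)
    input_tag = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, 'kw'))  # 'kw': assumed search-box id
    )
    print(input_tag.tag_name)
finally:
    driver.close()
```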
IV. Selectors
element: find one
elements: find all
by_id
by_class_name
by_name
by_link_text
by_partial_link_text
by_css_selector
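Each `by_*` suffix above corresponds to a `find_element_by_*` / `find_elements_by_*` method in Selenium 3; in Selenium 4 those helpers were removed in favour of `find_element(By..., value)`. A small sketch of the mapping, reusing JD's `key` search-box id from the examples below:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.jd.com')

    # Selenium 3 style:                  Selenium 4 style:
    # find_element_by_id('key')          find_element(By.ID, 'key')
    # find_element_by_class_name(c)      find_element(By.CLASS_NAME, c)
    # find_element_by_css_selector(s)    find_element(By.CSS_SELECTOR, s)
    # find_element_by_link_text(t)       find_element(By.LINK_TEXT, t)
    search_box = driver.find_element(By.ID, 'key')  # JD's search box
    print(search_box.tag_name)
finally:
    driver.close()
```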
**************************************
Today's content:
I. The rest of Selenium
Fetching JD product info:
Basic version:
```python
import time
from selenium import webdriver                    # web driver
from selenium.webdriver.common.keys import Keys   # keyboard actions

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('http://www.jd.com')

    # find the search box, type the query, press Enter
    input_tag = driver.find_element_by_id('key')
    input_tag.send_keys('墨菲定律')
    input_tag.send_keys(Keys.ENTER)
    time.sleep(5)

    good_list = driver.find_elements_by_class_name('gl-item')
    for good in good_list:
        good_name = good.find_element_by_css_selector('.p-name em').text
        good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')
        good_price = good.find_element_by_class_name('p-price').text
        good_commit = good.find_element_by_class_name('p-commit').text
        good_content = f'''
        Product name: {good_name}
        Product link: {good_url}
        Product price: {good_price}
        Product reviews: {good_commit}
        '''
        print(good_content)
        with open('jd.text', 'a', encoding='utf-8') as f:
            f.write(good_content)

    print('All products recorded!')
finally:
    driver.close()
```
Turbo version:
```python
import time
from selenium import webdriver                    # web driver
from selenium.webdriver.common.keys import Keys   # keyboard actions

def get_good(driver):
    num = 1
    try:
        time.sleep(5)

        # scroll down so the lazy-loaded items render
        js_code = '''
            window.scrollTo(0, 5000)
        '''
        driver.execute_script(js_code)
        time.sleep(5)

        good_list = driver.find_elements_by_class_name('gl-item')
        for good in good_list:
            # product name
            good_name = good.find_element_by_css_selector('.p-name em').text
            # product link
            good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')
            # product price
            good_price = good.find_element_by_class_name('p-price').text
            # product reviews
            good_commit = good.find_element_by_class_name('p-commit').text
            good_content = f'''
            num: {num}
            Product name: {good_name}
            Product link: {good_url}
            Product price: {good_price}
            Product reviews: {good_commit}
            '''
            print(good_content)
            with open('jd.text', 'a', encoding='utf-8') as f:
                f.write(good_content)
            num += 1

        print('All product info recorded!')

        # click "next page" and recurse
        next_tag = driver.find_element_by_class_name('pn-next')
        next_tag.click()
        time.sleep(5)
        get_good(driver)
    finally:
        driver.close()

if __name__ == '__main__':
    driver = webdriver.Chrome()
    try:
        driver.implicitly_wait(10)
        driver.get('http://www.jd.com')

        # type the query and press Enter
        input_tag = driver.find_element_by_id('key')
        input_tag.send_keys('墨菲定律')
        input_tag.send_keys(Keys.ENTER)

        get_good(driver)
    finally:
        driver.close()
```
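One design note on the version above: `get_good` calls `driver.close()` in its own `finally` (so the driver gets closed twice) and recurses once per page, which would eventually hit Python's recursion limit on a very long result set. A loop-based sketch of the same pagination idea, under the same assumptions about JD's class names (`gl-item`, `pn-next`):

```python
import time

def get_goods_loop(driver, max_pages=5):
    # paginate with a loop instead of recursion; the driver is assumed
    # to already be on a JD search-results page
    for page in range(max_pages):
        time.sleep(5)
        driver.execute_script('window.scrollTo(0, 5000)')  # trigger lazy loading
        time.sleep(5)
        for good in driver.find_elements_by_class_name('gl-item'):
            print(good.find_element_by_css_selector('.p-name em').text)
        try:
            driver.find_element_by_class_name('pn-next').click()  # next page
        except Exception:
            break  # no next-page button: last page reached
```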
II. BeautifulSoup4
I. The rest of Selenium
1. Element interaction:
- Click and clear
click
clear
- ActionChains
An action-chain object; you pass the driver into it.
It can then perform a series of queued-up actions in order.
- Switching into an iframe
driver.switch_to.frame('iframeResult')
- Executing JS code
execute_script()
## Click and clear:
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys  # keyboard actions
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.jd.com/')
    time.sleep(5)

    # click and clear
    input1 = driver.find_element_by_id('key')
    input1.send_keys('围城')

    # find the search button by class name
    search = driver.find_element_by_class_name('button')
    search.click()  # click the search button
    time.sleep(3)

    input2 = driver.find_element_by_id('key')
    input2.clear()  # clear the search box
    time.sleep(1)
    input2.send_keys('墨菲定律')
    input2.send_keys(Keys.ENTER)
    time.sleep(10)
finally:
    driver.close()
```
## ActionChains (action chains):
```python
from selenium import webdriver
from selenium.webdriver import ActionChains
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    time.sleep(5)

    # deprecated method:
    # driver.switch_to_frame()
    # current method:
    driver.switch_to.frame('iframeResult')
    time.sleep(1)

    # get an action-chain object
    action = ActionChains(driver)

    # source square, id: draggable
    source = driver.find_element_by_id('draggable')
    # target square, id: droppable
    target = driver.find_element_by_id('droppable')

    # ...continue with either option below
```
Option 1: instant move
```python
    # move the source square onto the target square in one go;
    # the action chain is queued up and only runs when perform() is called
    action.drag_and_drop(source, target).perform()
    time.sleep(10)
finally:
    driver.close()
```
Option 2: move step by step
```python
    print(source.size)      # size
    print(source.tag_name)  # tag name
    print(source.text)      # text
    print(source.location)  # coordinates: x and y

    # work out the horizontal distance to slide
    distance = target.location['x'] - source.location['x']

    # press and hold the source square
    ActionChains(driver).click_and_hold(source).perform()

    s = 0
    while s < distance:
        # move 2px to the right on each step
        ActionChains(driver).move_by_offset(xoffset=2, yoffset=0).perform()
        s += 2
        time.sleep(0.1)

    # release the source square
    ActionChains(driver).release().perform()
    time.sleep(10)
finally:
    driver.close()
```
## Executing JS code:
```python
from selenium import webdriver
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com/')
    driver.execute_script(
        '''
        alert("Zhejiang Wanli University is the best university in Zhejiang!")
        '''
    )
    time.sleep(10)
finally:
    driver.close()
```
II. BeautifulSoup4
BS4
1. What is BeautifulSoup?
bs4 is a parsing library: given a parser, it helps us extract the data we want from a document.
2. Why use bs4?
Because its concise syntax lets you quickly extract exactly the content you want.
3. Parser types
- lxml
- html.parser
4. Installation and usage
Install the parser:
pip3 install lxml
Install the parsing library:
pip3 install bs4  # a shim that installs beautifulsoup4
html_doc = """The Dormouse's story $37
Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.
...
""" from bs4 import BeautifulSoup # python自带的解析库 # soup = BeautifulSoup(html_doc, 'html.parser') # 调用bs4得到一个soup对象 soup = BeautifulSoup(html_doc, 'lxml') # bs4对象 print(soup) # bs4类型 print(type(soup)) # 美化功能 html = soup.prettify() print(html)
html_doc = """The Dormouse's story $37
Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.
...
""" from bs4 import BeautifulSoup soup = BeautifulSoup(html_doc, 'lxml')
- Traversing the document tree
1. Direct attribute access *****
```python
print(soup.html)
print(type(soup.html))
print(soup.a)
print(soup.p)
```
2. Getting a tag's name
print(soup.a.name)
3. Getting a tag's attributes *****
```python
print(soup.a.attrs)          # all of the a tag's attributes
print(soup.a.attrs['href'])
```
4. Getting a tag's text content *****
print(soup.p.text) # $37
5. Nested selection
print(soup.html.body.p)
6. Children and descendants
```python
print(soup.p.children)        # returns an iterator
print(list(soup.p.children))  # [<b>$37</b>]
```
7. Parent and ancestors
```python
print(soup.b.parent)
print(soup.b.parents)        # generator of ancestors
print(list(soup.b.parents))
```
8. Siblings
```python
print(soup.a)

# the next sibling node
print(soup.a.next_sibling)

# all following siblings; returns a generator
print(soup.a.next_siblings)
print(list(soup.a.next_siblings))

# the previous sibling node
print(soup.a.previous_sibling)

# all preceding siblings; returns a generator
print(list(soup.a.previous_siblings))
```
- Searching the document tree
find: returns the first match
find_all: returns all matches
Tag and attribute lookup:
html_doc = """The Dormouse's story $37
Once upon a time there were three little sisters; and their names wereElsieLacie andTillieand they lived at the bottom of a well.
...
""" from bs4 import BeautifulSoup soup = BeautifulSoup(html_doc, 'lxml')
name: tag name
attrs: attribute matching
text: text matching
Searching the document with find and find_all
1. String filters
```python
p = soup.find(name='p')
p_s = soup.find_all(name='p')
print(p)
print(p_s)
```
name + attrs
```python
p = soup.find(name='p', attrs={"id": "p"})
print(p)
```
name + text
```python
tag = soup.find(name='title', text="The Dormouse's story")
print(tag)
```
name + attrs + text
```python
tag = soup.find(name='a', attrs={"class": "sister"}, text="Elsie")
print(tag)
```
2. Regex filters
Matching with the re module
import re
name
```python
# match tags whose name contains "a" via the re module
a = soup.find(name=re.compile('a'))
print(a)
a_s = soup.find_all(name=re.compile('a'))
print(a_s)
```
attrs
```python
a = soup.find(attrs={"id": re.compile('link')})
print(a)
```
3. List filters
Match against any of the items in a list
```python
print(soup.find(name=['a', 'p', 'html', re.compile('a')]))
print(soup.find_all(name=['a', 'p', 'html', re.compile('a')]))
```
4. Bool filters
Match with True
print(soup.find(name=True, attrs={"id": True}))
5. Method filters
Used when you need to filter on attributes a tag must have and attributes it must not have.
```python
def have_id_not_class(tag):
    # print(tag.name)
    if tag.name == 'p' and tag.has_attr("id") and not tag.has_attr("class"):
        return tag

# print(soup.find_all(name=<function object>))
print(soup.find_all(name=have_id_not_class))
```
Extra notes:
id:
```python
a = soup.find(id='link2')
print(a)
```
class:
```python
p = soup.find(class_='sister')
print(p)
```
Data formats:
JSON data:
{ "name": "tank" }
XML data:
<name>tank</name>
HTML:
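To make these formats concrete, here is a minimal sketch that parses the JSON and XML snippets above with Python's standard library (HTML is what bs4 itself is for):

```python
import json
import xml.etree.ElementTree as ET

# JSON: text -> dict
data = json.loads('{"name": "tank"}')
print(data['name'])  # tank

# XML: text -> element tree
root = ET.fromstring('<name>tank</name>')
print(root.tag, root.text)  # name tank
```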
Generators: `yield value` (puts the value into the generator)
```python
def f():
    # return 1
    yield 1
    yield 2
    yield 3

g = f()
print(g)  # a generator object

for line in g:
    print(line)
```
## Simulating the browser's back and forward buttons:
```python
import time
from selenium import webdriver

browser = webdriver.Chrome()
browser.get('https://www.baidu.com')
browser.get('https://www.taobao.com')
browser.get('http://www.sina.com.cn/')
```
Go back:
```python
browser.back()
time.sleep(5)
```
Go forward:
```python
browser.forward()
time.sleep(3)
browser.close()
```