# When driving a browser with Selenium for scraping, we can use find_elements
# together with presence_of_element_located to locate elements and act on them.
# This example clicks every button in a list on a Bilibili video page, in turn.
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
try:
    driver.get("https://www.bilibili.com/video/av24004208/")
    # Implicit wait of 20s: give every element in the part list time to
    # appear in the DOM before find_element(s) gives up.
    driver.implicitly_wait(20)
    # Count the <li> entries once, then click each one by its 1-based index.
    num = len(driver.find_elements(by=By.XPATH, value='//*[@class="list-box"]/li'))
    for i in range(num):
        try:
            # XPath indices are 1-based, hence i + 1.
            path = '//*[@class="list-box"]/li[' + str(i + 1) + ']'
            driver.find_element(by=By.XPATH, value=path).click()
            time.sleep(2)  # crude pause so the clicked part starts loading
        except Exception as e:
            # Best-effort: a stale/obscured entry shouldn't abort the whole loop.
            print(e)
finally:
    # Always release the browser process, even if navigation fails.
    driver.quit()
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
try:
    driver.get("https://www.bilibili.com/video/av24004208/")
    # Explicit wait: poll every 0.2s, for at most 10s, until the link titled
    # "p3" is present in the DOM, then click it.
    # NOTE(review): presence_of_element_located only guarantees the node is in
    # the DOM, not that it is visible/clickable — consider
    # EC.element_to_be_clickable if clicks intermittently fail.
    element = WebDriverWait(driver, 10, 0.2).until(
        EC.presence_of_element_located((By.XPATH, '//*[@class="list-box"]/li/a[@title="p3"]'))
    )
    print(element)
    element.click()
    time.sleep(1)  # brief pause so the click's effect is visible
except Exception as e:
    # TimeoutException if the element never appears; report and move on.
    print(e)
finally:
    # Always release the browser process.
    driver.quit()