Day 03 Notes and Homework

My Notes, Part 4

Yesterday's review:

I. Scraping the Douban Movies Top 250

1. Fetch the movie pages

2. Parse and extract the movie info

3. Save the data

II. The selenium request library

Drives a real browser to send requests to the target site and fetch the response data.

-- no need to analyze complex request/response flows

-- can execute JS code

-- can fetch dynamically loaded data

III. Using selenium

driver = webdriver.Chrome()  # launch the driver-controlled browser

# implicit wait: driver.implicitly_wait(10)

driver.get('url')  # send a request to the given site

# explicit wait: WebDriverWait (sketch below)

driver.close()
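The examples in these notes only use the implicit wait; the explicit wait is mentioned but never shown. A minimal sketch, assuming Chrome and using Baidu's search box (id kw) as the target element:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
try:
    # implicit wait: every find_element* call polls up to 10s for its element
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com/')

    # explicit wait: block up to 10s until this specific condition holds
    wait = WebDriverWait(driver, 10)
    input_tag = wait.until(EC.presence_of_element_located((By.ID, 'kw')))
    print(input_tag.tag_name)
finally:
    driver.close()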

IV. Selectors

element: find one

elements: find all

by_id

by_class_name

by_partial_link_text

by_link_text

by_name

by_css_selector

by_tag_name

(these are the suffixes of driver.find_element_by_* / driver.find_elements_by_*)
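A minimal sketch of these selector methods (Selenium 3 style, matching the rest of these notes), assuming Baidu's homepage of the time, where the search box has id kw, name wd, and class s_ipt:

from selenium import webdriver

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com/')

    print(driver.find_element_by_id('kw'))                 # by id
    print(driver.find_element_by_name('wd'))               # by the name attribute
    print(driver.find_element_by_class_name('s_ipt'))      # by class name
    print(driver.find_element_by_tag_name('input'))        # by tag name
    print(driver.find_element_by_css_selector('#kw'))      # by CSS selector
    print(driver.find_element_by_link_text('新闻'))         # by exact link text
    print(driver.find_element_by_partial_link_text('新'))   # by partial link text

    # the find_elements_by_* variants return a list of all matches
    print(len(driver.find_elements_by_tag_name('a')))
finally:
    driver.close()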

 

Today's content:

I. Remaining selenium topics

II. BeautifulSoup4

I. Remaining selenium topics

-- click and clear

-- ActionChains

-- switching frames

-- executing JS code

- ActionChains
An action-chain object; pass the driver to it when creating one.
It queues up a series of predefined actions and performs them in order (see the sketch below).

- Switching iframes
driver.switch_to.frame('iframeResult')

- Executing JS code
execute_script()
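A minimal sketch of the three APIs above, assuming a driver and a draggable source element already exist (the offsets and iframe name are placeholders):

from selenium.webdriver import ActionChains

# queue several actions, then run them in order with perform()
ActionChains(driver).click_and_hold(source).move_by_offset(200, 0).release().perform()

# switch into an iframe by its id/name, then back to the main document
driver.switch_to.frame('iframeResult')
driver.switch_to.default_content()

# run arbitrary JS in the current page
driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')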

Example 1 code:

from selenium import webdriver  # web driver
from selenium.webdriver.common.keys import Keys
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.jd.com/')
    time.sleep(5)

    # type into the search box
    input_tag = driver.find_element_by_id('key')
    input_tag.send_keys('围城')

    # find the search button by class and click it
    search = driver.find_element_by_class_name('button')
    search.click()

    time.sleep(3)

    # clear the search box
    input2 = driver.find_element_by_id('key')
    input2.clear()

    time.sleep(1)
    input2.send_keys('asdw')
    input2.send_keys(Keys.ENTER)

    time.sleep(10)

finally:
    driver.close()

JD product info example code

import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys


def get_good(driver):
    num = 1
    time.sleep(5)

    # scroll down 5000px so the lazy-loaded products render
    js_code = '''
        window.scrollTo(0, 5000)
    '''
    driver.execute_script(js_code)

    # wait 5 seconds for the product data to load
    time.sleep(5)
    good_list = driver.find_elements_by_class_name('gl-item')
    for good in good_list:
        # product name
        good_name = good.find_element_by_css_selector('.p-name em').text

        # product link
        good_url = good.find_element_by_css_selector('.p-name a').get_attribute('href')

        # product price
        good_price = good.find_element_by_class_name('p-price').text

        # product reviews
        good_commit = good.find_element_by_class_name('p-commit').text

        good_content = f'''
        num: {num}
        name: {good_name}
        link: {good_url}
        price: {good_price}
        reviews: {good_commit}
        \n
        '''
        print(good_content)
        with open('jd.txt', 'a', encoding='utf-8') as f:
            f.write(good_content)
        num += 1

    print('Product info written successfully!')

    # find the "next page" button and click it
    next_tag = driver.find_element_by_class_name('pn-next')
    next_tag.click()

    time.sleep(5)
    # recurse to scrape the next page; on the last page find_element raises,
    # and the caller's finally closes the driver
    get_good(driver)


if __name__ == '__main__':
    driver = webdriver.Chrome()
    try:
        driver.implicitly_wait(10)
        # send a request to JD's homepage
        driver.get('https://www.jd.com/')
        # type 墨菲定律 (Murphy's Law) into the search box and press Enter
        input_tag = driver.find_element_by_id('key')
        input_tag.send_keys('墨菲定律')
        input_tag.send_keys(Keys.ENTER)

        # call the product-scraping function
        get_good(driver)

    finally:
        driver.close()

Slider-drag example code

from selenium import webdriver  # web driver
from selenium.webdriver import ActionChains
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    time.sleep(5)

    # deprecated method:
    # driver.switch_to_frame('iframeResult')
    # current method:
    driver.switch_to.frame('iframeResult')
    time.sleep(1)

    # source square, id: draggable
    source = driver.find_element_by_id("draggable")

    # target square, id: droppable
    target = driver.find_element_by_id("droppable")

    print(source.size)      # element size
    print(source.tag_name)  # tag name
    print(source.text)      # text content
    print(source.location)  # position on the page

    # compute the horizontal distance to drag
    distance = target.location['x'] - source.location['x']
    # press and hold the source square
    ActionChains(driver).click_and_hold(source).perform()

    # approach 2: move a little at a time
    s = 0
    while s < distance:
        # each iteration moves 2px to the right
        ActionChains(driver).move_by_offset(xoffset=2, yoffset=0).perform()
        s += 2

        time.sleep(0.1)

    # release the mouse button
    ActionChains(driver).release().perform()
    time.sleep(10)
finally:
    driver.close()
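The "approach 2" comment implies a one-shot approach 1; presumably something like the line below. Anti-bot sliders typically reject an instantaneous jump, which is why the stepwise loop is used instead:

# approach 1 (one-shot): let ActionChains compute and perform the whole drag at once
ActionChains(driver).drag_and_drop(source, target).perform()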

Execute JS synchronously in the current window/frame

from selenium import webdriver
import time
driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    driver.get('https://www.baidu.com/')
    driver.execute_script(
        '''
        alert("awdwdxcw")
        '''
    )
    time.sleep(10)
finally:
    driver.close()

 

II. BeautifulSoup4

BS4

1. What is BeautifulSoup?
bs4 is a parsing library; through a parser it helps us extract the data we want.

2. Why use bs4?
Because its concise syntax lets us quickly extract the data we want.

3. Parser types
- lxml (third-party, fast; installed separately)
- html.parser (built into Python)

4. Installation and usage

'''
Install the parser:
pip3 install lxml

Install the parsing library:
pip3 install bs4
'''
html_doc = """
The Dormouse's story

$37

Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.

...

""" from bs4 import BeautifulSoup # python自带的解析库 # soup = BeautifulSoup(html_doc, 'html.parser') # 调用bs4得到一个soup对象 soup = BeautifulSoup(html_doc, 'lxml') # bs4对象 print(soup) # bs4类型 print(type(soup)) # 美化功能 html = soup.prettify() print(html)

 


- Traversing the document tree

html_doc = """
The Dormouse's story

$37

Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.

...

""" from bs4 import BeautifulSoup soup = BeautifulSoup(html_doc, 'lxml') # print(soup) # print(type(soup)) # 遍历文档树 # 1、直接使用 ***** print(soup.html) print(type(soup.html)) print(soup.a) print(soup.p) # 2、获取标签的名称 print(soup.a.name) # 3、获取标签的属性 ***** print(soup.a.attrs) # 获取a标签中所有的属性 print(soup.a.attrs['href']) # 4、获取标签的文本内容 ***** print(soup.p.text) # $37 # 5、嵌套选择 print(soup.html.body.p) # 6、子节点、子孙节点 print(soup.p.children) # 返回迭代器对象 print(list(soup.p.children)) # [$37] # 7、父节点、祖先节点 print(soup.b.parent) print(soup.b.parents) print(list(soup.b.parents)) # 8、兄弟节点 (sibling: 兄弟姐妹) print(soup.a) # 获取下一个兄弟节点 print(soup.a.next_sibling) # 获取下一个的所有兄弟节点,返回的是一个生成器 print(soup.a.next_siblings) print(list(soup.a.next_siblings)) # 获取上一个兄弟节点 print(soup.a.previous_sibling) # 获取上一个的所有兄弟节点,返回的是一个生成器 print(list(soup.a.previous_siblings))

- Searching the document tree

 

'''
find: return the first match
find_all: return all matches

Tag and attribute lookups:

    name   tag-name matching
    attrs  attribute matching
    text   text matching

    Tag (name) filters:
        - string filter
            exact string match

        - regex filter
            matching via the re module

        - list filter
            match anything in the list

        - bool filter
            True matches any tag

        - method filter
            for lookups where some attributes must be present and others absent

    Attribute shortcuts:
        - class_
        - id
'''
html_doc = """
The Dormouse's story

$37

Once upon a time there were three little sisters; and their names wereElsieLacie andTillieand they lived at the bottom of a well.

...

""" from bs4 import BeautifulSoup soup = BeautifulSoup(html_doc, 'lxml') name 标签名 attrs 属性查找匹配 text 文本匹配 find与find_all搜索文档 ''' 字符串过滤器 ''' p = soup.find(name='p') p_s = soup.find_all(name='p') print(p) print(p_s) name + attrs p = soup.find(name='p', attrs={"id": "p"}) print(p) # name + text tag = soup.find(name='title', text="The Dormouse's story") print(tag) # name + attrs + text tag = soup.find(name='a', attrs={"class": "sister"}, text="Elsie") print(tag) ''' - 正则过滤器 re模块匹配 ''' import re # name # 根据re模块匹配带有a的节点 a = soup.find(name=re.compile('a')) print(a) a_s = soup.find_all(name=re.compile('a')) print(a_s) attrs a = soup.find(attrs={"id": re.compile('link')}) print(a) - 列表过滤器 列表内的数据匹配 print(soup.find(name=['a', 'p', 'html', re.compile('a')])) print(soup.find_all(name=['a', 'p', 'html', re.compile('a')])) - bool过滤器 True匹配 print(soup.find(name=True, attrs={"id": True})) - 方法过滤器 用于一些要的属性以及不需要的属性查找。 def have_id_not_class(tag): # print(tag.name) if tag.name == 'p' and tag.has_attr("id") and not tag.has_attr("class"): return tag # print(soup.find_all(name=函数对象)) print(soup.find_all(name=have_id_not_class)) # 补充知识点: # id a = soup.find(id='link2') print(a) # class p = soup.find(class_='sister') print(p)

 

Extra notes:

Data formats:

JSON data:
{
    "name": "tank"
}

XML data:
<name>tank</name>

HTML:
tag-based markup (see the html_doc examples above)
Generator: yield value (puts the value into the generator)

def f():
    # return 1
    yield 1
    yield 2
    yield 3

g = f()
print(g)  # <generator object f at ...>

for line in g:
    print(line)

 

Homework:

 

from selenium import webdriver
import time

driver = webdriver.Chrome()
try:
    driver.implicitly_wait(10)
    # send a request to Wandoujia's app category page
    driver.get('https://www.wandoujia.com/category/6001')
    time.sleep(3)

    num = 1
    app_list = driver.find_elements_by_class_name('card')
    for app in app_list:
        # app name
        app_name = app.find_element_by_css_selector('.name').text

        # app link
        detail_url = app.find_element_by_css_selector('.app-title-h2 a').get_attribute('href')

        # number of installs
        download_num = app.find_element_by_css_selector('div.meta span.install-count').text

        # app size (located by xpath, indexed per card)
        app_size = app.find_element_by_xpath('//*[@id="j-tag-list"]/li[' + str(num) + ']/div[2]/div[1]/span[3]').text

        app_content = f'''
          app name: {app_name}
          app link: {detail_url}
          installs: {download_num}
          app size: {app_size}
          \n
          '''
        num += 1
        print(app_content)
        with open('app.txt', 'a', encoding='utf-8') as f:
            f.write(app_content)

    print('App info written successfully!')

finally:
    driver.close()

Result:

(screenshot 1: homework output)

Reposted from: https://www.cnblogs.com/yslg/p/11129635.html
