Python Scraping - bs4 Document Tree Traversal - bs4 Document Tree Search - CSS Selectors

Table of Contents

        • Response attributes
        • Request function parameters explained
        • BeautifulSoup: traversing the document tree
        • BeautifulSoup: searching the document tree
        • CSS selectors
        • bs4: scraping Autohome news

Response attributes

import requests
url = "https://www.baidu.com/s"

resp = requests.get(url,params={"wd":"egon"},headers={"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3538.77 Safari/537.36"})

# print(resp.text)               # response body decoded as text
# print(resp.content)            # raw response body as bytes
# print(resp.status_code)        # HTTP status code
# print(resp.url)                # the final URL of the request
# print(resp.cookies)            # cookies returned by the server
# print(resp.cookies.get_dict()) # the same cookies as a plain dict
# print(resp.json())             # deserialize a JSON response body
# print(resp.request)            # the PreparedRequest that produced this response
# print(resp.apparent_encoding)  # encoding detected from the document content
# print(resp.encoding)           # encoding used to decode resp.text
# print(resp.headers)            # inspect the response headers
print(resp.history)              # redirect history
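resp.history holds the intermediate Response objects of any redirects that were followed; a small sketch of the behavior, assuming the public echo service httpbin.org is reachable:

import requests

# httpbin.org/redirect/1 answers with a 302 pointing at /get
resp = requests.get("http://httpbin.org/redirect/1")
print(resp.history)      # [<Response [302]>]
print(resp.status_code)  # 200: the final response after following the redirect

# with allow_redirects=False the redirect response itself is returned
resp = requests.get("http://httpbin.org/redirect/1", allow_redirects=False)
print(resp.status_code)              # 302
print(resp.headers.get("Location"))  # /get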


resp = requests.get(url,
                    params={"wd":"egon"},
                    headers={"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3538.77 Safari/537.36"},
                    stream=True)
# read the raw stream: bytes not yet processed by the HTTP layer; rarely needed
print(resp.raw.read(100))

# when the document's declared encoding differs from the one requests used to
# decode it, set the encoding manually
resp2 = requests.get("https://www.autohome.com.cn/news/")
# decode using the encoding detected from the document itself
resp2.encoding = resp2.apparent_encoding
print(resp2.text)

Request function parameters explained

import requests
# pass query-string parameters to the server
requests.get('url', params={'key': 'value'})

# POST requests can carry parameters via data (form-encoded) or json (serialized);
# use one or the other, not both in the same request
requests.post("url", data={"name": "jerry", "pwd": "123"})
requests.post("url", json={"name": "jerry", "pwd": "123"})
# data is form-encoded into the body as: name=jerry&pwd=123
# json is serialized into the body as the string: {"name": "jerry", "pwd": "123"}
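To see the difference, one quick check (assuming the public echo service httpbin.org is reachable) is to post each variant and inspect which field the payload lands in:

import requests

# httpbin.org/post echoes the request back: form-encoded data appears under
# 'form', a JSON body appears (deserialized) under 'json'
r1 = requests.post("http://httpbin.org/post", data={"name": "jerry"})
print(r1.json()["form"])  # {'name': 'jerry'}

r2 = requests.post("http://httpbin.org/post", json={"name": "jerry"})
print(r2.json()["json"])  # {'name': 'jerry'}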

# timeout=(connect_timeout, read_timeout): the first value limits how long to
# wait for the connection, the second how long to wait for the response
requests.post("https://www.baidu.com", timeout=(2, 2))

# a proxy pool is one of the most important tools for a crawler
ps = ["121.228.240.101:9999","121.228.240.101:9999","121.228.240.101:9999","121.228.240.101:9999"]
import random
# send the request through a randomly chosen proxy server
# (the proxies dict is keyed by lowercase scheme)
resp = requests.post("http://news.baidu.com/?tn=news", proxies={"http": random.choice(ps)})
with open('new_baidu.html','wb') as f:
    f.write(resp.content)
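One way to confirm the proxy is actually in use (again assuming httpbin.org is reachable, and that the proxies above still work) is to ask an echo service which origin IP it sees:

import requests, random

# httpbin.org/ip reports the client IP the server observed; through a working
# proxy it should be the proxy's address, not your own
resp = requests.get("http://httpbin.org/ip", proxies={"http": random.choice(ps)})
print(resp.json()["origin"])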

# upload a file: files takes a dict whose key is the form field name the server
# reads the file from, and whose value is an open file object
with open(r"D:\aa.png", "rb") as f:
    resp = requests.post("http://httpbin.org/post", files={"img": f})
print(resp.status_code)

BeautifulSoup: traversing the document tree

from bs4 import BeautifulSoup

# the document to parse (the standard bs4 sample document)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">hhhh Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
</body></html>
"""
# the first argument is the document data to parse
soup = BeautifulSoup(html_doc, 'lxml')

# tag = soup.body
# print(type(tag))
# print(tag.name)
# print(tag.text)
# print(tag.attrs)

# dot syntax looks a tag up by name, but only returns the first match
# tag = soup.a
# print(tag.attrs.get('href'))

# nested selection
# print(soup.p.a.text)

# child nodes
# print(list(soup.p.children))  # returns an iterator
# for i in soup.head.children:
#     print(i)

# print(soup.p.contents)  # returns a list
# for i in soup.head.contents:
#     print(i)

# the parent tag
# print(soup.p.parent)
# all ancestor tags
# print(list(soup.p.parents))
# for i in soup.p.parents:
#     print(i.name)

# all descendants, each node unpacked individually, text nodes included
# print(list(soup.p.descendants))
# for i in soup.p.descendants:
#     print(i)

# sibling tags; text also counts as a node here
# the next sibling
# print(soup.a.next_sibling.next_sibling)
# all following siblings
# print(list(soup.a.next_siblings))
# the previous sibling
# print(soup.a.previous_sibling)
# all preceding siblings
# print(list(soup.a.previous_siblings))

BeautifulSoup: searching the document tree

from bs4 import BeautifulSoup
import re

# the document to parse (the standard bs4 sample document)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
</body></html>
"""
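A minimal sketch of the search interface this section covers: find() returns the first match, find_all() returns every match, and both accept a string, a regular expression, a list, keyword attribute filters, or a function, run here against the document above:

soup = BeautifulSoup(html_doc, 'lxml')

# by tag name (string filter): the first match vs. all matches
print(soup.find('a'))
print(soup.find_all('a'))

# by regular expression: every tag whose name starts with 'b'
print(soup.find_all(re.compile('^b')))

# by list: all <a> and <p> tags
print(soup.find_all(['a', 'p']))

# by attribute: keyword arguments match attribute values
# (class is a Python keyword, so bs4 uses class_)
print(soup.find_all(id='link2'))
print(soup.find_all(class_='sister'))

# by function: receives a tag and returns True to keep it
print(soup.find_all(lambda tag: tag.has_attr('id')))

# limit and recursive narrow the search
print(soup.find_all('a', limit=1))
print(soup.body.find_all('p', recursive=False))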

CSS selectors

from bs4 import BeautifulSoup

# the document to parse (the standard bs4 sample document)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
</body></html>
"""
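A minimal sketch of select(), which takes standard CSS selector syntax and always returns a list (select_one returns the first match instead):

soup = BeautifulSoup(html_doc, 'lxml')

# by tag name, class (.), and id (#)
print(soup.select('title'))
print(soup.select('.sister'))
print(soup.select('#link1'))

# descendant and direct-child combinators
print(soup.select('body a'))
print(soup.select('p > a'))

# by attribute value
print(soup.select('a[href="http://example.com/elsie"]'))

# select_one returns the first matching tag, not a list
tag = soup.select_one('#link2')
print(tag.text, tag.attrs.get('href'))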

bs4: scraping Autohome news

import requests
from bs4 import BeautifulSoup

url = "https://www.autohome.com.cn/news/{page}/"

# tag filter: keep only <li> tags that carry the data-artidanchor attribute
# (named news_filter to avoid shadowing the built-in filter)
def news_filter(tag):
    return tag.name == 'li' and tag.has_attr("data-artidanchor")

# fetch and parse one page of the news list
def get_list_page(url):
    print(url)
    resp = requests.get(url, headers={
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"})
    soup = BeautifulSoup(resp.text, 'lxml')
    lis = soup.find_all(news_filter)
    for t in lis:
        print('https:' + t.a.attrs.get('href'))   # article link
        print('https:' + t.img.attrs.get('src'))  # thumbnail image
        print(t.h3.text)    # title
        print(t.span.text)  # publish time
        print(t.em.text)    # view count
        print(t.p.text)     # summary

get_list_page(url.format(page=1))
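To crawl more than one page, the same function can be driven from a loop; the page range below is an arbitrary choice for illustration, and the pause keeps the request rate polite:

import time

for page in range(1, 4):  # pages 1-3, chosen arbitrarily
    get_list_page(url.format(page=page))
    time.sleep(1)  # brief pause between requests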
