Quick Summary: Pitfalls

Converting POST request parameters

- Step 1: import the module
import urllib.parse
Example: data = urllib.parse.urlencode(params).encode('utf-8')  # URL-encode the params dict, then convert to bytes
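
A minimal end-to-end sketch of a POST request; the URL and form fields below are placeholders, not from the original notes:

import urllib.parse
import urllib.request

params = {'kw': 'python', 'page': '1'}  # hypothetical form fields
data = urllib.parse.urlencode(params).encode('utf-8')  # a bytes body makes urlopen send a POST

req = urllib.request.Request('http://httpbin.org/post', data=data)
response = urllib.request.urlopen(req)
print(response.read().decode('utf-8'))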

Using JSON parsing

- First, as before, import the module
import json
content_dict = json.loads(content)  # parse the JSON string into a dict
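
A minimal sketch tying the fetch and the parse together, assuming the response body is JSON (httpbin.org/get is used as a stand-in URL):

import json
import urllib.request

response = urllib.request.urlopen('http://httpbin.org/get')
content = response.read().decode('utf-8')  # bytes -> str; json.loads expects a string
content_dict = json.loads(content)         # str -> dict
print(content_dict['url'])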

Saving files with urllib

urllib.request.urlretrieve(cover_url, 'images1/%s.png' % title)
urllib.request.urlcleanup()
## Note ## The target directory (images1/ under the current working directory) must already exist, otherwise urlretrieve fails.
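
To sidestep that pitfall, create the directory from code first. A sketch with placeholder values for cover_url and title:

import os
import urllib.request

cover_url = 'http://httpbin.org/image/png'  # placeholder image URL
title = 'example'                           # placeholder file name

os.makedirs('images1', exist_ok=True)  # create the target directory if it is missing
urllib.request.urlretrieve(cover_url, 'images1/%s.png' % title)
urllib.request.urlcleanup()            # clear urlretrieve's temporary cache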

Using regular expressions correctly

Getting the number of job postings

# Regular expressions: the re module
# Regexes operate on strings
# re.match()
# re.search()
# re.findall()

# (.*): greedy
# + : greedy
# ? after a quantifier: non-greedy
# str: "abc\ndef"
# re: (.*) with re.S  => ["abc\ndef"]    (greedy: . crosses the newline)
# re: (.*?)           => ["abc", "def"]  (non-greedy: matches as little as possible)
- Step 1: import the module
import re
- Step 2: build the regex patterns
imgReg1 = '.*'
imgReg2 = '<img.*?src="(.*?)"'  # hypothetical example pattern; the original notes omit its definition
- Step 3: compile and match
    img1_com = re.compile(imgReg1)
    img2_com = re.compile(imgReg2, re.S)  # re.S: . also matches newlines

    img1_list = img1_com.findall(html)
    img2_list = img2_com.findall(html)

File operations

    with open('ali.txt', 'a', encoding='utf-8') as fp:

        for job in datas:
            degree = job.get('degree')  # education level
            departmentName = job.get('departmentName')  # department
            description = job.get('description')  # job description
            firstCategory = job.get('firstCategory')  # category
            workExperience = job.get('workExperience')  # experience requirement

            job_str = str((degree, departmentName, description, firstCategory, workExperience)) + "\n"
            fp.write(job_str)
            fp.flush()

Handlers and openers

import urllib
from urllib import request

# urlopen: a special, ready-made opener
# urllib.request.urlopen(url)

# Handling cookies or proxies requires a custom opener

# Handler object
http = urllib.request.HTTPHandler()  # HTTP handler
# http = urllib.request.HTTPHandler(debuglevel=1)  # debuglevel=1 prints HTTP traffic to the console
# print(http)

# Create the opener object
opener = urllib.request.build_opener(http)  # pass in the handler object

# Install opener as the global opener:
# subsequent urlopen() calls will also use it to open URLs
urllib.request.install_opener(opener)


# Open the URL
response = opener.open("http://www.baidu.com")
print(response)
print(response.read().decode())

# urlopen()
# response = request.urlopen('http://www.baidu.com')
# print(response.read().decode())
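
The same pattern extends to cookies. A minimal sketch using http.cookiejar with a CookieJar-backed handler; the URL is just an example:

import http.cookiejar
import urllib.request

cookie_jar = http.cookiejar.CookieJar()
cookie_handler = urllib.request.HTTPCookieProcessor(cookie_jar)  # handler that stores and resends cookies
opener = urllib.request.build_opener(cookie_handler)

response = opener.open("http://www.baidu.com")
for cookie in cookie_jar:  # cookies captured from the response
    print(cookie.name, cookie.value)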

Using proxy IPs

import random
import urllib
from urllib import request

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36"
}

# Use a proxy
# proxy = {'http': "61.135.155.82:443"}

# Using CCProxy (a proxy with username/password authentication)
proxy = {'http': 'http://user1:[email protected]:808'}
# proxy = {'http': 'http://10.20.154.59:808'}


# Proxy IP pool
proxy_list = [
    {'http': "61.135.155.82:443"},
    {'http': "61.183.233.6:54896"},
    {'https': "218.249.45.162:35586"},
    {'https': "14.118.135.10:808"},
    #  ...
]

# UA pool (User-Agent pool)
user_agent_list=[
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
    "Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Mobile Safari/537.36"
]


# Set up the proxy
# proxy = random.choice(proxy_list)  # pick a random proxy from the pool
# print(proxy)
proxy_handler = request.ProxyHandler(proxies=proxy)
opener = request.build_opener(proxy_handler)


url = "http://www.ifeng.com/"
req = request.Request(url, headers=headers)
req.add_header("User-Agent", random.choice(user_agent_list))  # pick a random UA from the pool

res = opener.open(req)

print(res.read().decode())
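
When proxies in the pool are unreliable, a common refinement is to retry with a different random proxy on failure. A sketch reusing the proxy_list and headers defined above; the helper name and retry count are my own:

import random
import urllib.error
from urllib import request

def open_with_proxy_pool(url, retries=3):
    # Try up to `retries` random proxies from proxy_list before giving up
    for _ in range(retries):
        proxy = random.choice(proxy_list)
        opener = request.build_opener(request.ProxyHandler(proxies=proxy))
        try:
            return opener.open(request.Request(url, headers=headers), timeout=10)
        except (urllib.error.URLError, OSError):
            continue  # this proxy failed, try another one
    raise RuntimeError("all proxies failed for %s" % url)

# res = open_with_proxy_pool("http://www.ifeng.com/")
# print(res.read().decode())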
