Requests
import requests
url = "https://www.baidu.com"
# send a GET request
response = requests.get(url)
print(response.text)
response.text
Type: str
Decoding: the requests module makes an educated guess at the text encoding based on the HTTP headers

response.content
Type: bytes
Decoding: none is applied

Call decode() on response.content to fix garbled Chinese:
response.content.decode()  # defaults to utf-8
import requests
url = "https://www.baidu.com"
# send a GET request
response = requests.get(url)
response.encoding = 'utf8'  # affects response.text only, not response.content
print(response.content)
print(response.content.decode())
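When the header-based guess is wrong, requests also exposes a guess based on the body bytes themselves; a minimal sketch using response.apparent_encoding:

import requests

response = requests.get("https://www.baidu.com")
# apparent_encoding is requests' charset-detection guess from the body bytes
response.encoding = response.apparent_encoding
print(response.text)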
import requests
url = "https://www.baidu.com"
# send a GET request
response = requests.get(url)
response.encoding = 'utf8'
# the URL of the response
print(response.url)
# https://www.baidu.com/
# status code
print(response.status_code) # 200
# request headers
print(response.request.headers)
# {'User-Agent': 'python-requests/2.25.1',
# 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*',
# 'Connection': 'keep-alive'}
# response headers
print(response.headers)
# {'Cache-Control': 'private, no-cache, no-store,
# proxy-revalidate, no-transform', 'Connection': 'keep-alive',
# 'Content-Encoding': 'gzip', 'Content-Type': 'text/html',
# 'Date': 'Mon, 01 Nov 2021 13:51:59 GMT',
# 'Last-Modified': 'Mon, 23 Jan 2017 13:24:18 GMT',
# 'Pragma': 'no-cache', 'Server': 'bfe/1.0.8.18',
# 'Set-Cookie': 'BDORZ=27315; max-age=86400;
# domain=.baidu.com; path=/', 'Transfer-Encoding': 'chunked'}
# print the cookies the response sets
print(response.cookies)
# <RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
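If you want the cookiejar as a plain dict, requests ships a helper for the conversion; a minimal sketch:

import requests

response = requests.get("https://www.baidu.com")
# convert the RequestsCookieJar into a plain {name: value} dict
cookie_dict = requests.utils.dict_from_cookiejar(response.cookies)
print(cookie_dict)  # e.g. {'BDORZ': '27315'}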
requests.get(url, headers=headers)
import requests
url = 'https://www.baidu.com'
# custom request-headers dict
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.40'
}
# send the request (a wolf in sheep's clothing: disguise the client as a browser)
response = requests.get(url, headers=headers)
print(response.request.headers)
import requests
url = 'https://github.com/M1kaelson'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
}
temp = ''  # paste the cookie string copied from the browser here
temp = temp.encode("utf-8").decode("latin-1")  # make non-ASCII characters latin-1 safe for the Cookie header
cookie_list = temp.split(';')
cookies = {}
for cookie in cookie_list:
    # cookies is a dict: the name before the first '=' maps to everything after it
    cookies[cookie.split('=', 1)[0].strip()] = cookie.split('=', 1)[-1]
print(cookies)
response = requests.get(url, headers=headers, cookies=cookies, timeout=60)
with open("github_with_cookies_.html", "wb") as f:
    f.write(response.content)
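The same conversion can be written as a dict comprehension; a minimal equivalent of the loop above (temp is the same cookie string; split('=', 1) keeps '=' characters inside values intact):

cookies = {pair.split('=', 1)[0].strip(): pair.split('=', 1)[-1]
           for pair in temp.split(';')}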
cookies = {"name":"value"}
import requests
url = ''
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
}
temp = ''  # paste the cookie string copied from the browser here
cookie_list = temp.split(';')
cookies = {}
for cookie in cookie_list:
    # cookies is a dict: the name before the first '=' maps to everything after it
    cookies[cookie.split('=', 1)[0].strip()] = cookie.split('=', 1)[-1]
print(cookies)
response = requests.get(url, headers=headers, cookies=cookies)
with open("github_with_cookies_.html", "wb") as f:
    f.write(response.content)
response = requests.get(url, timeout=3)
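A timeout that expires raises an exception rather than returning a partial response; a minimal sketch of catching it:

import requests

url = 'https://www.baidu.com'
try:
    # give up if the server does not answer within 3 seconds
    response = requests.get(url, timeout=3)
except requests.exceptions.Timeout:
    print('request timed out')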
SOCKS simply forwards packets and does not care about the application-layer protocol.
SOCKS therefore has less overhead per request than an HTTP/HTTPS proxy.
A SOCKS proxy can forward both HTTP and HTTPS requests.
response = requests.get(url, proxies=proxies)
# proxies takes the form of a dict
import requests
url = 'http://www.google.com'
proxies = {
    'http': 'http://ip:port'  # ip:port is a placeholder for a real proxy address
}
response = requests.get(url,proxies=proxies)
print(response.text)
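For the SOCKS case above, requests accepts socks5:// proxy URLs once the optional dependency is installed (pip install requests[socks]); host and port below are placeholders:

import requests

proxies = {
    'http': 'socks5://host:port',
    'https': 'socks5://host:port',
}
response = requests.get('http://www.google.com', proxies=proxies)
print(response.status_code)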
# verify=False skips SSL certificate verification (for sites with untrusted certificates)
response = requests.get(url, verify=False)
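verify=False makes urllib3 emit an InsecureRequestWarning on every request; a minimal sketch of silencing it (expired.badssl.com is just a stand-in host with a broken certificate):

import urllib3
import requests

# suppress the warning that verify=False triggers
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
url = 'https://expired.badssl.com'  # stand-in host with an untrusted certificate
response = requests.get(url, verify=False)
print(response.status_code)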
response = requests.post(url, data=data)
The data parameter takes a dict.
# url
# headers
# data dict
# send the request, get the response
# parse the data
# coding: utf-8
import requests

class King(object):
    def __init__(self, word):
        # url
        self.url = "http://ifanyi.iciba.com/index.php?c=trans&m=fy&client=6&auth_user=key_ciba&sign=37218aa29f55fdcc"
        # headers
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0'
        }
        # data
        self.data = {
            "from": "zh",
            "to": "en",
            "q": word
        }

    def get_data(self):
        # send the POST request and return the decoded body
        response = requests.post(self.url, data=self.data, headers=self.headers)
        # unicode-escape turns the \uXXXX sequences in the JSON into readable characters
        return response.content.decode('unicode-escape')

    def run(self):
        response = self.get_data()
        print(response)

if __name__ == '__main__':
    king = King('字典')  # lower-case instance name so the class is not shadowed
    king.run()
# with open("fanyi.html", "wb")as f:
# f.write(Youdao.run())
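If you want structured access instead of the escaped string, parse the JSON body; a minimal sketch that just pretty-prints whatever the API returns (no assumption is made about the response schema):

import json
import requests

url = "http://ifanyi.iciba.com/index.php?c=trans&m=fy&client=6&auth_user=key_ciba&sign=37218aa29f55fdcc"
data = {"from": "zh", "to": "en", "q": "字典"}
response = requests.post(url, data=data)
result = json.loads(response.content)  # parse the JSON body into a dict
print(json.dumps(result, ensure_ascii=False, indent=2))  # pretty-print, keeping Chinese readable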