Python爬虫-requests的用法

import requests


if __name__ == '__main__':
    # NOTE(review): all examples below are intentionally commented out, but the
    # original `if` body contained NO statements at all, which is a SyntaxError
    # in Python ("expected an indented block"). The trailing `pass` makes the
    # file importable/runnable while preserving the tutorial examples.

    # Basic usage
    # response = requests.get("http://httpbin.org/get")
    # print(response.text)

    # GET with query parameters
    # data = {
    #     "name": "wu",
    #     "age": 21
    # }
    # response = requests.get("http://httpbin.org/get", params=data)
    # print(response.text)

    # Parse JSON — fixed: .json is a method and must be called with ()
    # print(response.json())

    # Fetch binary content
    # response = requests.get("http://github.com/favicon.ico")
    # print(response.content)
    # with open("favicon.ico", "wb") as f:
    #     f.write(response.content)  # no explicit f.close() needed inside `with`

    # Custom headers
    # headers = {
    #     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
    # }
    # response = requests.get("http://www.zhihu.com/explore", headers=headers)
    # print(response.text)

    # POST request
    # data = {
    #     "name": "wu",
    #     "age": 21
    # }
    # headers = {
    #     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
    # }
    # response = requests.post("http://www.httpbin.org/post", data=data, headers=headers)

    # Status-code check
    # response = requests.get("http://www.baidu.com")
    # exit() if not response.status_code == requests.codes.ok else print("OK")

    # File upload
    # ...

    # Cookies
    # ...

    # Session persistence (simulating a logged-in browser)
    # s = requests.Session()  # all requests on `s` share one cookie jar
    # s.get("http://httpbin.org/cookies/set/number/123456789")
    # response = s.get("http://httpbin.org/cookies")
    # print(response.text)

    # Certificate verification (https) — fixed: the original called bare
    # `get(...)`, which is undefined; it must be `requests.get(...)`
    # response = requests.get("http://www.12306.cn", verify=False)
    # print(response.status_code)
    # Specifying a client certificate...

    # Proxies — fixed: the original dict had keys with no values; each scheme
    # must map to a proxy URL
    # proxies = {
    #     "http": "http://127.0.0.1:9743",
    #     "https": "https://127.0.0.1:9743",
    # }
    # response = requests.get("http://www.12306.cn", proxies=proxies)

    # Timeout settings
    # Authentication settings
    pass

转载于:https://www.cnblogs.com/amojury/p/9127561.html

你可能感兴趣的:(Python爬虫-requests的用法)