How a crawler wraps POST and GET: the principle behind requests-style helpers, built on urllib
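
The snippet below re-implements the familiar post()/get() interface of the third-party requests library on top of Python's built-in urllib.request. For comparison, here is roughly what the same two calls look like with requests itself (a minimal sketch; requests must be installed separately and is not used by the script below):

import requests

# GET with a custom User-Agent; resp.content holds the raw bytes
resp = requests.get('http://www.baidu.com',
                    headers={'User-Agent': 'Mozilla/5.0'})
print(resp.content)

# POST: requests URL-encodes the form dict for us
resp = requests.post('http://fanyi.baidu.com/sug', data={'kw': '你好'})
print(resp.content)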

from urllib import request,parse
from urllib.error import HTTPError,URLError


# POST helper: delegates to the generic urlrequests() below
def post(url, form=None, headers=None):
    return urlrequests(url, form, headers)

# GET helper: delegates to urlrequests() with no form data
def get(url, headers=None):
    return urlrequests(url, headers=headers)
# Generic request function shared by post() and get()
def urlrequests(url, form=None, headers=None):
    html_bytes = b''
    # Default to a browser-like User-Agent so the server does not
    # reject the request as an obvious script
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    if not headers:
        headers = {
            'User-Agent': user_agent
        }
    try:
        # If form data was passed in, send a POST request
        if form:
            # URL-encode the form dict into a query string
            form_str = parse.urlencode(form)
            # urlopen() requires the request body as bytes
            form_bytes = form_str.encode('utf-8')
            # Supplying data makes this a POST; pass the headers here too,
            # otherwise the custom User-Agent is silently dropped
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            # No form data: plain GET request
            req = request.Request(url, headers=headers)
        response = request.urlopen(req)
        html_bytes = response.read()
    # HTTPError is a subclass of URLError, so it must be caught first
    except HTTPError as e:
        print(e)
    except URLError as e:
        print(e)
    return html_bytes

if __name__ == '__main__':
    # GET example (uncomment to fetch the Baidu homepage and save it):
    # url = 'http://www.baidu.com'
    # res = get(url)
    # with open('baidu.html', 'wb') as f:
    #     f.write(res)

    # POST example: query Baidu Translate's suggestion endpoint
    url = 'http://fanyi.baidu.com/sug'
    form = {
        'kw': '你好'
    }
    res = post(url, form)
    print(res)
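
print(res) dumps the raw response bytes. A short follow-up sketch for decoding them, assuming the endpoint still returns UTF-8 encoded JSON (the exact response layout is not guaranteed by this article):

import json

# Decode the bytes and parse the JSON body (assumption: the endpoint
# answers with JSON, as it did when this script was written)
result = json.loads(res.decode('utf-8'))
print(result)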
