urllib

urllib.request

urllib.request模块定义函数和类用来打开URLs

urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False, context=None)

  • url:可以是一个字符串形式的链接,也可以是一个Request对象
  • data:是访问URL时要传送的数据
  • timeout:访问超时设置
#-*-coding:UTF-8-*-
import urllib.request   # standard-library HTTP client

# Simplest form: pass the URL string straight to urlopen() and dump
# the raw (bytes) response body.
page = urllib.request.urlopen('https://baidu.com')
print(page.read())

构造Request实例进行访问

class urllib.request.Request(url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None)

#--coding:UTF-8--
import urllib.request

# Equivalent to passing the URL string directly, but an explicit
# Request object lets us attach headers/data later if needed.
req = urllib.request.Request('https://baidu.com')
resp = urllib.request.urlopen(req)
print(resp.read())

GET方式传递数据
http://www.guancha.cn/Search/?k=一带一路
这是在观察者网搜索"一带一路"时,浏览器地址栏中显示的网址

#-*-coding:UTF-8-*-
import urllib.request
import urllib.parse   # BUG FIX: must be imported explicitly — the original
                      # relied on urllib.request importing it internally

# Build a GET query string, fetch the search page, save it to g.html.
values = {}
values['k'] = '一带一路'
data = urllib.parse.urlencode(values)   # percent-encodes non-ASCII values
print(data)   #k=%E4%B8%80%E5%B8%A6%E4%B8%80%E8%B7%AF
url = 'http://www.guancha.cn/Search/?'+data
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
# Context manager guarantees the file is closed even if write() raises.
with open('g.html','wb') as con:
    con.write(response.read())
###
则g.html里的内容为




    
    
    搜索结果页
    
    
    

观察者网-中国关怀 全球视野    
    
    


......

POST请求
传递参数需要用到 前文中data参数 (data must be a bytes object)

import urllib.request,urllib.parse

# urlopen() issues a POST whenever a `data` argument is supplied.
# The payload must be bytes, hence the .encode() after urlencode().
url = 'http://www.xxx.com'
form = {
    'name':'diyinqianchang',
    'pass':'88888'
}
postdata = urllib.parse.urlencode(form).encode('UTF-8')
req = urllib.request.Request(url,postdata)
data = urllib.request.urlopen(req).read()
print(data)

设置Headers

import urllib.request,urllib.parse

# POST with custom request headers, saving the response to zhihu.html.
url = 'http://www.zhihu.com/#signin'
postdata = urllib.parse.urlencode({
    'username':'188\*\*\*\*8091',
    'password':'88888888'
    }).encode('UTF-8')
req = urllib.request.Request(url,postdata)
# BUG FIX: the HTTP header is 'User-Agent' (hyphen). 'User_Agent' is
# transmitted literally and ignored by servers, defeating the spoofing.
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36')
req.add_header('Referer','https://www.zhihu.com/') # pass anti-hotlinking checks
data = urllib.request.urlopen(req).read()
# Context manager guarantees the file is closed even on error.
with open('zhihu.html','wb') as con:
    con.write(data)
print(data)

另外一种设置方式

# Alternative: pass a headers dict as the third positional argument of
# Request instead of calling add_header() repeatedly.
# BUG FIX: key renamed from 'User_Agent' to the standard 'User-Agent';
# the underscore form is not a recognized HTTP header.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
# NOTE: `values` and `url` come from the earlier GET example above.
data = urllib.parse.urlencode(values)
request = urllib.request.Request(url, data.encode('utf-8'), headers)
# Request's signature is Request(url, data=None, headers={}, ...), so the
# third parameter carries the headers directly.

代理服务器的设置

#coding:UTF-8
import urllib.request


def use_proxy(proxy_addr, url):
    """Fetch *url* through the given HTTP proxy and return the decoded page.

    If proxies is given, it must be a dictionary mapping protocol names
    to URLs of proxies.  NOTE: install_opener() changes the *global*
    opener, so later urlopen() calls also go through this proxy.
    """
    handler = urllib.request.ProxyHandler({'http': proxy_addr})
    urllib.request.install_opener(urllib.request.build_opener(handler))
    return urllib.request.urlopen(url).read().decode('utf-8')


proxy_addr = '119.\*\*.\*\*.60:7777'
data = use_proxy(proxy_addr, 'https://www.baidu.com')
print(len(data))
###
227

URLError

#coding:UTF-8

"""
Two classes handle network failures: URLError and HTTPError.  HTTPError
is a subclass of URLError, so its except clause must come first.
URLError also covers an unreachable server, a nonexistent remote URL,
and a missing network connection.
"""
import urllib.request
import urllib.error

try:
    urllib.request.urlopen('http://blog.baiduss.net')
except urllib.error.HTTPError as err:
    # HTTP-level failure: exposes both a status code and a reason.
    print(err.code)
    print(err.reason)
except urllib.error.URLError as err:
    # Lower-level failure: only a reason is available.
    print(err.reason)

你可能感兴趣的:(urllib)