Three methods for downloading a web page with urllib2:
''' Created on 2016-4-14 Three methods for a Python crawler to download web pages @author: developer '''
''' In Python 3, urllib2 was merged into urllib.request, and cookielib was renamed http.cookiejar, so import urllib.request and http.cookiejar instead. '''
import urllib.request
import http.cookiejar
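Because the old module names only exist on Python 2, a version-agnostic import is sometimes handy; a minimal sketch, assuming the script might also be run under Python 2 (the urllib_request and cookiejar aliases are illustrative, not part of the original):

# Assumption: the script may still run under Python 2, where the old
# module names apply; on Python 3 the first branch is taken.
try:
    import urllib.request as urllib_request  # Python 3
    import http.cookiejar as cookiejar
except ImportError:
    import urllib2 as urllib_request  # Python 2 fallback
    import cookielib as cookiejar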
print("第一种方法")
url='http://www.baidu.com'
response1 = urllib.request.urlopen(url)
print(response1.getcode())
print(len(response1.read()))
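read() returns raw bytes and urlopen can fail, so in practice the call is usually decoded and wrapped in error handling; a minimal sketch (the fetch_page helper, the 10-second timeout, and the UTF-8 encoding are illustrative assumptions, not from the original):

import urllib.error
import urllib.request

def fetch_page(url):
    # Hypothetical helper: urlopen with a timeout and basic error handling.
    try:
        response = urllib.request.urlopen(url, timeout=10)
        return response.read().decode('utf-8')  # assume a UTF-8 body
    except urllib.error.URLError as e:
        print("request failed:", e.reason)
        return None

html = fetch_page('http://www.baidu.com')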
print("第二种方法")
request = urllib.request.Request(url)
request.add_header("User-Agent", "Mozilla/5.0")
response2 = urllib.request.urlopen(request)
print(response2.getcode())
print(len(response2.read()))
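Headers can also be passed directly to the Request constructor instead of calling add_header; a small sketch (the extra Accept-Language header is just an illustration):

import urllib.request

headers = {
    "User-Agent": "Mozilla/5.0",
    "Accept-Language": "zh-CN,zh;q=0.9",  # illustrative extra header
}
request2 = urllib.request.Request('http://www.baidu.com', headers=headers)
response2b = urllib.request.urlopen(request2)
print(response2b.getcode())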
''' Handlers for special access scenarios: HTTPCookieProcessor when the page requires a user login (cookies), ProxyHandler when a proxy is needed, HTTPSHandler for HTTPS encrypted access, HTTPRedirectHandler when URLs automatically redirect to one another. A sketch combining several handlers follows Method 3 below. '''
print("第三种方法")
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(opener)
response3 = urllib.request.urlopen(url)
print(response3.getcode())
print(cj)
print(response3.read())
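As noted above, build_opener accepts the other handlers as well and they can be chained; a minimal sketch combining the cookie processor with a proxy (the 127.0.0.1:8080 proxy address is a placeholder assumption):

import http.cookiejar
import urllib.request

cj = http.cookiejar.CookieJar()
# Placeholder proxy address; replace with a real proxy before running.
proxy = urllib.request.ProxyHandler({'http': 'http://127.0.0.1:8080'})
opener = urllib.request.build_opener(
    urllib.request.HTTPCookieProcessor(cj),
    proxy,
)
urllib.request.install_opener(opener)
response = urllib.request.urlopen('http://www.baidu.com')
print(response.getcode())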