urlopen(url, data, timeout)
The first parameter, url, is the URL to request; the second, data, is the data to send when accessing that URL; the third, timeout, sets the timeout period.
import urllib2
response = urllib2.urlopen("http://www.baidu.com")
print response.read()
Recommended style (building a request often means adding quite a bit more to it, so construct a Request object first; the server then responds to that request with an answer):
import urllib2
request = urllib2.Request("http://www.baidu.com")
response = urllib2.urlopen(request)
print response.read()
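The object urlopen returns behaves like a file handle; besides read(), urllib2 response objects also expose getcode(), geturl(), and info(). A quick look, reusing the URL from above:
import urllib2
response = urllib2.urlopen("http://www.baidu.com")
print response.getcode()  # HTTP status code, e.g. 200
print response.geturl()   # the final URL, after any redirects
print response.info()     # the response headers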
POST
import urllib
import urllib2
values = {"username": "[email protected]", "password":"XXX"}
data = urllib.urlencode(values)
url = "https://passport.csdn.net/account/login?from=http://my.csdn.net/my/mycsdn"
request = urllib2.Request(url, data)
response = urllib2.urlopen(request)
print response.read()
GET
import urllib
import urllib2
values = {"username": "[email protected]", "password":"XXX"}
data = urllib.urlencode(values)
url = "http://passport.csdn.net/account/login"
geturl = url + "?" + data
request = urllib2.Request(geturl)  # no data argument, so the request stays a GET
response = urllib2.urlopen(request)
print response.read()
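For reference, urllib.urlencode simply joins the pairs into a query string (the @ gets percent-encoded, and a dict does not guarantee key order), so the assembled geturl comes out looking like:
print geturl
# http://passport.csdn.net/account/login?username=1016903103%40qq.com&password=XXX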
Headers
import urllib
import urllib2
url = "http://www.server.com/login"
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
values = {"username":"[email protected]", "password":"XXX"}
headers = {"User-Agent": user_agent}
data = urllib.urlencode(values)
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request)
page = response.read()
Anti-leeching: to guard against hotlinking, a server checks whether the Referer in the headers points back to itself; if it does not, some servers will refuse to respond. Adding a Referer to the headers deals with this:
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Referer": "http://www.zhihu.com/articles"
}
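Pass the dict as the third argument of Request, just as in the Headers example above; a minimal sketch reusing that example's url and data:
request = urllib2.Request(url, data, headers)  # the headers ride along with the request
response = urllib2.urlopen(request)
print response.read()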
Proxy
Suppose a website monitors how many times a given IP visits within a certain period and blocks you once the count gets too high. The fix: set up some proxy servers and switch to a different proxy every so often.
import urllib2
enable_proxy = True
proxy_handler = urllib2.ProxyHandler({"http": "http://some-proxy.com:8080"})
null_proxy_handler = urllib2.ProxyHandler({})
if enable_proxy:
    opener = urllib2.build_opener(proxy_handler)
else:
    opener = urllib2.build_opener(null_proxy_handler)
urllib2.install_opener(opener)
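After install_opener, the chosen opener becomes the global default, so an ordinary urlopen call is routed through the proxy (or directly, when enable_proxy is False); calling opener.open(url) instead would use it without installing it globally:
response = urllib2.urlopen("http://www.baidu.com")  # goes through the installed opener
print response.read()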
Timeout
import urllib2
response = urllib2.urlopen("http://www.baidu.com", timeout=10)
# if a data body is also being sent, timeout is the third positional argument
response = urllib2.urlopen("http://www.baidu.com", data, 10)
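When the timer expires, the failure surfaces as an exception; in Python 2 it can arrive either wrapped in urllib2.URLError (with socket.timeout as the reason) or as a bare socket.timeout, so a cautious sketch catches both:
import socket
import urllib2
try:
    response = urllib2.urlopen("http://www.baidu.com", timeout=0.001)  # deliberately tiny timeout
except urllib2.URLError, e:
    print "timed out:", e.reason
except socket.timeout:
    print "timed out (socket.timeout)"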
URLError
import urllib2
request = urllib2.Request("http://www.xxxxx.com")
try:
    urllib2.urlopen(request)
except urllib2.URLError, e:
    print e.reason
# [Errno 11004] getaddrinfo failed
HTTPError
HTTPError is a subclass of URLError.
import urllib2
request = urllib2.Request("http://blog.csdn.net/cqcre")
try:
    urllib2.urlopen(request)
except urllib2.HTTPError, e:
    print e.code
    print e.reason
import urllib2
request = urllib2.Request("http://blog.csdn.net/cqcre")
try:
    urllib2.urlopen(request)
except urllib2.HTTPError, e:
    print e.code
except urllib2.URLError, e:
    print e.reason
else:
    print "OK"