Python学习笔记-第十五天

Python学习笔记-第十五天_第1张图片
Python爬虫学习大纲.png
**demo**
#!/usr/bin/python
# -*- coding: UTF-8 -*-

import urllib2

# urlopen(url, data, timeout)
# 第一个参数url即为URL,第二个参数data是访问URL时要传送的数据,第三个timeout是设置超时时间。
response = urllib2.urlopen("http://www.baidu.com")

# response对象有一个read方法,可以返回获取到的网页内容。
print response.read()


# 其实上面的urlopen参数可以传入一个request请求,它其实就是一个Request类的实例,构造时需要传入Url,Data等等的内容。比如上面的两行代码,我们可以这么改写
request = urllib2.Request("http://www.baidu.com")
response = urllib2.urlopen(request)
print response.read()
**get**
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib
import urllib2


# GET example: the parameters are URL-encoded and appended to the URL itself.
values = {"username": "[email protected]", "password": "1234124"}
# URL-encode the parameters, e.g. a blank becomes %20
data = urllib.urlencode(values)

url = 'http://passport.csdn.net/account/login?' + data
# BUG FIX: the request must use the URL that carries the encoded query
# string; the original code requested a hard-coded, unrelated URL, so the
# GET parameters built above were never sent.
request = urllib2.Request(url)
response = urllib2.urlopen(request)
# print response.read()

**post**
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib
import urllib2


# values = {}
# values['username'] = "[email protected]"
# values['password'] = "XXXX"
values = {"username": "[email protected]", "password": "1234124"}
data = urllib.urlencode(values)

url = 'https://passport.csdn.net/account/login?from=http://my.csdn.net/my/mycsdn'
request = urllib2.Request(url='http://www.baidu.com', data=None)
response = urllib2.urlopen(request)
print response.read()

**head**
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib
import urllib2

url = 'http://www.baidu.com'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {'username': 'cqc',  'password': 'XXXX'}
headers = {'User-Agent': user_agent}
data = urllib.urlencode(values)
request = urllib2.Request(url, data, headers)
response = urllib2.urlopen(request)
print response.read()

**proxy**
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib2

# Install a process-wide opener that routes HTTP traffic through a proxy
# when enable_proxy is set, and connects directly otherwise.
enable_proxy = True
with_proxy = urllib2.ProxyHandler({"http": 'http://some-proxy.com:8080'})
no_proxy = urllib2.ProxyHandler({})
opener = urllib2.build_opener(with_proxy if enable_proxy else no_proxy)
urllib2.install_opener(opener)

**timeout**
import urllib2
import urllib

# urlopen() accepts a timeout in seconds: pass it by keyword when no data
# is sent, or positionally as the third argument after the POST data.
payload = urllib.urlencode({'username': 'cqc',  'password': 'XXXX'})
response = urllib2.urlopen('http://www.baidu.com', timeout=10)
response = urllib2.urlopen('http://www.baidu.com', payload, 10)

**exception**
#!/usr/bin/python
# -*- coding: UTF-8 -*-

import urllib2

request = urllib2.Request('http://www.xxxasjdiofhadsoifhasx.com')
try:
    urllib2.urlopen(request)
except urllib2.URLError, e:
    print e.reason


req = urllib2.Request('http://blog.csdn.net/cqcre')
try:
    urllib2.urlopen(req)
except urllib2.HTTPError, e:
    print e.code
    print e.reason

# 我们知道,HTTPError的父类是URLError,
# 我们知道,HTTPE根据编程经验,父类的异常应当写到子类异常的后面,
# 如果子类捕获不到,那么可以捕获父类的异常,所以上述的代码可以这么改写
req = urllib2.Request('http://blog.csdn.net/cqcre')
try:
    urllib2.urlopen(req)
except urllib2.HTTPError, e:
    print e.code
except urllib2.URLError, e:
    print e.reason
else:
    print "OK"

# 加入属性判断
req = urllib2.Request('http://blog.csdn.net/cqcre')
try:
    urllib2.urlopen(req)
except urllib2.URLError, e:
    if hasattr(e, "reason"):
        print e.reason
else:
    print "OK"

参考资料

你可能感兴趣的:(Python学习笔记-第十五天)