Python urllib2 usage examples

urllib2 is really powerful.

I tried things like logging in through a proxy to pull cookies, and following redirects to grab images...

Documentation: http://docs.python.org/library/urllib2.html

 

Here is the demo code.

It covers: plain fetching, using Request (POST/GET), using a proxy, cookie handling, and redirect handling.

    #!/usr/bin/python 
    # -*- coding:utf-8 -*- 
    # urllib2_test.py 
    # author: wklken 
    # 2012-03-17 [email protected] 
     
     
    import urllib,urllib2,cookielib,socket 
     
    url = "http://www.testurl....."#change yourself 
    #最简单方式 
    def use_urllib2(): 
      try: 
        f = urllib2.urlopen(url, timeout=5).read() 
      except urllib2.URLError, e: 
        print e.reason 
      print len(f) 
     
    #use Request
    def get_request():
      #a default timeout can be set
      socket.setdefaulttimeout(5)
      #parameters can be added [no data -> GET; with data, as in the commented line below -> POST]
      params = {"wd": "a", "b": "2"}
      #request headers can be added so the client is identified properly
      i_headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5",
                   "Accept": "text/plain"}
      #use POST: params are sent to the server; if the server rejects them an exception is raised
      #req = urllib2.Request(url, data=urllib.urlencode(params), headers=i_headers)
      req = urllib2.Request(url, headers=i_headers)

      #after the request is created, more headers can still be added; if a key is duplicated, the later value wins
      #req.add_header('Accept', 'application/json')
      #the HTTP method can also be forced
      #req.get_method = lambda: 'PUT'
      try:
        page = urllib2.urlopen(req)
        print len(page.read())
        #GET equivalent
        #url_params = urllib.urlencode({"a": "1", "b": "2"})
        #final_url = url + "?" + url_params
        #print final_url
        #data = urllib2.urlopen(final_url).read()
        #print "Method: get", len(data)
      except urllib2.HTTPError, e:
        print "Error Code:", e.code
      except urllib2.URLError, e:
        print "Error Reason:", e.reason
     
    def use_proxy():
      enable_proxy = False
      proxy_handler = urllib2.ProxyHandler({"http": "http://proxyurlXXXX.com:8080"})
      null_proxy_handler = urllib2.ProxyHandler({})
      if enable_proxy:
        opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
      else:
        opener = urllib2.build_opener(null_proxy_handler, urllib2.HTTPHandler)
      #this installs the opener as urllib2's global opener
      urllib2.install_opener(opener)
      content = urllib2.urlopen(url).read()
      print "proxy len:", len(content)
     
    #a cookie processor that returns the response on 403/400/500 instead of raising,
    #so cookies can still be extracted from error responses
    class NoExceptionCookieProcessor(urllib2.HTTPCookieProcessor):
      def http_error_403(self, req, fp, code, msg, hdrs):
        return fp
      def http_error_400(self, req, fp, code, msg, hdrs):
        return fp
      def http_error_500(self, req, fp, code, msg, hdrs):
        return fp
     
    def hand_cookie():
      cookie = cookielib.CookieJar()
      #cookie_handler = urllib2.HTTPCookieProcessor(cookie)
      #use the error-tolerant processor defined above instead
      cookie_handler = NoExceptionCookieProcessor(cookie)
      opener = urllib2.build_opener(cookie_handler, urllib2.HTTPHandler)
      url_login = "https://www.yourwebsite/?login"
      params = {"username": "user", "password": "111111"}
      opener.open(url_login, urllib.urlencode(params))
      for item in cookie:
        print item.name, item.value
      #urllib2.install_opener(opener)
      #content = urllib2.urlopen(url).read()
      #print len(content)
    #get the final page URL after N redirects
    def get_request_direct(): 
      import httplib 
      httplib.HTTPConnection.debuglevel = 1 
      request = urllib2.Request("http://www.google.com") 
      request.add_header("Accept", "text/html,*/*") 
      request.add_header("Connection", "Keep-Alive") 
      opener = urllib2.build_opener() 
      f = opener.open(request) 
      print f.url 
      print f.headers.dict 
      print len(f.read()) 
     
    if __name__ == "__main__": 
      use_urllib2() 
      get_request() 
      get_request_direct() 
      use_proxy() 
      hand_cookie() 


Recursively crawling the data you need:

Crawl the images under a given site.

You can configure the image save path, a minimum image-size threshold, the traversal depth, and whether to follow links to external sites; the script then crawls the pages and downloads the images.

    #!/usr/bin/python 
    # -*- coding:utf-8 -*- 
    # author: wklken 
    # 2012-03-17 [email protected] 
    #1 parse URLs from a page  #2 download images  #3 refactor and optimize
    #4 multithreading: not added yet
     
    import os,sys,urllib,urllib2,urlparse 
    from sgmllib import SGMLParser  
     
    img = []
    #collects <a href> and <img src> URLs from a page
    class URLLister(SGMLParser):
      def reset(self):
        SGMLParser.reset(self)
        self.urls = []
        self.imgs = []
      def start_a(self, attrs):
        href = [v for k, v in attrs if k == "href" and v.startswith("http")]
        if href:
          self.urls.extend(href)
      def start_img(self, attrs):
        src = [v for k, v in attrs if k == "src" and v.startswith("http")]
        if src:
          self.imgs.extend(src)
     
     
    def get_url_of_page(url, if_img=False):
      urls = [] 
      try: 
        f = urllib2.urlopen(url, timeout=1).read() 
        url_listen = URLLister() 
        url_listen.feed(f) 
        if if_img: 
          urls.extend(url_listen.imgs) 
        else: 
          urls.extend(url_listen.urls) 
      except urllib2.URLError, e: 
        print e.reason 
      return urls 
     
    #recursively process pages
    def get_page_html(begin_url, depth, ignore_outer, main_site_domain):
      #if external sites are excluded, filter them out
      if ignore_outer:
        if main_site_domain not in begin_url:
          return

      if depth == 1:
        urls = get_url_of_page(begin_url, True)
        img.extend(urls)
      else:
        urls = get_url_of_page(begin_url)
        if urls:
          for url in urls:
            get_page_html(url, depth-1, ignore_outer, main_site_domain)
     
    #download the images
    def download_img(save_path, min_size):
      print "download begin..."
      for im in img:
        filename = im.split("/")[-1]
        dist = os.path.join(save_path, filename)
        #checking the size by downloading the whole image is too wasteful
        #if len(urllib2.urlopen(im).read()) < min_size:
        #  continue
        #fetching just the headers first is much better: the image is only downloaded once
        connection = urllib2.build_opener().open(urllib2.Request(im))
        length = connection.headers.dict.get('content-length')
        if length is not None and int(length) < min_size:
          continue
        urllib.urlretrieve(im, dist)
        print "Done:", filename
      print "download end..."
     
    if __name__ == "__main__":
      #first page to start crawling images from
      url = "http://www.baidu.com/"
      #image save path
      save_path = os.path.abspath("./download")
      if not os.path.exists(save_path):
        os.mkdir(save_path)
      #images smaller than this threshold (in bytes) are skipped
      min_size = 92
      #traversal depth
      max_depth = 1
      #whether to stay on the target site only, i.e. ignore external sites
      ignore_outer = True
      main_site_domain = urlparse.urlsplit(url).netloc

      get_page_html(url, max_depth, ignore_outer, main_site_domain)

      download_img(save_path, min_size)

 

 

 

Possible future improvements:

1. Use multithreading to speed up downloading; the current multi-level traversal is too slow (a sketch follows this list).

2. Write a version using BeautifulSoup (see the second sketch below).

3. Add a GUI...
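
For the first item, here is a minimal sketch of how the download step might be parallelized with the standard threading and Queue modules. This is not the author's implementation: it assumes the crawl has already filled the global img list and that size filtering is done elsewhere, and the function name download_img_threaded and the thread_count parameter are made up for illustration.

    import os, threading, Queue, urllib

    def _download_worker(q, save_path):
      #each worker pulls image URLs off the shared queue until it is empty
      while True:
        try:
          im = q.get_nowait()
        except Queue.Empty:
          return
        filename = im.split("/")[-1]
        try:
          urllib.urlretrieve(im, os.path.join(save_path, filename))
          print "Done:", filename
        except IOError, e:
          print "Failed:", filename, e
        finally:
          q.task_done()

    def download_img_threaded(img_urls, save_path, thread_count=4):
      #fill the queue first, then start the workers
      q = Queue.Queue()
      for im in img_urls:
        q.put(im)
      threads = [threading.Thread(target=_download_worker, args=(q, save_path))
                 for _ in range(thread_count)]
      for t in threads:
        t.setDaemon(True)
        t.start()
      #block until every queued URL has been handled
      q.join()

    #usage (hypothetical): download_img_threaded(img, save_path, thread_count=4)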
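
For the second item, a rough sketch of what get_url_of_page could look like on top of BeautifulSoup instead of SGMLParser. It assumes the old BeautifulSoup 3 package (common in the Python 2 era); with bs4 only the import line changes, since findAll works the same way.

    import urllib2
    from BeautifulSoup import BeautifulSoup  #for bs4: from bs4 import BeautifulSoup

    def get_url_of_page_bs(url, if_img=False):
      #same contract as get_url_of_page above: return <img src> or <a href> URLs
      urls = []
      try:
        html = urllib2.urlopen(url, timeout=1).read()
        soup = BeautifulSoup(html)
        if if_img:
          tags = soup.findAll("img", src=True)
          urls = [t["src"] for t in tags if t["src"].startswith("http")]
        else:
          tags = soup.findAll("a", href=True)
          urls = [t["href"] for t in tags if t["href"].startswith("http")]
      except urllib2.URLError, e:
        print e.reason
      return urls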



