Python网络爬虫《三》

案例分析:

案例1:京东商品页面爬取

#!/usr/bin/env python
import requests


def getHTMLText(url):
    """Fetch *url* and return the first 1000 characters of the decoded body.

    Prints the HTTP status code as a side effect. Returns the string
    "wrong" on any request failure (connection error, timeout, or a
    4xx/5xx status), matching the original script's behavior.
    """
    try:
        r = requests.get(url, timeout=30)
        print(r.status_code)
        r.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # Use the encoding guessed from the body instead of the
        # (often wrong) header-declared encoding.
        r.encoding = r.apparent_encoding
        return r.text[:1000]
    except requests.RequestException:
        return "wrong"


if __name__ == "__main__":
    url = "http://item.jd.com/17746216603.html"
    print(getHTMLText(url))
案例2:亚马逊商品

当网站拒绝默认的 user-agent(python-requests)时,可以通过修改 headers 信息来模拟浏览器访问;

#!/usr/bin/env python
import requests


def getHTML1(url):
    """Fetch *url* with the default requests User-Agent.

    Prints the status code and the request headers actually sent, so the
    default 'python-requests/x.y' User-Agent is visible. Returns the first
    100 characters of the body, or "Wrong!" on any request failure.
    """
    try:
        r = requests.get(url, timeout=30)
        print(r.status_code)
        print(r.request.headers)
        r.raise_for_status()
        return r.text[:100]
    except requests.RequestException:
        return "Wrong!"


def getHTML2(url):
    """Fetch *url* pretending to be a browser.

    Same as getHTML1 but overrides the User-Agent header with
    'Mozilla/5.0' so sites that block the default requests agent
    respond normally. Returns the first 100 characters of the body,
    or "Wrong!" on failure.
    """
    try:
        kv = {'user-agent': 'Mozilla/5.0'}  # masquerade as a browser
        r = requests.get(url, headers=kv, timeout=30)
        print(r.status_code)
        print(r.request.headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding  # decode using the detected encoding
        return r.text[:100]
    except requests.RequestException:
        return "Wrong!"


if __name__ == "__main__":
    url = "https://www.amazon.cn/gp/product/B01M8L5Z3Y"
    print(getHTML1(url))
    # 200 {'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*', 'User-Agent': 'python-requests/2.11.1'}
    print("-------------------")
    print(getHTML2(url))
    # 200 {'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate', 'Accept': '*/*', 'user-agent': 'Mozilla/5.0'}
案例3:百度/360搜索关键词提交

百度的关键字接口:http://www.baidu.com/s?wd=keyword

360的关键词接口:http://www.so.com/s?q=keyword

通过关键词搜索:

import requests


def getHTML(url, keyword='Python'):
    """Submit *keyword* to a search engine at *url* via its query interface.

    The keyword is passed through requests' ``params`` argument, which
    appends it to the URL (e.g. ``?wd=Python`` for Baidu). The final
    request URL is printed as a side effect.

    Args:
        url: base URL of the search engine (e.g. "http://www.baidu.com").
        keyword: search term; defaults to 'Python' to preserve the
            original script's behavior.

    Returns:
        The length of the response body, or "Wrong!" on any failure.
    """
    try:
        kv = {'wd': keyword}
        r = requests.get(url, params=kv, timeout=30)
        print(r.request.url)  # show the fully-assembled query URL
        r.raise_for_status()
        return len(r.text)
    except requests.RequestException:
        return "Wrong!"


if __name__ == "__main__":
    url = "http://www.baidu.com"
    print(getHTML(url))
# http://www.baidu.com/?wd=Python
# 2381
案例4:爬取图片并保存到文件

import requests
import os


def getHTMLphoto(root, url):
    """Download the image at *url* and save it under directory *root*.

    The local filename is the last path segment of the URL. Creates the
    target directory (including parents) if needed, and skips the
    download when the file already exists. Prints a status message for
    each outcome, matching the original script.
    """
    path = root + url.split('/')[-1]  # filename taken from the URL tail
    try:
        # makedirs handles nested roots; exist_ok avoids a race with mkdir
        os.makedirs(root, exist_ok=True)
        if not os.path.exists(path):
            r = requests.get(url, timeout=30)
            r.raise_for_status()  # don't save an HTML error page as a .jpeg
            with open(path, 'wb') as f:
                # original had `f.close` (no parens) — a no-op; the
                # `with` block handles closing correctly
                f.write(r.content)
            print("saved!!!")
        else:
            print("not saved!")
    except (requests.RequestException, OSError):
        print("Wrong!")


if __name__ == "__main__":
    root = "D://pic//"
    url = "https://b-ssl.duitang.com/uploads/item/201501/01/20150101084426_sVcze.thumb.700_0.jpeg"
    getHTMLphoto(root, url)

案例5:IP地址归属地的自动查询

import requests

if __name__ == "__main__":
    try:
        # ip138's query interface: the IP to look up is appended to the URL
        url = "http://m.ip138.com/ip.asp?ip="
        r = requests.get(url + '202.204.80.112', timeout=30)
        r.raise_for_status()  # fail into the except branch on 4xx/5xx
        # The location info sits near the end of the page; print the tail.
        print(r.text[-500:])
    except requests.RequestException:
        print("Failed!")

你可能感兴趣的:(Python,Python全栈工程师)