I often use Baidu to look things up, so I wrote a Python script that simulates sending the search requests.
#!/usr/bin/env python
# coding=utf8
#####################
# name  : bd.py
# author: jaysonzhang
# date  : 2013-08-04
#####################
import urllib2
import urllib
import re
import random

# Rotate through several User-Agent strings so Baidu is less likely to block the IP
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
               'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
               'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ (KHTML, like Gecko) Element Browser 5.0',
               'IBM WebExplorer /v0.94',
               'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
               'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
               'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
               'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
               'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
               'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']

# Build the search URL and return the HTML of the result page
def baidu_search(keyword, pn):
    p = {'wd': keyword}  # wd = keyword, pn = result offset, cl=3 = web search
    access_url = ("http://www.baidu.com/s?" + urllib.urlencode(p) + "&pn={0}&cl=3&rn=10").format(pn)
    res = urllib2.urlopen(access_url)
    html = res.read()
    return html

# Return every match of the regex in the text as a list
def getList(regex, text):
    arr = []
    res = re.findall(regex, text)
    if res:
        for r in res:
            arr.append(r)
    return arr

# Return the first match of the regex in the text, or an empty string
def getMatch(regex, text):
    res = re.findall(regex, text)
    if res:
        return res[0]
    return ""

# Strip the leftover HTML tags from a result title
def clearTag(text):
    p = re.compile(u'<[^>]+>')
    retval = p.sub("", text)
    return retval

# Remove newlines and tabs from the result page so the regexes below
# do not have to deal with those characters
def subContent(content):
    content = re.sub('\n', '', content)
    content = re.sub('\t', '', content)
    return content

# Run the search
def geturl(keyword, totalpage):
    for page in range(totalpage):
        print 'Now Page %d Result:' % (page + 1)
        pn = page * 10 + 1  # start from the first result page
        html = baidu_search(keyword, pn)
        content = unicode(html, 'utf-8', 'ignore')
        # clean \n \t
        content = subContent(content)
        # grab the organic result blocks in the left-hand column
        arrList = getList(u"<table.*?class=\"result\".*?>.*?<\/a>", content)
        # walk through each result block
        for item in arrList:
            regex = u"<h3.*?class=\"t\".*?><a.*?href=\"(.*?)\".*?>(.*?)<\/a>"
            link = getMatch(regex, item)
            # the redirect URL Baidu returns (with its encrypted token)
            url = link[0]
            # the result title
            title = clearTag(link[1]).encode('utf8')
            # follow the redirect to get the real target URL
            try:
                domain = urllib2.Request(url)
                r = random.randint(0, len(user_agents) - 1)
                domain.add_header('User-agent', user_agents[r])
                domain.add_header('Connection', 'keep-alive')
                response = urllib2.urlopen(domain)
                uri = response.geturl()
                print "[%s]---->[%s]" % (title, uri)
            except:
                continue

if __name__ == '__main__':
    # number of result pages to fetch
    totalpage = int(raw_input('input totalpage :'))
    # search keyword
    key = u'%s' % (raw_input('input key word:'))
    geturl(key, totalpage)
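The script above is written for Python 2 (urllib2, print statements, raw_input). As a rough sketch of what the same request flow could look like on Python 3 with only the standard library, assuming Baidu's /s endpoint still accepts the wd/pn/cl/rn parameters used above (the result-page markup may well have changed, so the parsing part is not ported here):

#!/usr/bin/env python3
# Minimal Python 3 sketch of the request flow, standard library only.
# Assumption: Baidu's /s endpoint still honours the wd/pn/cl/rn parameters.
import random
import urllib.parse
import urllib.request

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
]

def baidu_search(keyword, pn):
    # Build the same query string as the Python 2 version
    query = urllib.parse.urlencode({'wd': keyword, 'pn': pn, 'cl': 3, 'rn': 10})
    req = urllib.request.Request(
        'http://www.baidu.com/s?' + query,
        headers={'User-Agent': random.choice(USER_AGENTS)})
    with urllib.request.urlopen(req) as resp:
        return resp.read().decode('utf-8', 'ignore')

def resolve_redirect(url):
    # Follow Baidu's redirect link and return the real target URL
    req = urllib.request.Request(url, headers={'User-Agent': random.choice(USER_AGENTS)})
    with urllib.request.urlopen(req) as resp:
        return resp.geturl()

if __name__ == '__main__':
    html = baidu_search('python', 0)
    print(len(html), 'bytes fetched')

The one behavioural difference worth noting: random.choice picks a User-Agent directly from the list, which avoids the out-of-range index that random.randint could produce if the upper bound and the list length drift apart.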