#CrawUnivRankingB.py
import requests
from bs4 import BeautifulSoup
import bs4
def getHTMLText(url):
    """Fetch *url* and return its decoded text, or "" on any request failure.

    The response encoding is overridden with the content-sniffed
    ``apparent_encoding`` so the Chinese page decodes correctly even when the
    HTTP headers lie about the charset.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # raise HTTPError for 4xx/5xx responses
        # apparent_encoding is guessed from the body bytes; more reliable
        # than the header-declared encoding for this site
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any network/HTTP failure yields "".
        return ""
def fillUnivList(ulist, html):
    """Parse the ranking page *html*, appending [rank, name, score] rows to *ulist*.

    Non-tag children of <tbody> (e.g. whitespace NavigableStrings) and rows
    without the expected columns are skipped. If the page has no <tbody>
    (e.g. *html* is "" after a failed download), *ulist* is left unchanged
    instead of raising AttributeError.
    """
    soup = BeautifulSoup(html, "html.parser")
    tbody = soup.find('tbody')
    if tbody is None:  # download failed or page layout changed
        return
    for tr in tbody.children:  # iterate direct children of <tbody>
        if isinstance(tr, bs4.element.Tag):  # skip non-tag nodes such as bare strings
            tds = tr('td')  # tr('td') is shorthand for tr.find_all('td')
            if len(tds) >= 4:  # guard against malformed/short rows
                # columns: 0 = rank, 1 = university name, 3 = total score
                ulist.append([tds[0].string, tds[1].string, tds[3].string])
def printUnivList(ulist, num):
    """Pretty-print the first *num* rows of *ulist* as an aligned table.

    chr(12288) is the full-width ideographic space; passing it as the fill
    character (the nested ``{3}`` slot) keeps mixed Chinese/English columns
    visually aligned.
    """
    tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"  # {3} is a slot inside a slot: the fill char
    print(tplt.format("排名", "学校名称", "总分", chr(12288)))
    # Slice instead of range(num) indexing so num > len(ulist) cannot
    # raise IndexError — it simply prints every available row.
    for rank, name, score in ulist[:num]:
        print(tplt.format(rank, name, score, chr(12288)))
def main():
    """Crawl the 2018 ranking page and print the top-20 universities."""
    uinfo = []
    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2018.html'
    html = getHTMLText(url)
    fillUnivList(uinfo, html)
    printUnivList(uinfo, 20)  # show top 20 universities


if __name__ == "__main__":
    # Guard so importing this module does not trigger a network request.
    main()
# Summary: a focused crawler built on the requests + bs4 stack; the table
# output was tuned so mixed Chinese/English columns align correctly.