# regular expression
re.search(r'<tr id="places_area__row">.*?<td class="w2p_fw">(.*?)</td>', html).groups()[0]  # .*? matches any non-newline characters (non-greedy); (...) is a capture group, used to extract the output.
# BeautifulSoup
soup.find('table').find('tr', id='places_area__row').find('td', class_="w2p_fw").text
# lxml CSS selector
tree.cssselect('table > tr#places_area__row > td.w2p_fw')[0].text_content()
# lxml XPath
tree.xpath('//tr[@id="places_area__row"]/td[@class="w2p_fw"]')[0].text_content()
The Chrome browser makes it easy to copy out each of these expressions (right-click an element in the developer tools and choose Copy selector or Copy XPath):
With the download function introduced earlier and the different expressions above, we can scrape the data in three different ways.
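For readers who jumped straight here, a minimal sketch of what that download helper is assumed to look like (a plain urllib wrapper; the user agent and retry behaviour below are my assumptions, not necessarily the original code):

import urllib.request
import urllib.error

def download(url, user_agent='wswp', num_retries=2):
    # Sketch of the download helper assumed above: fetch a URL with a
    # custom User-agent and retry a couple of times on 5xx server errors.
    print('Downloading:', url)
    request = urllib.request.Request(url, headers={'User-agent': user_agent})
    try:
        html = urllib.request.urlopen(request).read().decode('utf-8')
    except urllib.error.URLError as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # Server error: try again with one fewer retry left.
            return download(url, user_agent, num_retries - 1)
    return html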
1. Scraping Data in Different Ways
1.1 Scraping with Regular Expressions
Regular expressions are well supported in Python, as in most other languages: a small set of special symbols describes string patterns concisely and efficiently, and they are well worth learning. See https://docs.python.org/3/howto/regex.html. Regular-expression support is built into Python (the re module), so nothing extra needs to be installed.
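Before the full scraper, a quick demonstration of the two constructs the pattern below relies on: .*? is a non-greedy match (it stops at the first possible end, not the last), and (...) is a capture group. This snippet is my own illustration, not part of the original scraper:

import re

row = '<td class="w2p_fw">7,686,850 square kilometres</td><td>other</td>'
# Non-greedy (.*?) stops at the first </td>; a greedy (.*) would run on
# to the last </td> and capture the second cell as well.
match = re.search(r'<td class="w2p_fw">(.*?)</td>', row)
print(match.groups()[0])  # 7,686,850 square kilometres

With that in mind, the scraper itself: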
import re
targets = ('area', 'population', 'iso', 'country', 'capital', 'continent',
           'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format',
           'postal_code_regex', 'languages', 'neighbours')

def re_scraper(html):
    results = {}
    for target in targets:
        results[target] = re.search(r'<tr id="places_%s__row">.*?<td class="w2p_fw">(.*?)</td>'
                                    % target, html).groups()[0]
    return results
1.2 Scraping with BeautifulSoup
For BeautifulSoup usage, see the earlier post "python 网络爬虫 - BeautifulSoup 爬取网络数据". The code is as follows:
from bs4 import BeautifulSoup
targets = ('area', 'population', 'iso', 'country', 'capital', 'continent',
           'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format',
           'postal_code_regex', 'languages', 'neighbours')

def bs_scraper(html):
    soup = BeautifulSoup(html, 'html.parser')
    results = {}
    for target in targets:
        results[target] = soup.find('table').find('tr', id='places_%s__row' % target) \
                              .find('td', class_="w2p_fw").text
    return results
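A side benefit of BeautifulSoup worth knowing: its parsers repair broken HTML before you query it. A minimal sketch (my own example; the exact repaired tree depends on which parser you choose):

from bs4 import BeautifulSoup

# Deliberately broken HTML: unquoted attribute and unclosed <li> tags.
broken_html = '<ul class=country><li>Area<li>Population</ul>'
soup = BeautifulSoup(broken_html, 'html.parser')
print(soup.prettify())
# The attribute gets quoted and the open tags get closed; how the <li>
# elements are nested varies by parser (html.parser, lxml, html5lib),
# but .find() and .text then work on the repaired tree.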
1.3 Scraping with lxml
from lxml.html import fromstring

def lxml_scraper(html):
    tree = fromstring(html)
    results = {}
    for target in targets:
        results[target] = tree.cssselect('table > tr#places_%s__row > td.w2p_fw' % target)[0].text_content()
    return results

def lxml_xpath_scraper(html):
    tree = fromstring(html)
    results = {}
    for target in targets:
        results[target] = tree.xpath('//tr[@id="places_%s__row"]/td[@class="w2p_fw"]' % target)[0].text_content()
    return results
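One practical note: in current lxml releases, tree.cssselect() is backed by the separate cssselect package, so if the CSS version of the scraper raises an ImportError, installing that package should fix it (this concerns your environment, not the code above). Also, unlike browsers, lxml's HTML parser does not insert an implied tbody, which is why the table > tr selector works here even though a selector copied from Chrome would include tbody:

# pip install cssselect   (lxml's CSS selectors delegate to this package)
from lxml.html import fromstring

# Tiny fragment using the same selector style as lxml_scraper above.
html = '<table><tr id="places_area__row"><td class="w2p_fw">x</td></tr></table>'
tree = fromstring(html)
print(tree.cssselect('table > tr#places_area__row > td.w2p_fw')[0].text_content())  # x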
1.4 Results
scrapers = [('re', re_scraper), ('bs', bs_scraper), ('lxml', lxml_scraper), ('lxml_xpath', lxml_xpath_scraper)]
html = download('http://example.webscraping.com/places/default/view/Australia-14')
for name, scraper in scrapers:
    print(name, "=================================================================")
    result = scraper(html)
    print(result)
==========================================
Downloading: http://example.webscraping.com/places/default/view/Australia-14
re =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC ', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ''}
bs =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
lxml =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
lxml_xpath =================================================================
{'area': '7,686,850 square kilometres', 'population': '21,515,754', 'iso': 'AU', 'country': 'Australia', 'capital': 'Canberra', 'continent': 'OC', 'tld': '.au', 'currency_code': 'AUD', 'currency_name': 'Dollar', 'phone': '61', 'postal_code_format': '####', 'postal_code_regex': '^(\\d{4})$', 'languages': 'en-AU', 'neighbours': ' '}
From the results we can see that in some fields the regular expression returns extra markup rather than plain text. This is because the page structure in those places differs from the rest, so a single pattern cannot cover every field in the same way; some cells contain links or images, for example. BeautifulSoup and lxml have dedicated text-extraction functions (.text and .text_content() respectively), so they avoid this kind of error.
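To make the difference concrete, here is a contrived cell containing a link (my own snippet, not taken from the site): the regex hands back the raw markup, while BeautifulSoup's .text (and likewise lxml's .text_content()) strips the nested tags:

import re
from bs4 import BeautifulSoup

html = ('<tr id="places_neighbours__row">'
        '<td class="w2p_fw"><a href="/iso/NZ">NZ</a></td></tr>')

# Regex: captures everything between the tags, markup included.
print(re.search(r'<td class="w2p_fw">(.*?)</td>', html).groups()[0])
# -> <a href="/iso/NZ">NZ</a>

# BeautifulSoup: .text returns only the text inside nested tags.
print(BeautifulSoup(html, 'html.parser').find('td', class_='w2p_fw').text)
# -> NZ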
Given three different ways to scrape, what are the differences? Which situations suit each one? And how should we choose? ···to be continued···