After installing BeautifulSoup4 and Jupyter, run jupyter notebook from cmd; the browser opens directly into the web-based Jupyter editor.
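(If the packages are not installed yet, they can usually be added beforehand with pip install beautifulsoup4 requests jupyter, assuming pip is on the PATH, and then launched with jupyter notebook.)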
import requests
newsurl = "http://news.sina.com.cn/china/"
res = requests.get(newsurl)      # fetch the Sina domestic-news listing page
res.encoding = 'utf-8'           # the page is UTF-8; set it so res.text decodes correctly
print(res.text)
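A small defensive addition, not part of the original notebook: before trusting res.text it is worth checking res.status_code on the response fetched above, since anything other than 200 means the request did not succeed.
if res.status_code == 200:               # 200 = OK
    res.encoding = 'utf-8'
    print(res.text[:500])                # preview only the first 500 characters
else:
    print('request failed with status', res.status_code)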
from bs4 import BeautifulSoup
html_sample = ' \
<html> \
<body> \
<h1 id="title">Hello World</h1> \
<a href="#" class="link"> This is link1</a> \
<a href="# link2" class="link"> This is link2</a> \
</body> \
</html> '
soup = BeautifulSoup(html_sample, 'html.parser')
print(soup.text)    # .text returns only the text content, with every tag stripped
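To see how html.parser has interpreted the fragment, rather than just its text, prettify() prints the parsed tree with one tag per line; a quick sanity check when a sample does not parse the way you expect.
print(soup.prettify())    # shows the reconstructed tag structure, indented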
from bs4 import BeautifulSoup
html_sample = ' \
<html> \
<body> \
<h1 id="title">Hello World</h1> \
<a href="#" class="link"> This is link1</a> \
<a href="# link2" class="link"> This is link2</a> \
</body> \
</html> '
soup = BeautifulSoup(html_sample, 'html.parser')
header = soup.select('h1')
#print(type(soup))
print(header)
# use select to pick out the elements carrying the 'h1' tag
from bs4 import BeautifulSoup
html_sample = ' \
<html> \
<body> \
<h1 id="title">Hello World</h1> \
<a href="#" class="link"> This is link1</a> \
<a href="# link2" class="link"> This is link2</a> \
</body> \
</html> '
soup = BeautifulSoup(html_sample, 'html.parser')
header = soup.select('h1')
#print(type(soup))
print(header)
print(header[0])
print(header[0].text)
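When only the first match is needed, select_one('h1') returns the tag directly (or None if nothing matches), which avoids the header[0] indexing; a small equivalent sketch:
first_h1 = soup.select_one('h1')    # first matching tag, or None
if first_h1 is not None:
    print(first_h1.text)            # same output as header[0].text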
from bs4 import BeautifulSoup
html_sample = ' \
<html> \
<body> \
<h1 id="title">Hello World</h1> \
<a href="#" class="link"> This is link1</a> \
<a href="# link2" class="link"> This is link2</a> \
</body> \
</html> '
soup = BeautifulSoup(html_sample, 'html.parser')
alink = soup.select('a')
#print(type(soup))
print(alink)
for link in alink:
    print(link)
    print(link.text)
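The same select('a') loop can also be pointed at the Sina page fetched at the top of this note; a rough sketch, with output depending on whatever the live page happens to contain:
import requests
from bs4 import BeautifulSoup
res = requests.get("http://news.sina.com.cn/china/")
res.encoding = 'utf-8'
soup_news = BeautifulSoup(res.text, 'html.parser')
for link in soup_news.select('a'):
    print(link.text)    # anchor text of every link on the page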
from bs4 import BeautifulSoup
html_sample = ' \
<html> \
<body> \
<h1 id="title">Hello World</h1> \
<a href="#" class="link"> This is link1</a> \
<a href="# link2" class="link"> This is link2</a> \
</body> \
</html> '
soup = BeautifulSoup(html_sample, 'html.parser')
alink = soup.select('#title')          # '#' selects by id
print(alink)
for link in soup.select('.link'):      # '.' selects by class
    print(link)
alinks = soup.select('a')
for link in alinks:
    print(link['href'])                # the value of the href attribute
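Indexing with link['href'] raises a KeyError if an a tag has no href attribute; link.get('href') returns None instead, which is safer when looping over a real page:
for link in soup.select('a'):
    print(link.get('href'))            # None instead of an exception when href is missing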
a = '<a href="#" qoo=123 abc=456> I am a link</a>'
soup2 = BeautifulSoup(a, 'html.parser')
print(soup2.select('a')[0]['qoo'])
print(soup2.select('a')[0]['abc'])
print(soup2.select('a')[0]['href'])
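All attributes of a tag, including custom ones like qoo and abc, are also exposed as a dict through .attrs, which is a handy way to inspect what the parser picked up:
print(soup2.select('a')[0].attrs)      # e.g. {'href': '#', 'qoo': '123', 'abc': '456'}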