网络爬虫（版本 2）
def get_html(urls):
    """Fetch a URL and return its body decoded as UTF-8.

    Parameters
    ----------
    urls : str
        The URL to download.

    Returns
    -------
    str
        The decoded page text, or "" if the request fails for any reason
        (best-effort crawler contract: callers treat "" as "page unavailable").
    """
    import urllib.request as ur
    try:
        # Context manager guarantees the connection is closed even on error;
        # the original leaked the response object.
        with ur.urlopen(urls) as page:
            # 'utf-8-sig' strips a leading BOM if the server sends one.
            return page.read().decode('utf-8-sig')
    except Exception:
        # Deliberately broad (any network/decoding failure -> ""), but unlike
        # the original bare `except:` it no longer swallows KeyboardInterrupt
        # or SystemExit.
        return ""
def get_url(html_page):
    """Extract the first href value from an 'a href="..."' occurrence.

    Parameters
    ----------
    html_page : str
        Raw HTML text to scan.

    Returns
    -------
    tuple
        (url, end_index) where end_index is the position of the closing
        quote, or (None, 0) when no link remains in the fragment.
    """
    anchor = 'a href="'
    opening = html_page.find(anchor)
    if opening < 0:
        # No anchor left in this fragment.
        return None, 0
    # Skip past the attribute prefix to the first character of the URL.
    url_start = opening + len(anchor)
    url_end = html_page.find('"', url_start)
    return html_page[url_start:url_end], url_end
def get_all_url(seed):
    """Collect, deduplicate and sort every useful link found on *seed*.

    Downloads the seed page once, then repeatedly scans the remaining text
    for anchors, keeping each new link that passes ``is_useful``.

    Parameters
    ----------
    seed : str
        URL of the page to crawl.

    Returns
    -------
    list
        Sorted list of unique useful link paths.
    """
    remaining = get_html(seed)
    found = []
    while True:
        link, end = get_url(remaining)
        if not link:
            # Either no more anchors or an empty href — stop scanning.
            break
        if link not in found and is_useful(link):
            found.append(link)
        # Resume scanning just past the closing quote of this href.
        remaining = remaining[end + 1:]
    found.sort()
    return found
def is_useful(a_url):
    """Return True when *a_url* looks like a chapter page path.

    Expected shape: ``/<digits>.<digits><anything>.html``,
    e.g. ``/12.34/567.html``.

    Parameters
    ----------
    a_url : str
        Link path extracted from the page.

    Returns
    -------
    bool
    """
    import re
    # Bug fix: the original pattern used unescaped dots, which match *any*
    # character and therefore accepted paths such as "/12x34y.html".
    return bool(re.match(r"/\d+\.\d+.+\.html", a_url))
def get_content(html_page):
start_flag = html_page.find("readx()")
# print(start_flag)
if start_flag < 0:
return "获取章节失败"
end_flag = html_page.find("read3()")
content_page = html_page[start_flag:end_flag]
start_flag = content_page.find("")
if start_flag < 0:
return "获取章节失败"
else:
start_flag += 8
end_flag = content_page.find("
现在比起之前的版本来说，代码的可读性和移植性都很好，但是不知道为什么执行效率很慢，求解。