import os
import sys
import urllib.request
from urllib.parse import urlparse, urljoin
from http.client import InvalidURL
import re
class Retriever:
    __slots__ = ('url', 'file')
def __init__(self, url):
self.url, self.file = self.get_file(url)
def get_file(self, url, default='index.html'):
"""
从url中得到可以使用的本地用户名
:param url:
:param default: 默认名设置为index.html
:return: url, 文件路径
"""
parsed = urlparse(url)
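        # Strip any user:password@ prefix and :port suffix so only the bare
        # host name is used to build the local directory path.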
host = parsed.netloc.split('@')[-1].split(':')[0]
filepath = '%s%s' % (host, parsed.path)
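        # A path with no file extension is treated as a directory, so the
        # page is saved there under the default name (index.html).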
if not os.path.splitext(parsed.path)[1]:
filepath = os.path.join(filepath, default)
linkdir = os.path.dirname(filepath)
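        # Ensure the target directory exists; if a plain file already occupies
        # that path, remove it first so makedirs() can succeed.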
if not os.path.isdir(linkdir):
if os.path.exists(linkdir):
os.unlink(linkdir)
os.makedirs(linkdir)
return url, filepath
def download(self):
"""
下载文件
:return:本地文件名称
"""
try:
retval = urllib.request.urlretrieve(self.url, self.file)
except (IOError, InvalidURL) as e:
            retval = (('***ERROR: bad URL "%s": %s' % (self.url, e)),)
return retval
def parse_links(self):
"""
解析获取下载到的文件中的url
:return:url 列表
"""
        try:
            with open(self.file, 'r') as f:
                data = f.read()
            # Match absolute http/https URLs: letters, digits, dots, hyphens,
            # underscores, and percent-encoded characters.
            return re.findall(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+", data)
        except UnicodeDecodeError as e:
            print(e)
            return []
class Crawler:
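    # Class-wide count of pages downloaded so far, shared by all instances.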
count = 0
def __init__(self, url):
self.q = [url]
self.seen = set()
parsed = urlparse(url)
host = parsed.netloc.split('@')[-1].split(':')[0]
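        # Keep only the last two labels of the host (e.g. example.com) so that
        # other subdomains of the same site are still considered in-domain.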
self.dom = '.'.join(host.split('.')[-2:])
def get_page(self, url, media=False):
"""
下载页面和解析到的链接,将下载到的页面中满足条件的url放入列表q
:param url: url地址
:param media: 默认为False,忽略媒体文件
:return:
"""
r = Retriever(url)
fname = r.download()[0]
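        # download() returns (filename, headers) on success, or a one-element
        # tuple whose string starts with '*' on error.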
if fname[0] == '*':
print(fname, '...skipping parse')
return
Crawler.count += 1
print('\n(', Crawler.count, ')')
print('URL:', url)
        print('FILE:', fname)
self.seen.add(url)
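        # Only HTML pages are parsed for further links; everything else stops here.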
ftype = os.path.splitext(fname)[1]
if ftype not in ('.htm', '.html'):
return
for link in r.parse_links():
if link.startswith('mailto:'):
print('discarded, mailto link')
continue
            if not media:
                # Check the link's own extension, not the downloaded page's.
                if os.path.splitext(link)[1].lower() in ('.mp3', '.mp4', '.m4v', '.wav'):
                    print('...discarded, media file')
                    continue
            if not link.startswith('http://') and not link.startswith('https://'):
                # Resolve a relative link against the page it came from.
                link = urljoin(url, link)
print('*', link)
if link not in self.seen:
if self.dom not in link:
print(self.dom, link)
print('...discarded, not in domain')
else:
if link not in self.q:
self.q.append(link)
print('...new, added to Q')
else:
                    print('...discarded, already in Q')
else:
print('...discarded, already processed')
def go(self, media=False):
"""
取q中的url地址进行处理,直至q为空
:param media:
:return:
"""
while self.q:
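            # pop() removes from the end of the list, so the crawl proceeds depth-first.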
url = self.q.pop()
self.get_page(url, media)
def main():
if len(sys.argv) > 1:
url = sys.argv[1]
else:
try:
url = input('Enter starting URL: ')
except (KeyboardInterrupt, EOFError):
url = ''
if not url:
return
if not url.startswith('http://') and not url.startswith('https://') and not url.startswith('ftp://'):
url = 'https://%s' % url
robot = Crawler(url)
robot.go()
if __name__ == "__main__":
main()
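
# Usage sketch (assuming this file is saved as crawl.py; the URL is only a placeholder):
#   $ python crawl.py https://www.example.com
# Downloaded pages are stored under a directory named after the host,
# e.g. www.example.com/index.html, and in-domain links are crawled in turn.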