Contents of urls.txt:
https://regex101.com/
https://docs.python.org/3/this-url-will-404.html
https://www.nytimes.com/guides/
https://www.mediamatters.org/
https://1.1.1.1/
https://www.politico.com/tipsheets/morning-money
https://www.bloomberg.com/markets/economics
https://www.ietf.org/rfc/rfc2616.txt
import sys
import aiohttp
import asyncio
import logging
from typing import IO
from aiohttp import ClientSession

logging.basicConfig(
    format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
    level=logging.DEBUG,  # set the log level
    stream=sys.stderr,
)
logger = logging.getLogger("areq")
logging.getLogger("chardet.charsetprober").disabled = True

async def fetch_html(url: str, session: ClientSession, **kwargs) -> str:  # fetch the page HTML
    resp = await session.request(method="GET", url=url, **kwargs)
    resp.raise_for_status()  # raise an exception for 4xx/5xx responses
    logger.info("Response [%s] for URL: %s", resp.status, url)
    html = await resp.text()
    return html

async def parse(url: str, session: ClientSession, **kwargs) -> set:  # extract the links found at `url`
    import re
    from urllib.parse import urljoin
    from urllib.error import URLError
    from aiohttp import ClientError
    from aiohttp.http_exceptions import HttpProcessingError

    found = set()
    try:
        html = await fetch_html(url=url, session=session, **kwargs)
    except (ClientError, HttpProcessingError) as e:
        logger.error(
            "aiohttp exception for %s [%s]: %s", url,
            getattr(e, "status", None), getattr(e, "message", None))
        return found
    except Exception as e:
        logger.exception("Non-aiohttp exception occurred: %s", getattr(e, "__dict__", {}))
        return found
    else:
        HREF_RE = re.compile(r'href="(.*?)"')
        # scan every href on the current page
        for link in HREF_RE.findall(html):
            try:
                # resolve relative links against the page URL (useful when crawling a whole site)
                abslink = urljoin(url, link)
            except (URLError, ValueError):
                logger.exception("Error parsing link: %s", link)
            else:
                # keep the absolute link
                found.add(abslink)
        logger.info("Found %d links on %s", len(found), url)
        # every link found on the current page
        return found

async def write_one(file: IO, url: str, **kwargs) -> None:  # write the results for one source URL
    import aiofiles

    res = await parse(url=url, **kwargs)
    if not res:
        return None  # nothing to write for this URL
    async with aiofiles.open(file, "a") as f:
        for p in res:
            await f.write(f"{url}\t{p}\n")
        logger.info("Wrote results for source URL: %s", url)

async def main(file: IO, urls: set, **kwargs) -> None:  # crawl and write concurrently
    async with ClientSession() as session:
        tasks = [write_one(file, url, session=session, **kwargs)
                 for url in urls]
        await asyncio.gather(*tasks)

if __name__ == '__main__':
    import pathlib

    # asyncio.run() requires Python 3.7 or newer
    assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
    # directory containing this script
    here = pathlib.Path(__file__).parent

    # joinpath builds the full path to urls.txt
    with open(here.joinpath("urls.txt")) as infile:
        # read every URL into a set, stripping whitespace
        urls = set(map(str.strip, infile))

    outpath = here.joinpath("foundurls.txt")
    with open(outpath, "w") as outfile:
        outfile.write("source_url\tparsed_url\n")

    # run the top-level coroutine
    asyncio.run(main(file=outpath, urls=urls))
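
Because every coroutine forwards its **kwargs down to session.request(), per-request options can be supplied from the call site without touching the coroutines themselves. A minimal sketch, assuming aiohttp 3.x and an arbitrary 10-second budget that is not part of the script above:

# Sketch: thread a per-request timeout through the existing **kwargs chain,
# main() -> write_one() -> parse() -> fetch_html() -> session.request().
request_timeout = aiohttp.ClientTimeout(total=10)  # assumed value, adjust as needed
asyncio.run(main(file=outpath, urls=urls, timeout=request_timeout))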
Output:
2019-12-16 20:41:28,711 DEBUG:asyncio: Using selector: SelectSelector
2019-12-16 20:41:29,662 INFO:areq: Response [200] for URL: https://www.ietf.org/rfc/rfc2616.txt
2019-12-16 20:41:29,746 INFO:areq: Response [200] for URL: https://1.1.1.1/
2019-12-16 20:41:30,153 INFO:areq: Found 13 links on https://1.1.1.1/
2019-12-16 20:41:30,164 INFO:areq: Found 0 links on https://www.ietf.org/rfc/rfc2616.txt
2019-12-16 20:41:30,172 INFO:areq: Wrote results for source URL: https://1.1.1.1/
2019-12-16 20:41:30,342 INFO:areq: Response [200] for URL: https://www.mediamatters.org/
2019-12-16 20:41:30,786 INFO:areq: Response [200] for URL: https://regex101.com/
2019-12-16 20:41:30,850 INFO:areq: Found 116 links on https://www.mediamatters.org/
2019-12-16 20:41:30,871 INFO:areq: Wrote results for source URL: https://www.mediamatters.org/
2019-12-16 20:41:31,292 INFO:areq: Response [200] for URL: https://www.politico.com/tipsheets/morning-money
2019-12-16 20:41:31,367 INFO:areq: Found 149 links on https://www.politico.com/tipsheets/morning-money
2019-12-16 20:41:31,400 INFO:areq: Wrote results for source URL: https://www.politico.com/tipsheets/morning-money
2019-12-16 20:41:33,031 INFO:areq: Found 24 links on https://regex101.com/
2019-12-16 20:41:33,036 INFO:areq: Wrote results for source URL: https://regex101.com/
2019-12-16 20:41:36,827 ERROR:areq: aiohttp exception for https://docs.python.org/3/this-url-will-404.html [404]: Not Found
2019-12-16 20:41:50,115 ERROR:areq: aiohttp exception for https://www.nytimes.com/guides/ [None]: None
2019-12-16 20:41:50,116 ERROR:areq: aiohttp exception for https://www.bloomberg.com/markets/economics [None]: None
[Finished in 23.9s]
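
With only nine URLs, launching one task per URL is fine, but for a larger urls.txt you may want to cap how many requests are in flight at once. One common pattern is an asyncio.Semaphore wrapped around each task. The sketch below is an illustrative variant of main() that reuses write_one() and ClientSession from the script above; the name bounded_main and the limit of 5 are assumptions, not part of the original code:

async def bounded_main(file: IO, urls: set, limit: int = 5, **kwargs) -> None:
    sem = asyncio.Semaphore(limit)  # at most `limit` requests run concurrently

    async def bounded_write_one(url: str, session: ClientSession) -> None:
        async with sem:  # wait for a free slot before starting this URL
            await write_one(file, url, session=session, **kwargs)

    async with ClientSession() as session:
        await asyncio.gather(*(bounded_write_one(url, session) for url in urls))

# usage: asyncio.run(bounded_main(file=outpath, urls=urls))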