Some commonly used code modules for web crawlers

Downloading images

import os
import requests
from bs4 import BeautifulSoup

URL = "http://www.nationalgeographic.com.cn/animals/"
html = requests.get(URL).text
soup = BeautifulSoup(html, 'lxml')

# find every <ul class="img_list"> block that holds the thumbnails
img_ul = soup.find_all('ul', {"class": "img_list"})

os.makedirs('./img', exist_ok=True)         # make sure the output folder exists
for ul in img_ul:
    imgs = ul.find_all('img')
    for img in imgs:
        url = img['src']
        r = requests.get(url, stream=True)  # stream so large images are not read into memory at once
        image_name = url.split('/')[-1]
        with open('./img/%s' % image_name, 'wb') as f:
            for chunk in r.iter_content(chunk_size=128):
                f.write(chunk)
        print('Saved %s' % image_name)
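
On some pages the src attribute is a relative path rather than a full URL. A small helper based on urllib.parse.urljoin (the helper name and the example path below are hypothetical, not part of the original snippet) can resolve it against the page URL before downloading:

from urllib.parse import urljoin

# hypothetical helper: turn a possibly-relative <img src> into an absolute URL
def absolute_src(page_url, src):
    # e.g. absolute_src(URL, "/static/cat.jpg") -> "http://www.nationalgeographic.com.cn/static/cat.jpg"
    return urljoin(page_url, src)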

Asynchronous loading with the asyncio library

import time

def job(t):
    print("Start job", t)
    time.sleep(t)                        # blocking sleep: nothing else can run in the meantime
    print('Job', t, 'takes', t, 's')

def main():
    [job(t) for t in range(1, 3)]        # run job(1) then job(2), one after the other

t1 = time.time()
main()
print("NO async total time:", time.time() - t1)

"""
NO async total time: 3.0
"""

import requests

URL = 'https://mofanpy.com/'


def normal():
    # fetch the page twice, one blocking request after the other
    for i in range(2):
        r = requests.get(URL)
        url = r.url
        print(url)

t1 = time.time()
normal()
print("Normal total time:", time.time()-t1)

"""
https://mofanpy.com/
https://mofanpy.com/
Normal total time: 0.3869960308074951
"""
import asyncio

async def job(t):                           # coroutine version of job
    print('Start job ', t)
    await asyncio.sleep(t)                  # wait "t" seconds; other tasks can run in the meantime
    print('Job ', t, ' takes ', t, ' s')

async def main(loop):                       # coroutine that drives the tasks
    tasks = [
        loop.create_task(job(t)) for t in range(1, 3)
    ]                                       # create the tasks, but do not run them yet
    await asyncio.wait(tasks)               # run them concurrently and wait until all finish

t1 = time.time()
loop = asyncio.get_event_loop()             # create the event loop
loop.run_until_complete(main(loop))         # run the loop until main() completes
loop.close()                                # close the loop
print("Async total time : ", time.time() - t1)

“”"
[‘https://mofanpy.com/’, ‘https://mofanpy.com/’]
Async total time: 0.11447715759277344
“”"

Using selenium
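
A minimal sketch of driving a real browser with selenium, useful when a page is rendered by JavaScript before the HTML you want exists (the ChromeDriver setup and the target page below are assumptions, not from the original notes):

from selenium import webdriver

driver = webdriver.Chrome()                 # assumes chromedriver is installed and on PATH
driver.get("https://mofanpy.com/")          # the browser executes JavaScript before we read the page
html = driver.page_source                   # fully rendered HTML, ready to hand to BeautifulSoup
print(html[:200])
driver.quit()                               # always close the browser when done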
