【爬虫】使用requests爬取英雄联盟英雄皮肤

使用requests爬取英雄联盟英雄皮肤

自己做的

import os

import requests

# Fetch the master hero list and collect every hero's numeric ID.
response = requests.get("https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js")
result = response.json()

hero_ids = [hero['heroId'] for hero in result['hero']]
print(hero_ids)

# Create the output directory up front (the original assumed it existed).
os.makedirs('files/heroes', exist_ok=True)

for hero_id in hero_ids:
    # Each hero has a per-hero JSON endpoint listing all of its skins.
    url = f'https://game.gtimg.cn/images/lol/act/img/js/hero/{hero_id}.js'
    detail = requests.get(url).json()
    for skin in detail['skins']:
        # Some skins have no main image; skip them instead of re-writing the
        # previous skin's bytes (the original re-used a stale variable, and
        # would raise NameError if the very first skin had no image).
        if not skin['mainImg']:
            continue
        # '/' is a path separator; sanitize it before building the filename
        # instead of catching FileNotFoundError and retrying.
        name = skin['name'].replace('/', '1')
        image = requests.get(skin['mainImg']).content
        # Forward slashes work on Windows too and match the second script.
        with open(f'files/heroes/{name}.jpg', 'wb') as f:
            f.write(image)

print("爬取结束")

升级版:每个英雄创建一个文件夹

import requests
import os
# 1.获取所有英雄ID
def get_all_hero_id():
    """Fetch the official hero list and return every hero's ID."""
    hero_list_url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
    payload = requests.get(hero_list_url).json()
    # The endpoint returns {'hero': [{'heroId': ..., ...}, ...]}.
    return [entry['heroId'] for entry in payload['hero']]


# 下载皮肤对应的图片
# 下载皮肤对应的图片
def download(hero_name, skin_name, skin_url):
    """Download one skin image into files/<hero_name>/<skin_name>.jpg.

    :param hero_name: hero title used as the per-hero folder name
    :param skin_name: skin display name (must already be path-safe)
    :param skin_url: absolute URL of the image to fetch
    """
    # makedirs with exist_ok avoids two problems of the original
    # exists()+mkdir pair: os.mkdir fails when the 'files' parent is
    # missing, and the check-then-create sequence is race-prone.
    os.makedirs(f'files/{hero_name}', exist_ok=True)

    # 下载图片 — fetch the bytes and write them in binary mode.
    response = requests.get(skin_url)
    with open(f'files/{hero_name}/{skin_name}.jpg', 'wb') as f:
        f.write(response.content)
    print(f'----------{skin_name}下载完成---------------')


# 2. 获取每个英雄的皮肤地址
# 2. 获取每个英雄的皮肤地址
def get_hero_skin_url(hero_id):
    """Fetch one hero's detail JSON and download every skin image it lists.

    :param hero_id: numeric hero ID as returned by get_all_hero_id()
    """
    url = f'https://game.gtimg.cn/images/lol/act/img/js/hero/{hero_id}.js'
    response = requests.get(url)
    for skin in response.json()['skins']:
        hero_name = skin['heroTitle']
        # '/' is a path separator and would break the output file path.
        skin_name = skin['name'].replace('/', '')
        # Prefer the main splash art; fall back to the chroma image.
        skin_url = skin['mainImg'] if skin['mainImg'] else skin['chromaImg']
        # Some entries carry no image URL at all; skip them instead of
        # issuing requests.get('') and crashing (bug in the original).
        if not skin_url:
            continue
        download(hero_name, skin_name, skin_url)


if __name__ == '__main__':
    # Entry point: crawl every hero's skins when run as a script.
    hero_ids = get_all_hero_id()
    for hero_id in hero_ids:
        get_hero_skin_url(hero_id)

你可能感兴趣的:(python练习题,爬虫,python,开发语言)