Scraping Toutiao street photography data: practicing Ajax data scraping

Toutiao street photography data:

  • Target page: the Ajax-loaded data behind https://www.toutiao.com/search/?keyword=%E8%A1%97%E6%8B%8D. Analysis shows the page presents its content as a data feed; in the browser, open F12 - the Network tab - XHR to inspect the Ajax requests, whose request url is (see the request sketch after this list):
    • https://www.toutiao.com/search_content/?offset=40&format=json&keyword=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=1&from=search_tab
    • The offset parameter controls the pagination of the data; the response is JSON.
  • Fields to extract:
    • title : the entry's title
    • image_list : the list of image URLs
  • Storage: use the title as a folder name and download the images into that folder.
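
A minimal sketch of one such Ajax request, assuming the search_content endpoint and parameters shown above still behave as described (the interface may have changed and may now require extra headers or cookies, so treat this as illustrative only):

from urllib.parse import urlencode
import requests

params = {
    'offset': 0,            # offset = page number * 20; each page returns up to 20 entries
    'format': 'json',
    'keyword': '街拍',
    'autoload': 'true',
    'count': '20',
    'cur_tab': '1',
    'from': 'search_tab',
}
resp = requests.get('https://www.toutiao.com/search_content/?' + urlencode(params))
if resp.status_code == requests.codes.ok:
    for entry in resp.json().get('data', []):
        # each entry of interest carries a 'title' and an 'image_list' of image URLs
        print(entry.get('title'), entry.get('image_list'))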

Code:

import os
import requests
from urllib.parse import urlencode
from hashlib import md5



# Fetch one page of search results (JSON) for the given offset
def getOnePage(offset):
    data = {
        'offset': offset,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'from': 'search_tab',
    }
    try:
        url = 'https://www.toutiao.com/search_content/?' + urlencode(data)
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            return response.json()
    except Exception as e:
        print(e)
        return None
    
# Parse one page: yield a dict with the title and image URL for each image
def parseOnePage(content):
    if content.get('data'):
        for item in content.get('data'):
            title = item.get('title')
            imgList = item.get('image_list')
            # skip entries that have no title or no images
            if title and imgList:
                for imgUrl in imgList:
                    yield {
                        'title': title,
                        'imgUrl': 'http:' + imgUrl.get('url')
                    }

# Download one image into a folder named after its title
def downloadImg(item):
    # exist_ok avoids a race when several worker processes create the same folder
    os.makedirs(item.get('title'), exist_ok=True)
    try:
        response = requests.get(item.get('imgUrl'))
        print(response.status_code)
        if response.status_code == requests.codes.ok:
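            # name the file by the MD5 of its content so duplicate images are saved only once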
            fileName = md5(response.content).hexdigest() + '.jpg'
            filePath = os.path.join(item.get('title'),fileName)
            if not os.path.exists(filePath):
                with open(filePath, 'wb') as f:
                    f.write(response.content)
            else:
                print('file already downloaded', filePath)
    except Exception as e:
        print('file download failed!', item.get('imgUrl'), e)
            
def main(offset):
    content = getOnePage(offset)
    # guard against a failed request returning None
    if content:
        for item in parseOnePage(content):
            downloadImg(item)

if __name__ == '__main__':
    # Use a process pool to fetch several pages in parallel
    from multiprocessing.pool import Pool
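    # Pool() defaults to one worker process per CPU core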
    pool = Pool()
    
    GROUP_START = 1
    GROUP_END = 20
    
    # offsets 20, 40, ..., 400: one request per page of 20 results
    groups = [i * 20 for i in range(GROUP_START, GROUP_END + 1)]
    pool.map(main, groups)
    pool.close()
    pool.join()
    
    
