Toutiao street-photo (街拍) image crawler

import requests
from urllib.parse import urlencode
from requests import codes
import os     # filesystem paths
from hashlib import md5    # message-digest (hash) algorithm
from multiprocessing.pool import Pool
import re

def get_page(offset):   # fetch the search-API JSON for a given offset
    params = {
        'aid':'24',
        'app_name':'web_search',
        'offset': offset,
        'format': 'json',
        'keyword': '街拍图片',
        'autoload': 'true',
        'count': '20',
        'en_qc': '1',
        'cur_tab': '1',
        'from': 'search_tab',
        'pd': 'synthesis'
    }
    base_url = 'https://www.toutiao.com/api/search/content/?'
    url = base_url + urlencode(params)
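    # urlencode(params) serializes the dict into a query string; for offset=0 it is roughly:
    #   aid=24&app_name=web_search&offset=0&format=json&keyword=%E8%A1%97%E6%8B%8D%E5%9B%BE%E7%89%87&...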
    headers={
        "user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
        "cookie":"tt_webid=6701616237875398148; WEATHER_CITY=%E5%8C%97%E4%BA%AC; UM_distinctid=16b4b9bc9e970e-02765bc3432b43-e353165-144000-16b4b9bc9ea5ac; tt_webid=6701616237875398148; csrftoken=22502c1f2bb590feb6bd44fd012c9aad; s_v_web_id=21f69859b89c3a163ebef7cba7ba3a4e; __tasessionId=8sspmcj7l1561426743570; CNZZDATA1259612802=551418336-1560339887-%7C1561425289",
        "x-requested-with":"XMLHttpRequest",
        "referer":"https://www.toutiao.com/search/?keyword=%E8%A1%97%E6%8B%8D%E5%9B%BE%E7%89%87"
    }
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == codes.ok:
            return response.json()
    except requests.ConnectionError:
        return None

def get_images(json):   # extract image URLs and titles from the JSON response
    if json.get("data"):
        data = json.get("data")
        for item in data:
            if item.get('title') is None:    # skip entries without a title
                continue
            title = item.get('title')
            images = item.get('image_list')
            if not images:                   # some results carry no image_list; skip them
                continue
            for image in images:
                #origin_image = re.sub("list", "origin", image.get('url'))
                origin_image = re.sub("list.*?pgc-image", "large/pgc-image", image.get('url'))   # re.sub(pattern, repl, string): swap the thumbnail path segment for the full-size one
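                # Illustration with a made-up URL: a thumbnail such as
                #   .../list/pgc-image/abc  would become  .../large/pgc-image/abc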
                yield {
                    'image':  origin_image,
                    #'image': image.get('url'),   # thumbnail URL, kept for reference
                    'title': title
                }

def save_image(item):   # download an image and write it to disk
    img_path = 'img' + os.path.sep + item.get('title')  # os.path.sep is the platform path separator ('\\' on Windows, '/' elsewhere)
    if not os.path.exists(img_path):   # check whether the directory already exists
        os.makedirs(img_path)          # create it if not
    try:
        resp = requests.get(item.get('image'))
        if codes.ok == resp.status_code:
            file_path = img_path + os.path.sep + '{file_name}.{file_suffix}'.format(
                file_name=md5(resp.content).hexdigest(),       # use .text for textual responses (str); use .content for binary data such as images
                file_suffix='jpg')                             # hexdigest() returns the digest as a hexadecimal string derived from the image bytes
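            # md5 is deterministic: e.g. md5(b'abc').hexdigest() is always
            # '900150983cd24fb0d6963f7d28e17f72', so the same image bytes map
            # to the same filename and duplicates are skipped below.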
            if not os.path.exists(file_path):                  # write only if the file does not already exist
                with open(file_path, 'wb') as f:
                    f.write(resp.content)
                print('Downloaded image path is %s' % file_path)
            else:
                print('Already Downloaded', file_path)
    except requests.ConnectionError:
        print('Failed to save image, item %s' % item)

def main(offset):
    json = get_page(offset)
    if json is None:       # request failed or returned a non-200 status
        return
    for item in get_images(json):
        save_image(item)


GROUP_START = 0
GROUP_END = 20

if __name__ == '__main__':
    pool = Pool()     # Pool(5) would create a pool of 5 worker processes; by default the size matches the CPU count
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]     # list comprehension: offsets 0, 20, ..., 400
    pool.map(main, groups)
    pool.close()    # stop accepting new tasks into the pool
    pool.join()     # block until all worker processes have exited
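    # Equivalent single-process loop, handy for debugging (not part of the
    # original script):
    #     for offset in groups:
    #         main(offset)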
