Scraping every image from Meizitu with Python

Abstract: Use Python with requests and BeautifulSoup to crawl all of the images on the Meizitu site.

# -*- coding: utf-8 -*-

'''
python 3.5.2
'''

# Import the required modules
import os
import random
import re
import time

import requests
from bs4 import BeautifulSoup

def ip_test(ip, url_for_test='https://www.baidu.com', set_timeout=10):
    '''
    Check whether a scraped proxy actually works: return True if a test
    request through it succeeds, False otherwise. By default the proxy is
    tested against Baidu.
    :param ip: (host, port) tuple of strings
    :param url_for_test: URL used to test the proxy
    :param set_timeout: request timeout in seconds
    :return: bool
    '''
    try:
        # register the proxy for both schemes so the https test URL really goes through it
        proxies = {'http': 'http://' + ip[0] + ':' + ip[1],
                   'https': 'http://' + ip[0] + ':' + ip[1]}
        r = requests.get(url_for_test, headers=headers, proxies=proxies, timeout=set_timeout)
        return r.status_code == 200
    except requests.RequestException:
        return False

def scrawl_ip(url, num, url_for_test='https://www.baidu.com'):
    '''
    Scrape proxy IP addresses from the Xici proxy listing.
    :param url: base URL of the proxy listing
    :param num: number of listing pages to scrape
    :param url_for_test: URL used to verify each proxy
    :return: list of working proxies as 'ip:port' strings
    '''
    ip_list = []
    for num_page in range(1, num + 1):
        page_url = url + str(num_page)

        response = requests.get(page_url, headers=headers)
        response.encoding = 'utf-8'
        content = response.text

        # Chinese proxies are flagged with alt="Cn"; the two groups capture the
        # IP and the port from the following table cells.
        pattern = re.compile('.*?alt="Cn" />.*?<td>(.*?)</td>.*?<td>(.*?)</td>', re.S)
        items = re.findall(pattern, content)
        for ip in items:
            if ip_test(ip, url_for_test):  # keep the proxy only if the test request succeeds
                print('Proxy OK: ' + ip[0] + ':' + ip[1])
                ip_list.append(ip[0] + ':' + ip[1])

        time.sleep(5)  # wait 5 seconds before fetching the next page
    return ip_list

def get_random_ip():
    # Pick one proxy at random from the scraped pool
    return random.choice(total_ip)

def download_img(img_list, img_title):
    '''
    Download every image in a single album. scrawl_url supplies the list of
    image tags and the album title, and this function saves each image to disk.
    :param img_list: the <img> tags of one album; their 'src' attributes look like
        ['http://mm.howkuai.com/wp-content/uploads/2017a/02/07/01.jpg',
         'http://mm.howkuai.com/wp-content/uploads/2017a/02/07/02.jpg', ...]
    :param img_title: the album title, e.g. '香车美女,最完美的黄金搭档'
    :return:
    '''

    img_title = format_name(img_title)  # strip characters Windows does not allow in folder names
    for img_tag in img_list:
        img_url = img_tag.attrs['src']  # URL of a single image
        print(img_url)
        title = img_tag.attrs['alt']    # name of a single image
        print(title)

        try:
            album_dir = os.path.join(file_path, img_title)
            if not os.path.exists(album_dir):
                os.makedirs(album_dir)
            os.chdir(album_dir)

            # Save the image locally, skipping files that were already downloaded
            if not os.path.exists(title + ".jpg"):
                try:
                    img_html = requests.get(img_url, headers=headers, stream=True, timeout=20, verify=True)
                    with open(title + ".jpg", 'wb') as f:
                        f.write(img_html.content)
                except Exception:
                    continue
        except Exception:
            continue
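
# The main loop below calls scrawl_url, but its definition is missing from the
# original listing. What follows is a minimal reconstruction sketched from how
# download_img consumes its output (a list of <img> tags plus the album title);
# the selectors used here ('div' id='picture' and 'div' class='metaRight') are
# assumptions, not taken from the original code.
def scrawl_url(url_img):
    '''
    Scrape a single album page and return its image tags and title.
    :param url_img: URL of one album page
    :return: (list of <img> tags, album title), or ([], '') on failure
    '''
    try:
        html = requests.get(url_img, headers=headers, timeout=10)
        html.encoding = 'gb2312'
        bsop = BeautifulSoup(html.text, 'html.parser')
        img_list = bsop.find('div', {'id': 'picture'}).findAll('img')
        img_title = bsop.find('div', {'class': 'metaRight'}).find('h2').get_text().strip()
        return img_list, img_title
    except Exception:
        return [], ''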

def scrawl_list(url_list, proxy_flag=False, try_time=0):
    '''
    Scrape the album URLs on one listing page. A page holds about ten albums,
    so each call returns a list such as
    ['http://www.meizitu.com/a/list_1_1.html', ...]
    :param url_list: URL of the listing page
    :param proxy_flag: whether to request the page through a proxy
    :param try_time: how many proxy attempts have been made so far
    :return: list of album URLs
    '''
    if not proxy_flag:  # without a proxy
        try:
            html = requests.get(url_list, headers=headers, timeout=10)
            html.encoding = 'gb2312'
            text = html.text

            bsop = BeautifulSoup(text, 'html.parser')

            url_imgs = []
            li_list = bsop.find('ul', {'class': 'wp-list clearfix'}).findAll('li', {'class': 'wp-item'})
            for i in li_list:
                url_img = i.find('h3', {'class': 'tit'}).find('a').attrs['href']
                url_imgs.append(url_img)
            return url_imgs
        except Exception:
            return scrawl_list(url_list, proxy_flag=True)  # fall back to the proxy pool
    else:  # going through the proxy pool
        if try_time < count_time:  # allow at most count_time proxy attempts
            try:
                html = requests.get(url_list, headers=headers,
                                    proxies={'http': get_random_ip()}, timeout=10)
                html.encoding = 'gb2312'
                bsop = BeautifulSoup(html.text, 'html.parser')

                url_imgs = []
                li_list = bsop.find('ul', {'class': 'wp-list clearfix'}).findAll('li', {'class': 'wp-item'})
                for i in li_list:
                    url_imgs.append(i.find('h3', {'class': 'tit'}).find('a').attrs['href'])
                return url_imgs
            except Exception:
                # retry with another random proxy until count_time is exhausted
                return scrawl_list(url_list, proxy_flag=True, try_time=try_time + 1)
        else:
            return []

def format_name(img_title):
    '''
    Strip characters that Windows does not allow in folder or file names,
    otherwise the album folder cannot be created.
    :param img_title: raw album title
    :return: cleaned title
    '''
    for i in ['\\', '/', ':', '*', '?', '"', '<', '>', ',', '!', '|']:
        while i in img_title:
            img_title = img_title.strip().replace(i, '')
    return img_title
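
# download_urls is also referenced in the main section but missing from the
# original listing. A minimal sketch, assuming the listing pages follow the
# 'list_1_<n>.html' pattern visible in first_url; it calls scrawl_list on each
# listing page and collects the per-page album URL lists.
def download_urls(pages):
    '''
    Build the URL of every listing page and scrape the album URLs on each one.
    :param pages: total number of listing pages
    :return: list of per-page album URL lists
    '''
    url_imgss = []
    for page in range(1, pages + 1):
        page_url = 'http://www.meizitu.com/a/list_1_' + str(page) + '.html'
        url_imgs = scrawl_list(page_url)
        if url_imgs:
            url_imgss.append(url_imgs)
        time.sleep(2)  # brief pause between listing pages
    return url_imgss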

def get_total_pages(first_url):
    '''
    Get the total number of listing pages on the site.
    :param first_url: URL of the first listing page
    :return: page count as an int
    '''
    html = requests.get(first_url, headers=headers, timeout=10)
    html.encoding = 'gb2312'
    text = html.text
    bsop = BeautifulSoup(text, 'html.parser')
    # The last <li> of the pagination bar links to the final page, e.g. list_1_<n>.html
    lis = bsop.find('div', {'id': 'wp_page_numbers'}).find('ul').findAll('li')
    pages = lis[-1].find('a').attrs['href'].split('.')[0].split('_')[-1]
    return int(pages)


# First listing page of the site, used to work out the total number of pages
first_url = 'http://www.meizitu.com/a/list_1_1.html'

# URL of the Xici proxy listing that the proxies are scraped from
url_ip = "http://www.xicidaili.com/nt/"

# Request timeout in seconds
set_timeout = 10

# Number of proxy listing pages to scrape; 2 means two pages of IP addresses
num = 2

# Maximum number of attempts through the proxy pool
count_time = 5

# Build the request headers with a randomly chosen User-Agent
UserAgent_List = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
headers = {'User-Agent': random.choice(UserAgent_List),
           'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
           'Accept-Encoding': 'gzip',
           }

# Local folder where the images are saved
file_path = r'E:\selfprogress\programming\project\meizitu'

# Total number of listing pages
pages = get_total_pages(first_url)

# Scrape the proxy pool
total_ip = scrawl_ip(url_ip, num)

# Album URLs to be crawled, grouped by listing page
url_imgss = download_urls(pages)

# Record every album URL in url.txt
for i in url_imgss:
    for j in i:
        try:
            with open('url.txt', 'a') as f:
                f.write(j + "\n")
            print("Wrote album URL to url.txt")
        except OSError:
            print("Failed to write to url.txt")

# Crawl each album and download its images, pausing between albums
for url_imgs in url_imgss:
    for url_img in url_imgs:
        img_list, img_title = scrawl_url(url_img)
        if not img_list:
            continue
        download_img(img_list, img_title)

        time.sleep(5)
