A Small Image Crawler: A Must-Have for Guys

This script crawls the girl pictures posted at http://jandan.net/ooxx/ using only the standard library (os and urllib.request): it reads the current page number from the front page, walks backwards through a given number of pages, extracts the .jpg addresses from each page, and saves the files into a local folder.
# -*- coding: utf-8 -*-

import os
import urllib.request as req


# Request a URL and return the raw response body
def url_open(url):
    requ = req.Request(url)
    requ.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')
    repo = req.urlopen(requ)   # pass the Request object so the User-Agent header is actually sent
    html = repo.read()
    return html

# Get the current page number from the site's front page
def get_page(url):
    html = url_open(url).decode('utf-8')

    # The page number sits between '[' and ']' just after 'current-comment-page';
    # the +23 offset skips the marker itself plus the characters before the digits
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)

    return html[a:b]

# Scan one page's HTML and collect the addresses of its .jpg images
def find_imgs(page_url):
    html = url_open(page_url).decode('utf-8')

    img_addrs = []

    a = html.find('img src=')

    while a != -1:
        # Look for the '.jpg' extension within 255 characters of the tag
        b = html.find('.jpg', a, a + 255)

        if b != -1:
            # Skip 'img src="' (9 characters) and keep the '.jpg' extension
            img_addrs.append(html[a + 9:b + 4])
        else:
            b = a + 9

        a = html.find('img src=', b)

    return img_addrs

# Download each image and save it locally under its original file name
def save_img(folder, img_addrs):
    for each in img_addrs:
        # Addresses are usually protocol-relative ('//...'), so add the scheme first;
        # anything that still isn't an http address gets skipped
        if each.startswith('//'):
            each = 'http:' + each
        if not each.startswith('http'):
            continue
        print(each)
        # Files land in the current directory, which download_tup() has already
        # switched to the target folder
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)
              
def download_tup(folder='tup', pages=10):
    os.makedirs(folder, exist_ok=True)   # don't crash if the folder already exists
    os.chdir(folder)

    url = "http://jandan.net/ooxx/"

    page_num = int(get_page(url))

    # Walk backwards from the current page, one page per iteration
    for i in range(pages):
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_img(folder, img_addrs)

if __name__ == '__main__':
    download_tup('tup', 10)
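
The slicing in get_page() and find_imgs() depends entirely on the markup jandan.net happens to serve, so the magic numbers 23, 9 and 4 deserve a closer look. Below is a minimal, self-contained sketch of how they work, run against hypothetical snippets of the assumed markup (the HTML strings are made-up examples, not captured from the live site):

# Worked example of the string slicing used above; the HTML snippets are hypothetical.

# get_page(): 'current-comment-page' is 20 characters, and the +23 offset
# also skips '">[' so the slice starts at the first digit of the page number
# and stops just before the closing ']'.
html = '<span class="current-comment-page">[233]</span>'
a = html.find('current-comment-page') + 23
b = html.find(']', a)
print(html[a:b])          # prints: 233

# find_imgs(): 'img src=' is 8 characters, +9 also skips the opening quote,
# and b + 4 keeps the '.jpg' extension inside the slice.
html = '<img src="//wx1.example.com/large/photo.jpg" alt="">'
a = html.find('img src=')
b = html.find('.jpg', a, a + 255)
print(html[a + 9:b + 4])  # prints: //wx1.example.com/large/photo.jpg

If the site changes this markup, both slices break silently and return garbage; that fragility is the main trade-off of find()-based scraping compared to a real HTML parser.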
