Crawling all videos from the *** site with Python (full-site download)



#!/usr/bin/env python
# -*- coding: utf-8 -*-

import requests
from bs4 import BeautifulSoup
import time
import random

# Proxy IP pool; one entry is picked at random for each request
proxyiplist = [
    {'http':'http://101.68.73.54:53281'},
    {'http':'http://115.223.193.121:9000'},
    {'http':'http://115.223.245.201:9000'},
    {'http':'http://180.118.247.90:9000'},
    {'http':'http://115.223.222.253:9000'}
]


def ***all(url):
    """
    Collect the video detail-page links listed on one category page.
    """
    proxies = random.choice(proxyiplist)
    response = requests.get(url, proxies=proxies, timeout=10)
    soup = BeautifulSoup(response.text, 'lxml')
    shipinlist = []
    for p in soup.find_all('p', class_='img'):
        shipinlist.append('http://www.***.com' + p.a['href'])
    return shipinlist

def yuan(url):
    """
    Resolve the actual video source URL from a detail-page URL.
    """
    proxies = random.choice(proxyiplist)
    # Rewrite the detail-page URL into the play-page URL: drop the '.html'
    # suffix, append '-src-1-num-1.html', and swap 'detail' for 'play'.
    url = ".".join(url.split(".")[:3])
    url = url + '-src-1-num-1.html'
    url = url.replace('detail', 'play')
    response = requests.get(url, proxies=proxies, timeout=10)
    soup = BeautifulSoup(response.text, 'lxml')
    try:
        videoUrl = soup.find('div', class_='dz').p.string
        if videoUrl is not None:
            return videoUrl
    except AttributeError:
        # Page layout differs or the 'dz' block is missing; skip it.
        pass


def urllists(leixin):
    """
    Build the full list of listing-page URLs for one category id.
    """
    proxies = random.choice(proxyiplist)
    userlist = []
    # Fetch one listing page and read the total page count from the last
    # pagination link, then build a URL for every page in the category.
    url = 'http://www.***.com/?m=vod-type-id-%s-pg-500.html' % leixin
    response = requests.get(url, proxies=proxies, timeout=10)
    soup = BeautifulSoup(response.text, 'lxml')
    pagenum = soup.find_all('a', class_="pagelink_b")[-1].text
    for page in range(1, int(pagenum) + 1):
        url = 'http://www.***.com/?m=vod-type-id-%s-pg-%s.html' % (leixin, page)
        userlist.append(url)
    return userlist

if __name__ == '__main__':
    # Walk category ids 16-24: every listing page, then every detail page,
    # and print the resolved video source URL for each one.
    for leixin in range(16, 25):
        for page_url in urllists(leixin):
            for detail_url in ***all(page_url):
                print(yuan(detail_url))
                time.sleep(0.8)
            time.sleep(5)
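
The script above only prints the resolved source URLs; to actually save the files, as the title suggests, a helper like the one below could be bolted onto the same script. This is a minimal sketch, not part of the original code: the name download_video and the downloads/ directory are made up for illustration, it reuses the script's requests, random and proxyiplist, and it assumes the resolved URL points at a directly downloadable file (a streamed m3u8 source would need different handling).

import os

def download_video(video_url, save_dir='downloads'):
    """Hypothetical helper: stream one resolved video URL to disk."""
    if not video_url:
        return
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # Derive a file name from the last path segment of the URL.
    filename = os.path.join(save_dir,
                            video_url.rstrip('/').split('/')[-1] or 'video.mp4')
    proxies = random.choice(proxyiplist)
    # Stream the response so large files are not held in memory.
    with requests.get(video_url, proxies=proxies, timeout=30, stream=True) as r:
        r.raise_for_status()
        with open(filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 64):
                f.write(chunk)

With this in place, the print(yuan(detail_url)) line in the main loop could be replaced by download_video(yuan(detail_url)).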

