[Python Crawler] Using Proxy IPs for Web Scraping

I used proxy IPs for vote stuffing: my company entered a contest whose voting rules allowed only 8 votes per IP address, and it wasn't clear whether the quota reset daily or was a permanent limit, so as a last resort I turned to proxy IPs.

Proxy IP websites

http://www.goubanjia.com/

http://www.ip181.com/

https://www.kuaidaili.com/

Python environment

Install the requests library
Install the bs4 library
Install the lxml library (the code below uses BeautifulSoup's lxml parser)

Set the proxy server address with the proxies parameter:

proxies = {'http': 'http://61.155.164.110:3128'}
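
To sanity-check a proxy, route a request through it and look at the reported origin IP. A minimal sketch, assuming the proxy above is still alive and using httpbin.org/ip as an illustrative echo endpoint (my addition, not from the original post):

import requests

proxies = {'http': 'http://61.155.164.110:3128'}
try:
    # httpbin echoes back the IP it sees; a working proxy reports its own address
    resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
    print(resp.json())  # e.g. {'origin': '61.155.164.110'}
except requests.RequestException as exc:
    print('proxy failed:', exc)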

http://www.goubanjia.com/

def get_ip_list(url, headers):
    """
    Fetch the proxy server list from goubanjia.com.
    :param url: page URL of the free proxy list
    :param headers: request headers (User-Agent etc.)
    :return: list of proxies in the form 'protocol://ip:port'
    """
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):  # skip the header row
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        # column 2 holds the protocol, column 0 holds ip:port
        ip_list.append(tds[2].text + '://' + tds[0].text)
    return ip_list
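
To eyeball the output, call the parser on one page of the free list (the URL pattern is taken from the full code at the end; the page index and printed values are illustrative):

headers = {'User-Agent': 'Mozilla/5.0'}
ip_list = get_ip_list('http://www.goubanjia.com/free/index1.shtml', headers)
print(ip_list[:5])  # e.g. ['HTTP://121.31.101.41:8123', ...]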

http://www.ip181.com/

def get_ip_list(url, headers):
    """
    Fetch the proxy server list from ip181.com.
    :param url: page URL of the free proxy list
    :param headers: request headers (User-Agent etc.)
    :return: list of proxies in the form 'http://ip:port'
    """
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):  # skip the header row
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        # column 0 holds the ip, column 1 the port; join them with ':'
        ip_list.append('http://' + tds[0].text + ':' + tds[1].text)
    return ip_list

https://www.kuaidaili.com/

def get_ip_list(url, headers):
    """
    Fetch the proxy server list from kuaidaili.com.
    :param url: page URL of the free proxy list
    :param headers: request headers (User-Agent etc.)
    :return: list of proxies in the form 'protocol://ip:port'
    """
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):  # skip the header row
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        # column 5 holds the protocol, column 1 the ip, column 2 the port
        ip_list.append(tds[5].text + '://' + tds[1].text + ':' + tds[2].text)
    return ip_list
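
Free proxies die quickly, so it pays to filter the list before voting. A minimal liveness check of my own (not in the original post), again using httpbin.org/ip as an illustrative test endpoint:

import requests

def filter_alive(ip_list, timeout=3):
    """Return only the proxies that can complete a request within the timeout."""
    alive = []
    for ip in ip_list:
        ip = ip.lower()  # lowercase the scheme for the proxies dict key
        proxies = {ip.split(':')[0]: ip}
        try:
            requests.get('http://httpbin.org/ip', proxies=proxies, timeout=timeout)
            alive.append(ip)
        except requests.RequestException:
            pass  # dead, refused, or too slow
    return alive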

Full code

from bs4 import BeautifulSoup
import requests

def get_ip_list(url, headers):
    """
    Fetch the proxy server list from goubanjia.com.
    :param url: page URL of the free proxy list
    :param headers: request headers (User-Agent etc.)
    :return: list of proxies in the form 'protocol://ip:port'
    """
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):  # skip the header row
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        # column 2 holds the protocol, column 0 holds ip:port
        ip_list.append(tds[2].text + '://' + tds[0].text)
    return ip_list


def cast_a_vote(url, headers, ip_list):
    """
    Cast votes through each proxy in turn.
    :param url: the voting URL
    :param headers: request headers (User-Agent etc.)
    :param ip_list: proxies in the form 'protocol://ip:port'
    :return: None
    """
    for ip in ip_list:
        ip = ip.lower()  # requests expects lowercase scheme keys ('http'/'https')
        proxies = {ip.split(':')[0]: ip}
        try:
            for i in range(0, 8):  # 8 votes per IP, matching the contest limit
                web_data = requests.get(url, headers=headers, proxies=proxies, timeout=1)
                print(web_data.text)
        except Exception:
            continue  # skip proxies that fail or time out

if __name__ == '__main__':
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }

    vote_url = 'https://www.baidu.com'  # placeholder; replace with the real voting URL
    for i in range(0, 10):
        # walk the first 10 pages of goubanjia's free proxy list
        url = 'http://www.goubanjia.com/free/index%d.shtml' % i
        ip_list = get_ip_list(url, headers=headers)
        cast_a_vote(vote_url, headers, ip_list)
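
Note that the full script only pulls proxies from goubanjia.com; the ip181.com and kuaidaili.com parsers above can be swapped in the same way, as long as each returns entries in the 'protocol://ip:port' form that cast_a_vote expects.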
