Configuring Proxies in Scrapy

Collecting free proxy information

# -*- coding:utf-8 -*-
import scrapy
from scrapy import Request
import json


class XiciSpider(scrapy.Spider):
    name = 'xici_proxy'
    allowed_domains = ["www.xicidaili.com"]

    def start_requests(self):
        # Crawl the first few pages of the proxy list (pages 1-3 here)
        for i in range(1, 4):
            yield Request('http://www.xicidaili.com/nn/%s' % i)

    def parse(self, response):
        for sel in response.xpath('//table[@id="ip_list"]/tr[position()>1]'):
            # Extract the proxy's IP, port, and scheme (http or https)
            ip = sel.css('td:nth-child(2)::text').extract_first()
            port = sel.css('td:nth-child(3)::text').extract_first()
            scheme = sel.css('td:nth-child(6)::text').extract_first()

            # Send a request through the scraped proxy to http(s)://httpbin.org/ip to verify it works
            url = '%s://httpbin.org/ip' % scheme
            proxy = '%s://%s:%s' % (scheme, ip, port)

            meta = {
                'proxy': proxy,
                'dont_retry': True,
                'download_timeout': 10,

                # The following two fields are passed through to check_available for validation
                '_proxy_scheme': scheme,
                '_proxy_ip': ip,
            }

            yield Request(url, callback=self.check_available, meta=meta, dont_filter=True)

    def check_available(self, response):
        proxy_ip = response.meta['_proxy_ip']

        # Check whether the proxy actually hides our IP (anonymous proxy)
        if proxy_ip == json.loads(response.text)['origin']:
            yield {
                'proxy_scheme': response.meta['_proxy_scheme'],
                'proxy': response.meta['proxy'],
            }
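
For the validation step, check_available relies on the response format of httpbin.org/ip, which returns a small JSON object carrying the IP the request appears to originate from, e.g. (illustrative address):

{
  "origin": "121.8.98.100"
}

If the proxy hides our real IP, "origin" equals the proxy's own IP, so comparing it with the _proxy_ip passed through meta tells us whether the proxy is anonymous.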

Run the spider and export the results to a JSON file:

$ scrapy crawl xici_proxy -o proxy_list.json
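
The exported proxy_list.json contains one object per verified proxy, in the shape yielded by check_available (addresses are illustrative; the scheme text is whatever the site's table listed):

[
    {"proxy_scheme": "HTTP", "proxy": "HTTP://121.8.98.100:8080"},
    {"proxy_scheme": "HTTPS", "proxy": "HTTPS://58.218.198.163:3128"}
]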

Scraping through the proxies

Add the following code to middlewares.py:

from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.exceptions import NotConfigured
from collections import defaultdict
from urllib.parse import urlparse
import json
import random


class RandomHttpProxyMiddleware(HttpProxyMiddleware):

    @classmethod
    def from_crawler(cls, crawler):
        # Read the encoding for proxy auth credentials from the settings
        auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING', 'latin-1')

        # Read the path of the proxy list file (JSON) from the settings
        proxy_list_file = crawler.settings.get('HTTPPROXY_PROXY_LIST_FILE')

        return cls(auth_encoding, proxy_list_file)

    def __init__(self, auth_encoding='latin-1', proxy_list_file=None):
        if not proxy_list_file:
            raise NotConfigured

        self.auth_encoding = auth_encoding
        self.proxies = defaultdict(list)

        # Load the proxy list from the JSON file into self.proxies
        with open(proxy_list_file) as f:
            proxy_list = json.load(f)
            for proxy in proxy_list:
                scheme = proxy['proxy_scheme']
                url = proxy['proxy']
                self.proxies[scheme].append(self._get_proxy(url, scheme))

    def process_request(self, request, spider):
        # Pick a random proxy whose scheme matches the request URL's scheme
        scheme = urlparse(request.url).scheme.upper()
        creds, proxy = random.choice(self.proxies[scheme])
        request.meta['proxy'] = proxy
        if creds:
            request.headers['Proxy-Authorization'] = b'Basic ' + creds
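
The _get_proxy helper used in __init__ is inherited from Scrapy's built-in HttpProxyMiddleware: it splits any user:password credentials out of a proxy URL and returns a (creds, proxy_url) tuple, where creds is a base64-encoded Basic-auth token or None. A minimal sketch of that behaviour, as an approximation rather than Scrapy's actual source:

import base64
from urllib.parse import urlsplit, urlunsplit

def get_proxy_sketch(url, orig_type, auth_encoding='latin-1'):
    # Roughly what HttpProxyMiddleware._get_proxy does in Scrapy 1.x
    parts = urlsplit(url)
    netloc = parts.hostname + (':%d' % parts.port if parts.port else '')
    proxy_url = urlunsplit((parts.scheme or orig_type, netloc, '', '', ''))
    if parts.username:
        user_pass = '%s:%s' % (parts.username, parts.password or '')
        creds = base64.b64encode(user_pass.encode(auth_encoding)).strip()
    else:
        creds = None
    return creds, proxy_url

This is why process_request can unpack each stored entry into creds and proxy, and why credentials, when present, go into the Proxy-Authorization header.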

Add the following to settings.py:

USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/41.0.2272.76'
HTTPPROXY_PROXY_LIST_FILE = 'proxy_list.json'
DOWNLOADER_MIDDLEWARES = {
    'proxy_example.middlewares.RandomHttpProxyMiddleware': 100,
}
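
Priority 100 places RandomHttpProxyMiddleware before Scrapy's built-in HttpProxyMiddleware (priority 750 in DOWNLOADER_MIDDLEWARES_BASE), so ours sets request.meta['proxy'] first and the built-in one leaves the chosen proxy in place (exact behaviour varies slightly between Scrapy versions). To rule out any double handling, you can optionally disable the built-in middleware:

DOWNLOADER_MIDDLEWARES = {
    'proxy_example.middlewares.RandomHttpProxyMiddleware': 100,
    # Optional: switch off the built-in proxy middleware
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
}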

Create a new spider, test_random_proxy.py:

# -*- coding:utf-8 -*-
import scrapy
from scrapy import Request
import json


class TestRandomProxySpider(scrapy.Spider):
    name = "test_random_proxy"

    def start_requests(self):
        # Issue repeated requests over both http and https so that proxies
        # from both pools get exercised
        for _ in range(20):
            yield Request('http://httpbin.org/ip', dont_filter=True)
            yield Request('https://httpbin.org/ip', dont_filter=True)

    def parse(self, response):
        # Print the origin IP reported by httpbin (should be a proxy's IP,
        # not our own)
        print(json.loads(response.text))
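
Run it and watch the printed origin addresses (values below are illustrative):

$ scrapy crawl test_random_proxy
{'origin': '121.8.98.100'}
{'origin': '58.218.198.163'}
...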

The printed origin IPs will differ from your machine's own IP, confirming that the requests are going out through the proxies.
