Scraping Free Proxy IPs
First add a browser USER_AGENT in settings.py, since the site tends to refuse requests carrying Scrapy's default User-Agent:
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36'
Then write the spider, XiciSpider.py:
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
import json


class XiciSpider(scrapy.Spider):
    name = 'xici'
    allowed_domains = ['xicidaili.com']
    start_urls = ['http://www.xicidaili.com/nn/1']

    # To crawl more listing pages, generate the requests explicitly:
    # def start_requests(self):
    #     for i in range(1, 2):
    #         yield Request('http://www.xicidaili.com/nn/' + str(i))

    def parse(self, response):
        # Each proxy is one row of the table; skip the header row.
        for sel in response.xpath('//table[@id="ip_list"]/tr[position()>1]'):
            ip = sel.css('td:nth-child(2)::text').extract_first()
            port = sel.css('td:nth-child(3)::text').extract_first()
            scheme = sel.css('td:nth-child(6)::text').extract_first().lower()

            # Verify the proxy by requesting httpbin.org through it,
            # using the proxy's own scheme.
            url = scheme + '://httpbin.org/ip'
            proxy = scheme + '://' + str(ip) + ':' + str(port)

            meta = {
                'proxy': proxy,           # route this request through the proxy
                'dont_retry': True,       # let a dead proxy fail fast
                'download_timeout': 30,
                # Stash the proxy's details for the callback.
                '_proxy_scheme': scheme,
                '_proxy_ip': ip,
            }
            print('===>>> proxy:', proxy)
            yield Request(url, callback=self.check_available, meta=meta,
                          dont_filter=True)

    def check_available(self, response):
        proxy_ip = response.meta['_proxy_ip']
        # httpbin.org/ip echoes the IP the request came from; if it matches
        # the proxy's IP, the proxy works and is highly anonymous.
        if proxy_ip == json.loads(response.text)['origin']:
            yield {
                'proxy_scheme': response.meta['_proxy_scheme'],
                'proxy': response.meta['proxy'],
            }
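The check works because httpbin.org/ip simply echoes the caller's IP address as JSON. A response fetched through a working, highly anonymous proxy looks like this (the IP shown is illustrative, taken from the results below):

{
  "origin": "218.73.239.226"
}

If the proxy forwards or leaks your real address, origin will not match the proxy's IP and the item is discarded.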
Run the spider, exporting the working proxies to a JSON file:
[visitor@localhost demo3]$ python3 -m scrapy crawl xici -o ~/proxies/xici.json
Inspect xici.json:
[
{"proxy_scheme": "http", "proxy": "http://218.73.239.226:61202"},
{"proxy_scheme": "http", "proxy": "http://42.87.73.172:61202"},
{"proxy_scheme": "https", "proxy": "https://180.212.140.200:8118"},
{"proxy_scheme": "http", "proxy": "http://112.84.91.60:8118"},
{"proxy_scheme": "https", "proxy": "https://14.118.252.235:61234"},
{"proxy_scheme": "https", "proxy": "https://125.120.201.139:6666"},
{"proxy_scheme": "https", "proxy": "https://180.113.45.132:8118"},
{"proxy_scheme": "https", "proxy": "https://218.87.143.201:8118"},
{"proxy_scheme": "https", "proxy": "https://112.95.56.203:8118"},
{"proxy_scheme": "https", "proxy": "https://59.48.148.226:61202"}
]
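Before wiring this file into a middleware, a quick sanity check that it parses (a throwaway sketch; the path matches the crawl command above):

import json
import os

with open(os.path.expanduser('~/proxies/xici.json')) as f:
    proxies = json.load(f)
print(len(proxies), 'proxies, e.g.', proxies[0]['proxy'])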
Scraping Pages Through the Proxies
Configure settings.py. Priority 745 places our middleware just before Scrapy's built-in HttpProxyMiddleware (priority 750), so it gets to assign request.meta['proxy'] first:
DOWNLOADER_MIDDLEWARES = {
    'demo3.middlewares.RandomHttpProxyMiddleware': 745,
}
HTTPPROXY_PROXY_LIST_FILE = '~/proxies/xici.json'
Add RandomHttpProxyMiddleware to middlewares.py. It reuses the credential parsing of HttpProxyMiddleware (_get_proxy) and overrides _set_proxy to pick a random proxy of the matching scheme:
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.exceptions import NotConfigured
from collections import defaultdict
import json
import os
import random


class RandomHttpProxyMiddleware(HttpProxyMiddleware):

    def __init__(self, auth_encoding='utf-8', proxy_list_file=None):
        if not proxy_list_file:
            raise NotConfigured

        self.auth_encoding = auth_encoding
        # One proxy pool per scheme: 'http' and 'https'.
        self.proxies = defaultdict(list)
        # expanduser so paths like '~/proxies/xici.json' work.
        with open(os.path.expanduser(proxy_list_file)) as f:
            proxy_list = json.load(f)
            for proxy in proxy_list:
                scheme = proxy['proxy_scheme']
                url = proxy['proxy']
                # _get_proxy (inherited) returns a (credentials, url) pair.
                self.proxies[scheme].append(self._get_proxy(url, scheme))

    @classmethod
    def from_crawler(cls, crawler):
        auth_encoding = crawler.settings.get('HTTPPROXY_AUTH_ENCODING', 'utf-8')
        proxy_list_file = crawler.settings.get('HTTPPROXY_PROXY_LIST_FILE')
        return cls(auth_encoding, proxy_list_file)

    def _set_proxy(self, request, scheme):
        # Pick a random proxy from the pool for this scheme.
        creds, proxy = random.choice(self.proxies[scheme])
        request.meta['proxy'] = proxy
        if creds:
            request.headers['Proxy-Authorization'] = b'Basic ' + creds
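Why is overriding _set_proxy enough? The inherited process_request decides when a proxy is applied at all. Paraphrased from Scrapy's source (a simplified sketch, not the exact code of every version):

from scrapy.utils.httpobj import urlparse_cached

# Roughly what the inherited HttpProxyMiddleware.process_request does:
def process_request(self, request, spider):
    if 'proxy' in request.meta:
        return  # an explicitly assigned proxy wins (e.g. XiciSpider's checks)
    scheme = urlparse_cached(request).scheme   # 'http' or 'https'
    if scheme in self.proxies:
        self._set_proxy(request, scheme)

So requests that already carry meta['proxy'] are left alone, and everything else is routed through a proxy chosen by our _set_proxy.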
Finally, write a test spider, http_proxy.py:
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request


class HttpProxySpider(scrapy.Spider):
    name = 'http_proxy'

    def start_requests(self):
        # Issue both http and https requests so both proxy pools get used.
        for i in range(1, 10):
            yield Request('http://httpbin.org/ip', dont_filter=True)
            yield Request('https://httpbin.org/ip', dont_filter=True)

    def parse(self, response):
        # httpbin echoes the IP it saw; it should be one of our proxies.
        print(response.text)
Crawl through random proxies:
[visitor@localhost demo3]$ python3 -m scrapy crawl http_proxy
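Each printed response should report one of the proxy IPs from xici.json rather than your own address, for example (illustrative, drawn from the list above):

{
  "origin": "180.212.140.200"
}

Because _set_proxy chooses at random per request, the origin changes from response to response.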