Python crawler: building a distributed crawler with Scrapy-Redis and writing the data to MySQL

First, configure the Redis connection in the project's settings.py file:

# Use the Scrapy-Redis dupefilter instead of Scrapy's built-in request deduplication
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# Use the Scrapy-Redis scheduler instead of Scrapy's built-in scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Keep the request queue and dupefilter in Redis so an interrupted crawl can resume
SCHEDULER_PERSIST = True
# Redis connection URL
REDIS_URL = 'redis://127.0.0.1:6379'
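
The settings above only cover the Scrapy-Redis plumbing. To actually use the proxy middleware and the MySQL pipeline described below, they also need to be registered in settings.py. The module and class paths here are assumptions (adjust them to your own project layout); they are not given in the original post:

# Hypothetical registration of the custom proxy middleware and MySQL pipeline
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.RandomProxyMiddleware': 543,   # assumed path to the middleware shown below
}
ITEM_PIPELINES = {
    'myproject.pipelines.MysqlPipeline': 300,   # assumed path to the pipeline sketched at the end
}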

Next, create a new file that builds and refreshes the proxy IP pool:

import redis
import urllib.request as ur
import time

#Class that maintains the proxy IP pool in Redis
class ProxyPool():
    def __init__(self):
        #Create the Redis connection
        self.redis_conn = redis.StrictRedis(
            host='localhost',
            port=6379,
            decode_responses=True,
        )

    def set_proxy(self):
        proxy_old = None
        while True:
            #Fetch a new batch of proxy IPs from the API
            proxy_new = ur.urlopen('http://api.ip.data5u.com/dynamic/get.html?order=d314e5e5e19b0dfd19762f98308114ba&sep=4').read().decode('utf-8').strip().split(' ')
            #Only update the pool when the new batch differs from the previous one
            if proxy_new != proxy_old:
                #Remember the new batch as the current one
                proxy_old = proxy_new
                #Drop the stale IPs from Redis
                self.redis_conn.delete('proxy')
                #Store the new IPs as a Redis set
                self.redis_conn.sadd('proxy',*proxy_new)
                print('Proxy IPs replaced with:', proxy_new)
                time.sleep(2)
            else:
                time.sleep(1)

    def get_proxy(self):
        #Randomly pick one IP from the pool
        proxy_s = self.redis_conn.srandmember('proxy',1)
        if proxy_s:
            return proxy_s[0]
        else:
            #The pool is momentarily empty (being refreshed); wait briefly and retry
            time.sleep(0.1)
            return self.get_proxy()

if __name__ == '__main__':
    ProxyPool().set_proxy()
    # ProxyPool().get_proxy()
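
Run this script on its own as a long-lived process; the crawler only ever reads from the 'proxy' set. As a quick sanity check (not part of the original post), you can inspect the pool from another Python shell:

import redis

conn = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)
# 'proxy' is the set key that ProxyPool writes to
print(conn.smembers('proxy'))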

Then change how the downloader middleware obtains its proxy IP, so it reads from the pool instead of calling the API on every request:

    def process_request(self, request, spider):
        # request.meta['proxy'] = 'http://' + ur.urlopen('http://api.ip.data5u.com/dynamic/get.html?order=d314e5e5e19b0dfd19762f98308114ba&sep=3').read().decode('utf-8').strip()
        #Pull a proxy IP from the Redis-backed pool
        request.meta['proxy'] = 'http://' + self.proxyPool.get_proxy()
        print(request.meta['proxy'])
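
The snippet above references self.proxyPool, which is not shown in the original post. A minimal sketch of the full middleware might look like this, assuming the IP pool file above is saved as proxy_pool.py and the class name matches whatever you register in DOWNLOADER_MIDDLEWARES:

from proxy_pool import ProxyPool   # hypothetical module name for the IP pool file above

class RandomProxyMiddleware(object):
    def __init__(self):
        # One shared pool instance; it reads the 'proxy' set maintained by set_proxy()
        self.proxyPool = ProxyPool()

    def process_request(self, request, spider):
        # Attach a proxy from the Redis-backed pool to every outgoing request
        request.meta['proxy'] = 'http://' + self.proxyPool.get_proxy()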

The main spider (little changed from the previous post, just slightly tidied up):

# -*- coding: utf-8 -*-
import scrapy
import re
from copy import deepcopy

class BilianSpider(scrapy.Spider):
    name = 'bilian'
    allowed_domains = ['ebnew.com','ss.ebnew.com']


    keyword_s = [
        '路由器','变压器'
    ]
    # Schema of the record that will be stored
    sql_data = dict(
        projectcode = '',  # project number
        web = '',  # source website
        keyword = '',  # search keyword
        detail_url = '',  # URL of the bid detail page
        title = '',  # title published on the third-party site
        toptype = '',  # information type
        province = '',  # province
        product = '',  # product category
        industry = '',  # industry
        tendering_manner = '',  # tendering method
        publicity_date = '',  # bid announcement date
        expiry_date = '',  # bid deadline
    )
    # Format of the POST form data
    form_data = dict(
        infoClassCodes = '',
        rangeType = '',
        projectType = 'bid',
        fundSourceCodes = '',
        dateType = '',
        startDateCode = '',
        endDateCode = '',
        normIndustry = '',
        normIndustryName = '',
        zone = '',
        zoneName = '',
        zoneText = '',
        key = '',   # search keyword
        pubDateType = '',
        pubDateBegin = '',
        pubDateEnd = '',
        sortMethod = 'timeDesc',
        orgName = '',
        currentPage = '',   # current page number
    )


    def start_requests(self):
        for keyword in self.keyword_s:
            form_data = deepcopy(self.form_data)
            form_data['key'] = keyword
            form_data['currentPage'] = '1'
            request = scrapy.FormRequest(
                url='http://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_start
            )
            request.meta['form_data'] = form_data
            yield request
        # yield scrapy.Request(
        #     url='http://www.ebnew.com/businessShow/631160959.html',
        #     callback=self.parse_page2
        # )
        # form_data = self.form_data
        # form_data['key'] = '路由器'
        # form_data['currentPage'] = '2'
        # yield scrapy.FormRequest(
        #     url='http://ss.ebnew.com/tradingSearch/index.htm',
        #     formdata=form_data,
        #     callback=self.parse_page1,
        # )

    def parse_start(self,response):
        a_text_s = response.xpath('//form[@id="pagerSubmitForm"]/a/text()').extract()
        page_max = max(
            [int(a_text) for a_text in a_text_s if re.match(r'\d+', a_text)]
        )
        # page_max = 2
        self.parse_page1(response)
        for page in range(2,page_max+1):
            form_data = deepcopy(response.meta['form_data'])
            form_data['currentPage'] = str(page)
            request = scrapy.FormRequest(
                url='http://ss.ebnew.com/tradingSearch/index.htm',
                formdata=form_data,
                callback=self.parse_page1
            )
            request.meta['form_data'] = form_data
            yield request

    def parse_page1(self,response):
        form_data = response.meta['form_data']
        keyword = form_data.get('key')
        content_list_x_s = response.xpath('//div[@class="ebnew-content-list"]/div')
        for content_list_x in content_list_x_s:
            sql_data = deepcopy(self.sql_data)
            sql_data['toptype'] = content_list_x.xpath('./div[1]/i[1]/text()').extract_first()
            sql_data['title'] = content_list_x.xpath('./div[1]/a/text()').extract_first()
            sql_data['publicity_date'] = content_list_x.xpath('./div[1]/i[2]/text()').extract_first()
            if sql_data['publicity_date']:
                sql_data['publicity_date'] = re.sub(r'[^0-9\-]', '', sql_data['publicity_date'])

            sql_data['tendering_manner'] = content_list_x.xpath('./div[2]/div[1]/p[1]/span[2]/text()').extract_first()
            sql_data['product'] = content_list_x.xpath('./div[2]/div[1]/p[2]/span[2]/text()').extract_first()
            sql_data['expiry_date'] = content_list_x.xpath('./div[2]/div[2]/p[1]/span[2]/text()').extract_first()
            sql_data['province'] = content_list_x.xpath('./div[2]/div[2]/p[2]/span[2]/text()').extract_first()
            sql_data['detail_url'] = content_list_x.xpath('./div[1]/a/@href').extract_first()
            sql_data['keyword'] = keyword
            sql_data['web'] = '必联网'

            request = scrapy.Request(
                url=sql_data['detail_url'],
                callback=self.parse_page2
            )
            request.meta['sql_data'] = sql_data
            yield request


    def parse_page2(self,response):
        sql_data = response.meta['sql_data']
        sql_data['projectcode'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[1]/span[2]/text()').extract_first()
        sql_data['industry'] = response.xpath('//ul[contains(@class,"ebnew-project-information")]/li[8]/span[2]/text()').extract_first()
        if not sql_data['projectcode']:
            # projectcode_find = re.findall('项目编号[::]{0,1}\s{0,2}([a-zA-Z0-9\-_]{10,80})',response.body.decode('utf-8'))
            # sql_data['projectcode'] = projectcode_find[0] if projectcode_find else ""
            projectcode_find = re.findall(
                '(项目编码|项目标号|采购文件编号|招标编号|项目编号|竞价文件编号)[::]{0,1}\s{0,2}\n*()*\n*()*\n*()*\n*([a-zA-Z0-9\-_\[\]]{1,100})',
                response.body.decode('utf-8'))
            if projectcode_find:
                sql_data['projectcode'] = projectcode_find[0][4]

        # print('parse_2',sql_data)
        yield sql_data
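
The title of this post also promises writing the data into MySQL, but the section above stops at yielding sql_data. The pipeline below is only a minimal sketch, assuming pymysql, a database and credentials of your own, and a table named bilian whose columns match the keys of sql_data:

import pymysql

class MysqlPipeline(object):
    def open_spider(self, spider):
        # Connection parameters are placeholders; change them to your own database
        self.conn = pymysql.connect(
            host='localhost', user='root', password='root',
            db='spider', charset='utf8mb4'
        )
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Build the INSERT statement from the keys of the dict yielded by the spider
        keys = ','.join(item.keys())
        placeholders = ','.join(['%s'] * len(item))
        sql = 'INSERT INTO bilian ({}) VALUES ({})'.format(keys, placeholders)
        self.cursor.execute(sql, list(item.values()))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()

Register the pipeline under ITEM_PIPELINES (see the settings sketch near the top) so every yielded sql_data dict is routed through process_item.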

 
