A configurable Scrapy-based crawler that greatly improves productivity

How it works

The page HTML is rendered with Splash or Selenium, then parsed and written to the database according to a configuration file. This greatly improves efficiency: in one day you can write dozens of configuration dicts, which amounts to finishing crawlers for dozens of websites.

Configuration format:

{
    "industry_type": "政策",  # industry category
    "website_type": "央行",  # website / WeChat official account name
    "url_type": "中国人民银行-条法司-规范性文件",  # site section
    "link": "http://www.pbc.gov.cn/tiaofasi/144941/3581332/index.html",  # entry URL
    "article_rows_xpath": '//div[@id="r_con"]//table//tr/td/font[contains(@class, "newslist_style")]',
    # XPath selecting the article list rows
    "title_xpath": "./a",  # locate the title node
    "title_parse": "./@title",  # extract the title text
    "title_link_xpath": "./a/@href",  # extract the title link
    "date_re_switch": "False",  # whether to extract the date with a regex
    "date_re_expression": "",  # date/time regex
    "date_xpath": "./following-sibling::span[1]",  # locate the date node
    "date_parse": "./text()",  # extract the date text
    "content": '//*[@class="content"]',  # XPath for the article body HTML
    "prefix": "http://www.pbc.gov.cn/",  # prefix joined onto relative links
    "config": "{'use_selenium':'False'}"  # extra options: use Selenium? (Splash by default)
},
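To make the field mapping concrete, here is a minimal sketch of how one such config dict can drive extraction over rendered HTML, using parsel (the selector library Scrapy is built on) directly. The parse_rows helper and variable names here are illustrative only, not part of the project:

import ast

from parsel import Selector


def parse_rows(html, cfg):
    """Apply one config dict to rendered HTML, yielding (title, link, date_str)."""
    sel = Selector(text=html)
    for row in sel.xpath(cfg["article_rows_xpath"]):
        title_node = row.xpath(cfg["title_xpath"])  # locate the title node
        yield (
            title_node.xpath(cfg["title_parse"]).get(),  # title text
            row.xpath(cfg["title_link_xpath"]).get(),  # (possibly relative) link
            row.xpath(cfg["date_xpath"]).xpath(cfg["date_parse"]).get(),  # raw date string
        )


# "config" is itself a dict literal stored as a string; ast.literal_eval is a
# safer way to decode it than the double eval() used in the full spider below
extra = ast.literal_eval("{'use_selenium':'False'}")
use_selenium = extra["use_selenium"] == "True"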

Full code for reference

# -*- coding: utf-8 -*-
'''
Requirements: support crawling arbitrary news/information sites.
People's Bank of China (央行):
http://www.pbc.gov.cn/tiaofasi/144941/3581332/index.html
http://www.pbc.gov.cn/tiaofasi/144941/144959/index.html

Ministry of Public Security (公安部):
https://www.mps.gov.cn/n2254314/n2254487/
https://www.mps.gov.cn/n2253534/n2253535/index.html
http://www.qth.gov.cn/xxsbxt/sxdw/gajxx/

'''
from risk_control_info.items import BIgFinanceNews
import dateparser

from w3lib.url import canonicalize_url
from urllib.parse import urljoin
import scrapy
from scrapy_splash import SplashRequest

from risk_control_info.utils import make_md5, generate_text, clean_string
import re

script = """
function main(splash, args)
  splash.images_enabled = false
  splash:set_user_agent("{ua}")
  assert(splash:go(args.url))
  assert(splash:wait(args.wait))
  return splash:html()
end""".format(
    ua="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36")


class BigFinanceAllGovSpider(scrapy.Spider):
    name = 'big_finance_all_gov'
    custom_settings = {
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'DOWNLOAD_DELAY': 60 / 360.0,
        'CONCURRENT_REQUESTS_PER_IP': 8,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
            # 'risk_control_info.middlewares.SplashProxyMiddleware': 843,  # proxy IPs; this approach did not work
            'risk_control_info.middlewares.RandomUserAgentMiddleware': 843,
            'risk_control_info.middlewares.SeleniumMiddleware': 844
        },
        # persistence pipelines
        'ITEM_PIPELINES': {
            'risk_control_info.pipelines.RiskControlInfoPipeline': 401,
            'risk_control_info.pipelines.MysqlPipeline': 402,
        },
        'SPIDER_MIDDLEWARES': {
            'risk_control_info.middlewares.RiskControlInfoSpiderMiddleware': 543,
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
    }

    def __init__(self, **kwargs):
        super().__init__()
        self.env = kwargs.get('env', 'online')

    def start_requests(self):
        for target in self.target_info():
            if target.get('title_xpath') and target.get('title_link_xpath') \
                    and target.get('date_xpath') and target.get('article_rows_xpath'):
                self.logger.info(f"目标网站可配置爬虫信息:{target}")
                # 使用selenium
                if target.get("config") and eval(eval(target.get("config")).get('use_selenium')):
                    self.logger.info(f"使用 Selenium 请求 {target['link']}")
                    yield scrapy.Request(url=target['link'],
                                         meta={
                                             "target": target,
                                             "use_selenium": True
                                         },
                                         callback=self.parse,
                                         )
                else:
                    # Splash by default
                    self.logger.info(f"Requesting {target['link']} with Splash")
                    yield SplashRequest(url=target['link'],
                                        meta={"target": target},
                                        callback=self.parse,
                                        # endpoint='execute',
                                        # args={
                                        #     'lua_source': script,
                                        #     'wait': 8},
                                        endpoint='render.json',
                                        args={
                                            # 'lua_source': script,
                                            # 'proxy': f"http://{proxy_ip_dict['ip']}:{proxy_ip_dict['port']}",
                                            'wait': 10,
                                            'html': 1,
                                            'png': 1
                                        },
                                        )

    def parse(self, response):
        target = response.meta['target']
        article_rows = response.xpath(target['article_rows_xpath'])
        # iterate over all article rows
        for article_row in article_rows:
            item = BIgFinanceNews()
            # title
            _article_row = article_row.xpath(target['title_xpath'])  # locate the title node
            item['title'] = clean_string(
                generate_text(_article_row.xpath(target['title_parse']).extract_first().strip()))  # parse the title text

            # link: join with the configured prefix if one is set
            if target.get('prefix'):
                item['title_link'] = urljoin(target['prefix'], article_row.xpath(
                    target['title_link_xpath']).extract_first())
            else:
                item['title_link'] = article_row.xpath(target['title_link_xpath']).extract_first()

            # publish date
            # date-order rule for dateparser
            date_order = "YMD"
            _title_time = article_row.xpath(target['date_xpath'])  # locate the publish-date node
            _date_str = clean_string(
                generate_text(_title_time.xpath(target['date_parse']).extract_first()))  # parse the date string
            # date_re_switch is stored as the string 'True'/'False'; eval() converts it
            if not eval(target.get('date_re_switch')):
                parsed = dateparser.parse(_date_str, settings={'DATE_ORDER': date_order})
                item['title_time'] = parsed.strftime("%Y-%m-%d") if parsed else None
            else:  # extract the date string with a regex; a default expression is provided
                date_re_expression = target.get('date_re_expression', None)
                _expression = date_re_expression or r"(20\d{2}[-/]?\d{2}[-/]?\d{2})"
                results = re.findall(r"%s" % _expression, _date_str, re.S)
                self.logger.info(f"_date_str: {_date_str}, results: {results}")
                if results:
                    parsed = dateparser.parse(results[0], settings={'DATE_ORDER': date_order})
                    item['title_time'] = parsed.strftime("%Y-%m-%d") if parsed else None
                else:
                    item['title_time'] = None
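                # e.g. with the default expression, a date cell like "( 2020-04-14 )"
                # produces results == ['2020-04-14'], stored as "2020-04-14"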

            # the fields below are fixed values
            item['bi_channel'] = "gov"
            item['industry_type'] = f"{target['industry_type']}"
            item['website_type'] = f"{target['website_type']}"
            item['url_type'] = f"{target['url_type']}"
            item['title_hour'] = 0  # the source sites publish no time-of-day; use 0 instead
            item['source_type'] = 0  # data source: 0 = website, 1 = WeChat official account
            item['redis_duplicate_key'] = make_md5(item['title'] + canonicalize_url(item['title_link']))

            # request the detail page
            # with Selenium
            if target.get("config") and eval(eval(target.get("config")).get('use_selenium')):
                self.logger.info(f"Requesting {item['title_link']} with Selenium")
                yield scrapy.Request(url=item['title_link'],
                                     meta={
                                         "target": target,
                                         "use_selenium": True,
                                         "item": item
                                     },
                                     callback=self.parse_detail,
                                     )
            else:
                # with Splash
                self.logger.info(f"Requesting {item['title_link']} with Splash")
                yield SplashRequest(url=item['title_link'],
                                    meta={
                                        "target": target,
                                        "item": item
                                    },
                                    callback=self.parse_detail,
                                    # endpoint='execute',
                                    # args={
                                    #     'lua_source': script,
                                    #     'wait': 8},
                                    endpoint='render.json',
                                    args={
                                        # 'lua_source': script,
                                        # 'proxy': f"http://{proxy_ip_dict['ip']}:{proxy_ip_dict['port']}",
                                        'wait': 20,
                                        'html': 1,
                                        'png': 1
                                    },
                                    )

    def parse_detail(self, response):
        self.logger.info(f"处理详情页 {response.url}")
        item = response.meta['item']
        target = response.meta['target']
        self.logger.debug(response.xpath(target['content']))  # debug: raw content selector
        if response.xpath(target['content']):
            item['content'] = generate_text(response.xpath(target['content']).extract_first())
        else:
            item['content'] = ""

        yield item

    @staticmethod
    def target_info():
        '''
        Yield the target site configurations.
        '''
        target_list = [
            {
                "industry_type": "政策",  # industry category
                "website_type": "央行",  # website / WeChat official account name
                "url_type": "中国人民银行-条法司-规范性文件",  # site section
                "link": "http://www.pbc.gov.cn/tiaofasi/144941/3581332/index.html",  # entry URL
                "article_rows_xpath": '//div[@id="r_con"]//table//tr/td/font[contains(@class, "newslist_style")]',
                # XPath selecting the article list rows
                "title_xpath": "./a",  # locate the title node
                "title_parse": "./@title",  # extract the title text
                "title_link_xpath": "./a/@href",  # extract the title link
                "date_re_switch": "False",  # whether to extract the date with a regex
                "date_re_expression": "",  # date/time regex
                "date_xpath": "./following-sibling::span[1]",  # locate the date node
                "date_parse": "./text()",  # extract the date text
                "content": '//*[@class="content"]',  # XPath for the article body HTML
                "prefix": "http://www.pbc.gov.cn/",  # prefix for relative links
                "config": "{'use_selenium':'False'}"  # extra options: use Selenium? (Splash by default)
            },

            {
                "industry_type": "政策",  # industry category
                "website_type": "央行",  # website / WeChat official account name
                "url_type": "中国人民银行-条法司-其他文件",  # site section
                "link": "http://www.pbc.gov.cn/tiaofasi/144941/144959/index.html",  # entry URL
                "article_rows_xpath": '//div[@id="r_con"]//table//tr/td/font[contains(@class, "newslist_style")]',
                "title_xpath": "./a",
                "title_parse": "./@title",
                "title_link_xpath": "./a/@href",
                "date_re_switch": "False",  # whether to extract the date with a regex
                "date_re_expression": "",  # date/time regex
                "date_xpath": "./following-sibling::span[1]",
                "date_parse": "./text()",
                "content": '//*[@class="content"]',  # XPath for the article body HTML
                "prefix": "http://www.pbc.gov.cn/",
                "config": "{'use_selenium':'False'}"
            },

            {
                "industry_type": "政策",  # industry category
                "website_type": "公安部",  # website / WeChat official account name
                "url_type": "中华人民共和国公安部-规划计划",  # site section
                "link": "https://www.mps.gov.cn/n2254314/n2254487/",  # entry URL
                "article_rows_xpath": '//span/dl/dd',
                "title_xpath": "./a",
                "title_parse": "./text()",
                "title_link_xpath": "./a/@href",
                "date_re_switch": "True",  # whether to extract the date with a regex (e.g. 2020-04-14)
                "date_re_expression": "",  # date/time regex
                "date_xpath": "./span",
                "date_parse": "./text()",
                "content": '//*[@class="arcContent center"]',  # XPath for the article body HTML
                "prefix": "https://www.mps.gov.cn/",
                "config": "{'use_selenium':'True'}"
            },

            {
                "industry_type": "政策",  # industry category
                "website_type": "公安部",  # website / WeChat official account name
                "url_type": "中华人民共和国公安部-公安要闻",  # site section
                "link": "https://www.mps.gov.cn/n2253534/n2253535/index.html",  # entry URL
                "article_rows_xpath": '//span/dl/dd',
                "title_xpath": "./a",
                "title_parse": "./text()",
                "title_link_xpath": "./a/@href",
                "date_re_switch": "True",  # whether to extract the date with a regex (e.g. 2020-04-14)
                "date_re_expression": "",  # date/time regex
                "date_xpath": "./span",
                "date_parse": "./text()",
                "content": '//*[@class="arcContent center"]',  # XPath for the article body HTML
                "prefix": "https://www.mps.gov.cn/",
                "config": "{'use_selenium':'True'}"
            },

            {
                "industry_type": "政策",  # industry category
                "website_type": "公安部",  # website / WeChat official account name
                "url_type": "七台河市人民政府-信息上报系统-市辖单位-公安局",  # site section
                "link": "http://www.qth.gov.cn/xxsbxt/sxdw/gajxx/",  # entry URL
                "article_rows_xpath": '//td[contains(text(), "公安局")]/parent::tr/parent::tbody/parent::table/parent::td/parent::tr/following::tr[1]/td/table//tr/td/a/parent::td/parent::tr',
                "title_xpath": "./td/a",
                "title_parse": "./@title",
                "title_link_xpath": "./td/a/@href",
                "date_re_switch": "False",  # whether to extract the date with a regex (e.g. 2020-04-14)
                "date_re_expression": "",  # date/time regex
                "date_xpath": "./td[3]",
                "date_parse": "./text()",
                "content": '//*[@class="TRS_Editor"]',  # XPath for the article body HTML
                "prefix": "http://www.qth.gov.cn/xxsbxt/sxdw/gajxx/",
                "config": "{'use_selenium':'False'}"
            },

        ]
        for target in target_list:
            yield target
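
The spider can be started the usual way with scrapy crawl big_finance_all_gov -a env=online from inside the project. For completeness, here is a minimal programmatic launcher sketch; the spider's module path below is an assumption about the project layout, not taken from the original code:

# run_spider.py -- illustrative launcher; assumes a standard Scrapy project,
# with SPLASH_URL and the referenced middlewares/pipelines set in settings.py
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# assumed module path inside the risk_control_info project
from risk_control_info.spiders.big_finance_all_gov import BigFinanceAllGovSpider

if __name__ == "__main__":
    process = CrawlerProcess(get_project_settings())
    process.crawl(BigFinanceAllGovSpider, env="online")  # env is read in __init__
    process.start()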

