Druid's monitoring is powerful, but unfortunately the monitoring data is held only in memory. The requirement here is to capture it on a schedule, either to a log file or to a database table.
The Druid package contains a DruidStatService class, which is the service class behind the monitoring console.
It has a method, public String service(String url), which takes a string such as "/sql.json"; depending on the parameter, service fetches the corresponding monitoring data and returns it as an already-serialized JSON string.
For example, "/basic.json" returns the basic information, and "/weburi.json" returns the data shown on the URI page.
With this service interface we can pull exactly the monitoring data we want.
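A minimal sketch of the call, assuming Druid is already configured as the data source (the class name DruidStatQuickCheck is made up for this sketch; the endpoint strings are the same ones the built-in monitoring console requests):
import com.alibaba.druid.stat.DruidStatService;

public class DruidStatQuickCheck {
    public static void main(String[] args) {
        // DruidStatService is a singleton exposed via getInstance()
        DruidStatService statService = DruidStatService.getInstance();
        // Each call returns the corresponding monitoring data, already serialized to JSON
        System.out.println(statService.service("/basic.json"));
        System.out.println(statService.service("/weburi.json"));
    }
}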
Spring Boot's scheduled tasks make it easy to run the recording job periodically. Here is the code.
Note: SyslogService is a custom service class for persisting the log to the database; those lines can stay commented out.
package com.company.project.timetask;
import com.alibaba.druid.stat.DruidStatService;
import com.alibaba.fastjson.JSONObject;
import com.company.project.model.Syslog;
import com.company.project.service.SyslogService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import java.time.LocalDateTime;
import java.util.Date;
/**
 * Records Druid monitoring data on a schedule.
 */
@Component
public class DruidLogTask {

    private static Logger myLogger = LoggerFactory.getLogger(DruidLogTask.class);

    // Obtain the DruidStatService singleton
    private DruidStatService druidStatService = DruidStatService.getInstance();

    // Whether this is the first record since the application (re)started
    private boolean isFirstflag = true;

    // @Autowired
    // private SyslogService syslogService;

    // Run 5 seconds after startup; in production use 5 * 60 * 1000 ms, i.e. record every 5 minutes
    // @Scheduled(initialDelay = 5000, fixedDelay = 300000)
    @Scheduled(initialDelay = 5000, fixedDelay = 20000)
    @Async // make the scheduled task asynchronous; this also requires @EnableAsync on the application class
    public void log() throws InterruptedException {
        // On the first run after a restart, mark the restart in the log
        if (isFirstflag) {
            myLogger.info("=============== Restarted at {}, starting a new recording session ===============", LocalDateTime.now().toString());
            isFirstflag = !isFirstflag;
            // Syslog newLog = new Syslog();
            // newLog.setLogType("druidLog");
            // newLog.setLogBody("restart detected");
            // newLog.setCreatTime(new Date());
            // syslogService.save(newLog);
        }
        JSONObject allResult = new JSONObject(16, true);
        // Home page information
        String basicJson = druidStatService.service("/basic.json");
        // Data sources
        String datasourceJson = druidStatService.service("/datasource.json");
        // SQL monitoring
        String sqlJson = druidStatService.service("/sql.json?orderBy=SQL&orderType=desc&page=1&perPageCount=1000000&");
        // SQL firewall
        String wallJson = druidStatService.service("/wall.json");
        // Web application
        String webappJson = druidStatService.service("/webapp.json");
        // URI monitoring
        String weburiJson = druidStatService.service("/weburi.json?orderBy=URI&orderType=desc&page=1&perPageCount=1000000&");
        // Session monitoring
        String websessionJson = druidStatService.service("/websession.json");
        // Spring monitoring
        String springJson = druidStatService.service("/spring.json");
        allResult.put("/basic.json", JSONObject.parseObject(basicJson));
        allResult.put("/datasource.json", JSONObject.parseObject(datasourceJson));
        allResult.put("/sql.json", JSONObject.parseObject(sqlJson));
        allResult.put("/wall.json", JSONObject.parseObject(wallJson));
        allResult.put("/webapp.json", JSONObject.parseObject(webappJson));
        allResult.put("/weburi.json", JSONObject.parseObject(weburiJson));
        allResult.put("/websession.json", JSONObject.parseObject(websessionJson));
        allResult.put("/spring.json", JSONObject.parseObject(springJson));
        String allResultJsonString = allResult.toJSONString();
        myLogger.info("Scheduled Druid monitoring record, allResult === {}", allResultJsonString);
        // Syslog newLog = new Syslog();
        // newLog.setLogType("druidLog");
        // newLog.setLogBody(allResultJsonString);
        // newLog.setCreatTime(new Date());
        // syslogService.save(newLog);
    }
}
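The @Scheduled and @Async annotations only take effect if scheduling and asynchronous execution are switched on. A minimal sketch of the Spring Boot application class (the class name Application is just a placeholder):
package com.company.project;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;

@SpringBootApplication
@EnableScheduling // enables @Scheduled tasks
@EnableAsync      // enables @Async execution, as noted in the task above
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}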
For the file logging itself, the main pieces are logback's built-in RollingFileAppender and a custom logger bound to the recording class.
Core configuration:
<property name="LOG_HOME_PATH" value="file_logs"/>
<property name="encoder_pattern" value="%d{yyyy/MM/dd HH:mm:ss.SSS} %-5level [%thread] [%c{0}:%L] : %msg%n"/>
<property name="maxHistory" value="60"/>
<property name="maxFileSize" value="10MB"/>
<appender name="druidSqlRollingFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME_PATH}/druid-sql.%d.%i.logfileNamePattern>
<maxHistory>${maxHistory}maxHistory>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>${maxFileSize}maxFileSize>
timeBasedFileNamingAndTriggeringPolicy>
rollingPolicy>
<encoder>
<pattern>${encoder_pattern}pattern>
<charset>UTF-8charset>
encoder>
appender>
<logger name="druid.sql.Statement" level="DEBUG" additivity="false">
<appender-ref ref="druidSqlRollingFile" />
logger>
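The monitoring task's output is routed the same way: in the full configuration below, the DruidLogTask class gets its own logger bound to a druidMonitorRollingFile appender, so the JSON records land in a dedicated druid-monitor log file:
<logger name="com.company.project.timetask.DruidLogTask" level="DEBUG" additivity="false">
    <appender-ref ref="druidMonitorRollingFile"/>
</logger>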
If write pressure on the log file is high, you can add one more layer, an asynchronous queue appender; logback ships this as AsyncAppender.
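For instance, the monitoring appender could be wrapped like this (the ASYNC_DRUID_MONITOR name is made up for this sketch; the full configuration below applies the same pattern to FILE_INFO and FILE_CONTROLLER_LOG):
<appender name="ASYNC_DRUID_MONITOR" class="ch.qos.logback.classic.AsyncAppender">
    <discardingThreshold>0</discardingThreshold>
    <queueSize>512</queueSize>
    <appender-ref ref="druidMonitorRollingFile"/>
</appender>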
The complete logback configuration file:
<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <property name="LOG_HOME_PATH" value="file_logs"/>
    <property name="encoder_pattern" value="%d{yyyy/MM/dd HH:mm:ss.SSS} %-5level [%thread] [%c{0}:%L] : %msg%n"/>
    <property name="maxHistory" value="60"/>
    <property name="maxFileSize" value="10MB"/>

    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="FILE_All" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/level_all.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/level_info.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/level_debug.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>DEBUG</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/level_error.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="FILE_CONTROLLER_LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/controller_log.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="druidSqlRollingFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/druid-sql.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="druidMonitorRollingFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME_PATH}/druid-monitor.%d.%i.log</fileNamePattern>
            <maxHistory>${maxHistory}</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${encoder_pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="ASYNC_INFO" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>512</queueSize>
        <appender-ref ref="FILE_INFO"/>
    </appender>

    <appender name="ASYNC_CONTROLLER_LOG" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>512</queueSize>
        <appender-ref ref="FILE_CONTROLLER_LOG"/>
    </appender>

    <root level="DEBUG">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="FILE_All"/>
        <appender-ref ref="FILE_DEBUG"/>
        <appender-ref ref="ASYNC_INFO"/>
        <appender-ref ref="FILE_ERROR"/>
    </root>

    <logger name="druid.sql.Statement" level="DEBUG" additivity="false">
        <appender-ref ref="druidSqlRollingFile"/>
    </logger>

    <logger name="com.company.project.timetask.DruidLogTask" level="DEBUG" additivity="false">
        <appender-ref ref="druidMonitorRollingFile"/>
    </logger>

    <logger name="com.company.project.support.aop.ControllerLogAop" level="INFO" additivity="false">
        <appender-ref ref="ASYNC_CONTROLLER_LOG"/>
    </logger>

    <logger name="org.springframework" level="ERROR"/>
    <logger name="org.mybatis" level="ERROR"/>
    <logger name="java.sql.Connection" level="DEBUG"/>
    <logger name="java.sql.Statement" level="DEBUG"/>
    <logger name="java.sql.PreparedStatement" level="DEBUG"/>
    <logger name="org.springframework.scheduling" level="INFO"/>
    <logger name="org.springframework.session" level="INFO"/>
    <logger name="org.apache.catalina.startup.DigesterFactory" level="ERROR"/>
    <logger name="org.apache.catalina.util.LifecycleBase" level="ERROR"/>
    <logger name="org.apache.coyote.http11.Http11NioProtocol" level="WARN"/>
    <logger name="org.apache.sshd.common.util.SecurityUtils" level="WARN"/>
    <logger name="org.apache.tomcat.util.net.NioSelectorPool" level="WARN"/>
    <logger name="org.crsh.plugin" level="WARN"/>
    <logger name="org.crsh.ssh" level="WARN"/>
    <logger name="org.eclipse.jetty.util.component.AbstractLifeCycle" level="ERROR"/>
    <logger name="org.hibernate.validator.internal.util.Version" level="WARN"/>
    <logger name="org.springframework.boot.actuate.autoconfigure.CrshAutoConfiguration" level="WARN"/>
</configuration>