Following up on my previous post: the company now also wants the Druid Monitor statistics to be persisted. Druid keeps its monitoring records in memory only, so they are lost on every restart; to preserve them, the records need to be dumped to a log file periodically. After digging into this for quite a while, I am writing up my findings here in the hope that they save you some detours.

First, the official Druid documentation on the topic: https://github.com/alibaba/druid/wiki/%E6%80%8E%E4%B9%88%E4%BF%9D%E5%AD%98Druid%E7%9A%84%E7%9B%91%E6%8E%A7%E8%AE%B0%E5%BD%95

Without further ado, straight to the point.
- Add the following to the DataSource configuration:

```xml
<property name="timeBetweenLogStatsMillis" value="600000" />
<property name="statLogger" ref="localStatLogger" />
```

Here `timeBetweenLogStatsMillis` is the dump interval in milliseconds (600000 = every 10 minutes), and `statLogger` references the custom logger bean defined next.
- Define the bean:

```xml
<bean id="localStatLogger" class="com.hyde.tracker.server.druid.LocalStatLogger" />
```
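If you wire the pool in Java code rather than Spring XML, the same two settings can be applied through `DruidDataSource`'s setters. A minimal sketch, assuming a plain `DruidDataSource` (the class name, JDBC URL, and credentials are placeholders, not from the original setup):

```java
import com.alibaba.druid.pool.DruidDataSource;

import com.hyde.tracker.server.druid.LocalStatLogger;

public class DataSourceConfig {

    public static DruidDataSource createDataSource() {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl("jdbc:mysql://localhost:3306/test"); // placeholder JDBC URL
        dataSource.setUsername("root");                        // placeholder credentials
        dataSource.setPassword("secret");
        // Same effect as the two <property> entries above:
        dataSource.setTimeBetweenLogStatsMillis(600000L); // dump stats every 10 minutes
        dataSource.setStatLogger(new LocalStatLogger());  // the custom logger shown below
        return dataSource;
    }
}
```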
- The `LocalStatLogger` class overrides the adapter's `log` method to implement custom log storage:
```java
package com.hyde.tracker.server.druid;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.net.URL;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.log4j.PropertyConfigurator;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

/**
 * @author WangHuijie
 */
public class LocalStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOGGER = LogFactory.getLog(LocalStatLogger.class);

    static {
        // Configure log4j from the log4j.properties on the classpath, so the
        // file appender defined there is in place before the first stats dump.
        URL url = LocalStatLogger.class.getClassLoader().getResource("log4j.properties");
        PropertyConfigurator.configure(url);
    }

    /**
     * @see com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter#log(com.alibaba.druid.pool.DruidDataSourceStatValue)
     */
    @Override
    public void log(DruidDataSourceStatValue statValue) {

        // Collect the pool-level statistics; zero-valued counters are skipped
        // to keep each JSON line compact.
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        /*map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());*/
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }
        // Per-SQL statistics, one entry per statement seen by the pool.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                }
                if (sqlStat.getExecuteErrorCount() > 0) {
                    sqlStatMap.put("executeErrorCount", sqlStat.getExecuteErrorCount());
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }

        // One JSON line per dump interval; log4j routes it to the dump file.
        String text = JSONUtils.toJSONString(map);
        LOGGER.info(text);
    }
}
```
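Because each dump is a single JSON line, the file is easy to post-process. A rough reader sketch, assuming the `[%d] - %m%n` layout and file path from the log4j.properties shown next; the class name and the printed fields are only illustrative:

```java
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Map;

import com.alibaba.druid.support.json.JSONUtils;

// Hypothetical post-processing tool, not part of the setup above.
public class DruidStatLogReader {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws IOException {
        try (BufferedReader reader = new BufferedReader(new FileReader("../logs/druid/druid.log"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // Each line looks like "[<timestamp>] - {...json...}";
                // the JSON payload starts at the first '{'.
                int jsonStart = line.indexOf('{');
                if (jsonStart < 0) {
                    continue;
                }
                Map<String, Object> stats = (Map<String, Object>) JSONUtils.parse(line.substring(jsonStart));
                // Zero-valued counters are omitted by LocalStatLogger, so keys may be absent.
                System.out.println(stats.get("name") + " executeCount=" + stats.get("executeCount"));
            }
        }
    }
}
```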
- log4j.properties, which defines the location and name of the dump log file:

```properties
log4j.rootLogger=INFO, druid, logfile

log4j.appender.druid.Threshold=INFO
log4j.appender.druid=org.apache.log4j.ConsoleAppender
log4j.appender.druid.layout=org.apache.log4j.PatternLayout
log4j.appender.druid.layout.ConversionPattern=%d %p [%c]:%L - %m%n

log4j.appender.logfile.Threshold=INFO
log4j.appender.logfile=org.apache.log4j.DailyRollingFileAppender
log4j.appender.logfile.DatePattern='.'yyyy-MM-dd
log4j.appender.logfile.File=../logs/druid/druid.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=[%d] - %m%n
```
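With this setup the `druid` appender echoes each dump to the console, while the `logfile` appender appends it to `../logs/druid/druid.log` (a relative path, resolved against the JVM's working directory). `DailyRollingFileAppender` rolls the file once per day, renaming the previous day's log with a `.yyyy-MM-dd` suffix, so the monitoring records accumulate on disk and survive application restarts.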