This post shows how to persist the metrics that the Druid connection pool collects — SQL execution statistics, pool resource usage, and so on — so that operations staff can analyze and optimize the system. The approach below passed initial testing.

Step 1:

Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger. The full code is below. This example only receives each stat snapshot and prints it to the console; a real application needs a concrete storage implementation (a sketch follows the class).

package com.andaily.web.context;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        // Allow the logger name to be overridden via -Ddruid.stat.loggerName=...
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        // Flatten the snapshot into an ordered map; most counters are only
        // included when non-zero, to keep the resulting JSON compact.
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL statistics: one entry per distinct statement.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        String text = JSONUtils.toJSONString(map);
        System.out.println("==============:" + text);
    }
}
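As noted above, the class only prints. As a starting point for real persistence, here is a minimal sketch that writes each snapshot as a JSON row over plain JDBC. The druid_stat_log table, the PersistingDruidStatLogger class name, and the connection settings are all hypothetical — swap in whatever storage your operations team actually uses. Only getters already used in the class above are read from the snapshot.

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.LinkedHashMap;
import java.util.Map;

import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.support.json.JSONUtils;

/*
 * Hypothetical schema:
 *   CREATE TABLE druid_stat_log (
 *       id BIGINT AUTO_INCREMENT PRIMARY KEY,
 *       created_at TIMESTAMP NOT NULL,
 *       stat_json TEXT NOT NULL
 *   );
 */
public class PersistingDruidStatLogger extends DruidDataSourceStatLoggerAdapter {

    // Use a separate connection for stat storage; borrowing from the
    // monitored pool would distort the very numbers being recorded.
    private final String url;
    private final String user;
    private final String password;

    public PersistingDruidStatLogger(String url, String user, String password) {
        this.url = url;
        this.user = user;
        this.password = password;
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        // A compact subset of the fields collected above; extend as needed.
        Map<String, Object> map = new LinkedHashMap<String, Object>();
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        map.put("poolingCount", statValue.getPoolingCount());
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        String json = JSONUtils.toJSONString(map);

        String sql = "INSERT INTO druid_stat_log (created_at, stat_json) VALUES (?, ?)";
        try (Connection conn = DriverManager.getConnection(url, user, password);
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setString(2, json);
            ps.executeUpdate();
        } catch (SQLException e) {
            // Stat persistence must never break the application itself.
            e.printStackTrace();
        }
    }
}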

Step 2: Configure the Spring beans

<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean>

<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">   
   <!-- 基本属性 url、user、password -->  
   <property name="url" value="${jdbc.url}" />  
   <property name="username" value="${jdbc.username}" />  
   <property name="password" value="${jdbc.password}" />  
       
   <!-- 配置初始化大小、最小、最大 -->  
   <property name="initialSize" value="1" />  
   <property name="minIdle" value="1" />   
   <property name="maxActive" value="20" />  
  
   <!-- 配置获取连接等待超时的时间 -->  
   <property name="maxWait" value="60000" />  
  
   <!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 -->  
   <property name="timeBetweenEvictionRunsMillis" value="60000" />  
   <!-- 配置一个连接在池中最小生存的时间,单位是毫秒 -->  
   <property name="minEvictableIdleTimeMillis" value="300000" />  
   <property name="validationQuery" value="SELECT 'x'" />  
   <property name="testWhileIdle" value="true" />  
   <property name="testOnBorrow" value="false" />  
   <property name="testOnReturn" value="false" />  
   <!-- 打开PSCache,并且指定每个连接上PSCache的大小 -->  
   <property name="poolPreparedStatements" value="true" />  
   <property name="maxPoolPreparedStatementPerConnectionSize" value="20" />  
  
   <!-- 配置监控统计拦截的filters,去掉后监控界面sql无法统计 -->  
   <property name="filters" value="stat" />   
   <property name="timeBetweenLogStatsMillis" value="1000" />
   <property name="statLogger" ref="myStatLogger"/>
</bean>
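For projects that do not use XML, the same wiring can be done in plain Java. Below is a minimal sketch mirroring the bean definition above; the DataSourceFactory class name and the literal connection values are placeholders, while the setter names are the standard DruidDataSource counterparts of the XML properties.

package com.andaily.web.context;

import java.sql.SQLException;

import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceFactory {

    // Mirrors the <bean id="dataSource"> definition; values are placeholders.
    public static DruidDataSource create() throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl("jdbc:mysql://localhost:3306/test");
        ds.setUsername("root");
        ds.setPassword("secret");
        ds.setInitialSize(1);
        ds.setMinIdle(1);
        ds.setMaxActive(20);
        ds.setMaxWait(60000);
        ds.setTimeBetweenEvictionRunsMillis(60000);
        ds.setMinEvictableIdleTimeMillis(300000);
        ds.setValidationQuery("SELECT 'x'");
        ds.setTestWhileIdle(true);
        ds.setTestOnBorrow(false);
        ds.setTestOnReturn(false);
        ds.setPoolPreparedStatements(true);
        ds.setMaxPoolPreparedStatementPerConnectionSize(20);
        ds.setFilters("stat");                  // enables SQL stat collection
        ds.setTimeBetweenLogStatsMillis(1000);  // flush stats every second
        ds.setStatLogger(new MyDruidStatLogger());
        ds.init();
        return ds;
    }
}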

After startup you will see the Druid monitoring output printed to the console.
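To verify the setup end to end, something like the following can be used to generate a little traffic and watch the snapshots appear. StatLoggerDemo is a hypothetical driver that reuses the DataSourceFactory sketch above.

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

import com.alibaba.druid.pool.DruidDataSource;

public class StatLoggerDemo {
    public static void main(String[] args) throws Exception {
        DruidDataSource ds = DataSourceFactory.create();
        // Run one trivial query so there is something to report.
        try (Connection conn = ds.getConnection();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
        }
        // With timeBetweenLogStatsMillis=1000 a JSON snapshot is flushed
        // roughly every second; wait long enough for at least one flush.
        Thread.sleep(3000);
        ds.close();
    }
}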

