This post shows how to persist the monitoring data that the Druid connection pool collects (SQL execution performance, pool resource usage, and so on) so that operations staff can analyze and tune the system later. The example below has passed initial testing.

Step 1:

Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger. The full code follows. This example only receives the stats and prints them to the console; a real application needs to implement an actual storage scheme (a sketch is given after the class).

package com.andaily.web.context;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        // Allow the logger name to be overridden with -Ddruid.stat.loggerName=...
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        // Pool-level statistics; zero-valued counters are skipped to keep the output compact.
        Map<String, Object> map = new LinkedHashMap<String, Object>();
        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL statistics collected during the last logging interval.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        // Demo only: serialize to JSON and print. Replace this with real persistence.
        String text = JSONUtils.toJSONString(map);
        System.out.println("==============:" + text);
    }
}
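Since the class above only prints to the console, here is a minimal persistence sketch for the storage step. It assumes a separate plain DataSource named statDataSource (do not reuse the monitored pool itself, or the stats writes would show up in the stats) and a table druid_stat_log(created_time, stat_json); both names are hypothetical and would need to match your own schema.

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import javax.sql.DataSource;

public class StatJsonWriter {

    // A plain DataSource for the monitoring database, not the monitored pool.
    private final DataSource statDataSource;

    public StatJsonWriter(DataSource statDataSource) {
        this.statDataSource = statDataSource;
    }

    public void write(String statJson) {
        String sql = "INSERT INTO druid_stat_log (created_time, stat_json) VALUES (?, ?)";
        try (Connection conn = statDataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setString(2, statJson);
            ps.executeUpdate();
        } catch (SQLException e) {
            // A failed stats write must never break the application; log and move on.
            e.printStackTrace();
        }
    }
}

With a writer like this injected into MyDruidStatLogger, the System.out.println call at the end of log(DruidDataSourceStatValue) becomes statJsonWriter.write(text).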

Step 2: Configure the Spring beans

<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean>

<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">   
   <!-- 基本属性 url、user、password -->  
   <property name="url" value="${jdbc.url}" />  
   <property name="username" value="${jdbc.username}" />  
   <property name="password" value="${jdbc.password}" />  
       
   <!-- 配置初始化大小、最小、最大 -->  
   <property name="initialSize" value="1" />  
   <property name="minIdle" value="1" />   
   <property name="maxActive" value="20" />  
  
   <!-- 配置获取连接等待超时的时间 -->  
   <property name="maxWait" value="60000" />  
  
   <!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 -->  
   <property name="timeBetweenEvictionRunsMillis" value="60000" />  
   <!-- 配置一个连接在池中最小生存的时间,单位是毫秒 -->  
   <property name="minEvictableIdleTimeMillis" value="300000" />  
   <property name="validationQuery" value="SELECT 'x'" />  
   <property name="testWhileIdle" value="true" />  
   <property name="testOnBorrow" value="false" />  
   <property name="testOnReturn" value="false" />  
   <!-- 打开PSCache,并且指定每个连接上PSCache的大小 -->  
   <property name="poolPreparedStatements" value="true" />  
   <property name="maxPoolPreparedStatementPerConnectionSize" value="20" />  
  
   <!-- 配置监控统计拦截的filters,去掉后监控界面sql无法统计 -->  
   <property name="filters" value="stat" />   
   <property name="timeBetweenLogStatsMillis" value="1000" />
   <property name="statLogger" ref="myStatLogger"/>
</bean>

After startup you will see the Druid monitoring data printed to the console.
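For reference, the same wiring can be done without Spring XML. The sketch below uses the standard DruidDataSource setters corresponding to the properties above; the JDBC URL and credentials are placeholders you would replace with your own.

package com.andaily.web.context;

import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceFactory {

    public static DruidDataSource create() throws Exception {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl("jdbc:mysql://localhost:3306/test"); // placeholder URL
        ds.setUsername("user");                        // placeholder credentials
        ds.setPassword("secret");
        ds.setInitialSize(1);
        ds.setMinIdle(1);
        ds.setMaxActive(20);
        ds.setFilters("stat");                 // enable SQL statistics collection
        ds.setTimeBetweenLogStatsMillis(1000); // flush stats to the statLogger every second
        ds.setStatLogger(new MyDruidStatLogger());
        ds.init();
        return ds;
    }
}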
