Custom Storage of Alibaba Druid Connection Pool Monitoring Data
This article shows how to persist the metrics the Druid connection pool collects — SQL execution performance, pool resource usage, and so on — so that operations staff can analyze and tune the system later. The example below passed initial testing.
Step 1:
Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger. The full code follows. This example only receives the stat snapshots and prints them to the console; a real application would replace the print with an actual storage scheme (a sketch of one option is given right after the class).
package com.andaily.web.context;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        // Allow the logger name to be overridden via -Ddruid.stat.loggerName=...
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    /**
     * Called by Druid once per stats window (see timeBetweenLogStatsMillis in Step 2).
     * Collects pool-level and per-SQL metrics into a map, serializes it to JSON
     * and prints it; replace the println with your own storage logic.
     */
    @Override
    public void log(DruidDataSourceStatValue statValue) {
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        // Pool identity and core connection counters
        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());

        // Optional counters are only emitted when non-zero
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL statistics (collected only when the "stat" filter is enabled)
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    // the original put this under "fetchRowCount" again, silently
                    // overwriting the total; the max belongs under its own key
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        String text = JSONUtils.toJSONString(map);
        System.out.println("==============:" + text);
    }
}
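To turn the println into real persistence, one option — purely a sketch; the druid_stat_log table, its columns, and the monitoring-database credentials are hypothetical, not part of Druid or the original example — is to add a helper like the following to MyDruidStatLogger and call saveStat(statValue.getName(), text) at the end of log(...):

    // Hypothetical storage helper: writes one JSON snapshot per stats window.
    // Uses a plain JDBC connection to a separate monitoring database so the
    // logger's own inserts do not pollute the statistics of the monitored pool.
    private void saveStat(String dataSourceName, String statJson) {
        String sql = "INSERT INTO druid_stat_log (ds_name, stat_time, stat_json) VALUES (?, ?, ?)";
        try (java.sql.Connection conn = java.sql.DriverManager.getConnection(
                 "jdbc:mysql://localhost:3306/monitoring", "monitor", "secret");
             java.sql.PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, dataSourceName);
            ps.setTimestamp(2, new java.sql.Timestamp(System.currentTimeMillis()));
            ps.setString(3, statJson);
            ps.executeUpdate();
        } catch (java.sql.SQLException e) {
            // A monitoring failure must never break the application itself.
            logger.error("failed to persist druid stat snapshot", e);
        }
    }

Opening a connection per snapshot is acceptable at a 1000 ms interval; for shorter intervals or many pools, consider batching the inserts or handing snapshots off to an asynchronous queue.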
Step 2: Configure the Spring beans
<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean> <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
<!-- 基本属性 url、user、password -->
<property name="url" value="${jdbc.url}" />
<property name="username" value="${jdbc.username}" />
<property name="password" value="${jdbc.password}" />
<!-- 配置初始化大小、最小、最大 -->
<property name="initialSize" value="1" />
<property name="minIdle" value="1" />
<property name="maxActive" value="20" />
<!-- 配置获取连接等待超时的时间 -->
<property name="maxWait" value="60000" />
<!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 -->
<property name="timeBetweenEvictionRunsMillis" value="60000" />
<!-- 配置一个连接在池中最小生存的时间,单位是毫秒 -->
<property name="minEvictableIdleTimeMillis" value="300000" />
<property name="validationQuery" value="SELECT 'x'" />
<property name="testWhileIdle" value="true" />
<property name="testOnBorrow" value="false" />
<property name="testOnReturn" value="false" />
<!-- 打开PSCache,并且指定每个连接上PSCache的大小 -->
<property name="poolPreparedStatements" value="true" />
<property name="maxPoolPreparedStatementPerConnectionSize" value="20" />
<!-- 配置监控统计拦截的filters,去掉后监控界面sql无法统计 -->
<property name="filters" value="stat" />
<property name="timeBetweenLogStatsMillis" value="1000" />
<property name="statLogger" ref="myStatLogger"/>
</bean>
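For completeness, here is a sketch of the same wiring done in plain Java rather than Spring XML — useful if your project configures the data source programmatically. The JDBC URL and credentials are placeholders standing in for the ${jdbc.*} properties above; the other values mirror the XML exactly.

import java.sql.SQLException;

import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceFactory {

    // Builds a DruidDataSource equivalent to the Spring XML bean above.
    public static DruidDataSource build() throws SQLException {
        DruidDataSource dataSource = new DruidDataSource();
        // Placeholders for ${jdbc.url}, ${jdbc.username}, ${jdbc.password}
        dataSource.setUrl("jdbc:mysql://localhost:3306/mydb");
        dataSource.setUsername("user");
        dataSource.setPassword("password");
        dataSource.setInitialSize(1);
        dataSource.setMinIdle(1);
        dataSource.setMaxActive(20);
        dataSource.setMaxWait(60000);
        dataSource.setTimeBetweenEvictionRunsMillis(60000);
        dataSource.setMinEvictableIdleTimeMillis(300000);
        dataSource.setValidationQuery("SELECT 'x'");
        dataSource.setTestWhileIdle(true);
        dataSource.setTestOnBorrow(false);
        dataSource.setTestOnReturn(false);
        dataSource.setPoolPreparedStatements(true);
        dataSource.setMaxPoolPreparedStatementPerConnectionSize(20);
        dataSource.setFilters("stat");                 // enables SQL statistics; throws SQLException
        dataSource.setTimeBetweenLogStatsMillis(1000); // push stats to the logger every second
        dataSource.setStatLogger(new MyDruidStatLogger());
        dataSource.init();
        return dataSource;
    }
}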
After startup you will see the Druid monitoring information printed to the console.
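One line is printed per interval, and the output will look something like the following (the values here are purely illustrative, not captured output):

==============:{"url":"jdbc:mysql://localhost:3306/mydb","dbType":"mysql","name":"dataSource-1","activeCount":0,"poolingCount":1,"connectCount":5,"closeCount":5,"executeCount":5,"sqlList":[{"sql":"select * from t_user where id = ?","executeCount":5,"executeMillisMax":12,"executeMillisTotal":31,"executeHistogram":[5],"executeAndResultHoldHistogram":[5],"fetchRowCount":5,"fetchRowCountMax":1,"fetchRowHistogram":[5]}]}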