Custom Storage for Alibaba Druid Connection Pool Monitoring Data
How can the data that the Druid connection pool collects — SQL execution timings, pool resource usage, and so on — be persisted so that operations staff can analyze and tune the system? The following example has passed initial testing.
Step 1:
Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger; the full code is below. This example only receives the stats snapshots and prints them to the console; a real application must supply an actual storage implementation (a sketch follows the class).
package com.andaily.web.context;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    // Called by DruidDataSource once per timeBetweenLogStatsMillis interval with a
    // snapshot of pool-level and per-SQL statistics. Zero-valued metrics are skipped
    // to keep the resulting JSON compact.
    @Override
    public void log(DruidDataSourceStatValue statValue) {
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL statistics, one map entry per distinct statement.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    // Fixed: the original stored fetchRowCountMax under the duplicate key "fetchRowCount".
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        String text = JSONUtils.toJSONString(map);
        System.out.println("==============:" + text);
    }
}
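The class above only prints each snapshot to the console. For an actual storage scheme, the simplest route is to serialize the snapshot to JSON exactly as above and insert one row per snapshot. The following is a minimal sketch of that idea over plain JDBC; the table druid_stat_log, its columns, and the logDataSource field are assumptions made for this example, not part of Druid's API. Writing through a separate, non-monitored data source keeps the logger from generating stats about its own inserts.

// Minimal persistence sketch. Assumed schema (not part of Druid):
//   CREATE TABLE druid_stat_log (
//     log_time  DATETIME,
//     pool_name VARCHAR(100),
//     stat_json TEXT
//   );
// Additional imports needed inside MyDruidStatLogger:
//   import java.sql.Connection;
//   import java.sql.PreparedStatement;
//   import java.sql.SQLException;
//   import javax.sql.DataSource;

// Hypothetical second DataSource, injected from Spring and reserved for stat
// writes; do not point it at the monitored pool itself.
private DataSource logDataSource;

public void setLogDataSource(DataSource logDataSource) {
    this.logDataSource = logDataSource;
}

// Call this at the end of log(DruidDataSourceStatValue) in place of System.out.println.
private void persist(DruidDataSourceStatValue statValue, String json) {
    String sql = "INSERT INTO druid_stat_log (log_time, pool_name, stat_json) VALUES (?, ?, ?)";
    try (Connection conn = logDataSource.getConnection();
         PreparedStatement ps = conn.prepareStatement(sql)) {
        ps.setTimestamp(1, new java.sql.Timestamp(System.currentTimeMillis()));
        ps.setString(2, statValue.getName());
        ps.setString(3, json);
        ps.executeUpdate();
    } catch (SQLException e) {
        // A failed stats write must never break the application; log it and move on.
        logger.error("failed to persist druid stat snapshot", e);
    }
}

With this in place, the last line of log(DruidDataSourceStatValue) becomes persist(statValue, text) instead of the println call, and each monitoring interval lands as one queryable row.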
Step 2: Configure the Spring beans
<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean> <bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
<!-- 基本属性 url、user、password -->
<property name="url" value="${jdbc.url}" />
<property name="username" value="${jdbc.username}" />
<property name="password" value="${jdbc.password}" />
<!-- 配置初始化大小、最小、最大 -->
<property name="initialSize" value="1" />
<property name="minIdle" value="1" />
<property name="maxActive" value="20" />
<!-- 配置获取连接等待超时的时间 -->
<property name="maxWait" value="60000" />
<!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 -->
<property name="timeBetweenEvictionRunsMillis" value="60000" />
<!-- 配置一个连接在池中最小生存的时间,单位是毫秒 -->
<property name="minEvictableIdleTimeMillis" value="300000" />
<property name="validationQuery" value="SELECT 'x'" />
<property name="testWhileIdle" value="true" />
<property name="testOnBorrow" value="false" />
<property name="testOnReturn" value="false" />
<!-- 打开PSCache,并且指定每个连接上PSCache的大小 -->
<property name="poolPreparedStatements" value="true" />
<property name="maxPoolPreparedStatementPerConnectionSize" value="20" />
<!-- 配置监控统计拦截的filters,去掉后监控界面sql无法统计 -->
<property name="filters" value="stat" />
<property name="timeBetweenLogStatsMillis" value="1000" />
<property name="statLogger" ref="myStatLogger"/>
</bean>
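For reference, if you are not wiring the pool through Spring XML, the same configuration can be built programmatically with DruidDataSource's public setters. This is a sketch equivalent to the bean definition above; the URL and credentials are placeholders standing in for the ${jdbc.*} values:

import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceFactory {

    public static DruidDataSource create() throws Exception {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl("jdbc:mysql://localhost:3306/test"); // placeholder for ${jdbc.url}
        ds.setUsername("user");                        // placeholder for ${jdbc.username}
        ds.setPassword("pass");                        // placeholder for ${jdbc.password}
        ds.setInitialSize(1);
        ds.setMinIdle(1);
        ds.setMaxActive(20);
        ds.setMaxWait(60000);
        ds.setTimeBetweenEvictionRunsMillis(60000);
        ds.setMinEvictableIdleTimeMillis(300000);
        ds.setValidationQuery("SELECT 'x'");
        ds.setTestWhileIdle(true);
        ds.setTestOnBorrow(false);
        ds.setTestOnReturn(false);
        ds.setPoolPreparedStatements(true);
        ds.setMaxPoolPreparedStatementPerConnectionSize(20);
        ds.setFilters("stat");                 // required for SQL-level statistics
        ds.setTimeBetweenLogStatsMillis(1000); // push a snapshot to the logger every second
        ds.setStatLogger(new MyDruidStatLogger());
        ds.init();
        return ds;
    }
}

The two settings that matter here are timeBetweenLogStatsMillis, which makes the pool push a stats snapshot to the logger at a fixed interval, and statLogger, which plugs in the custom implementation.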
After the application starts, the Druid monitoring output appears on the console.