Logback: output JSON-format logs (including MDC) and send them to Kafka
1, pom.xml
<!-- kafka -->
<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC1</version>
    <scope>runtime</scope>
</dependency>
<!-- logback -->
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-core</artifactId>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.2.3</version>
    <scope>runtime</scope>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.0</version>
</dependency>
<!-- other -->
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.25</version>
    <scope>compile</scope>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-access</artifactId>
</dependency>
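Note: logback-kafka-appender pulls the kafka-clients producer in transitively, and the 0.2.0 line targets Logback 1.2.x; if you pin different Logback or Kafka versions, check the project README for the exact compatibility matrix.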
2, spring-logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="false" scan="true" scanPeriod="600000">
    <!-- Define the log file location; do not use relative paths in Logback configuration -->
    <property name="LOG_HOME" value="/var/log" />
    <contextName>${HOSTNAME}</contextName>
    <springProperty scope="context" name="appName"
        source="spring.application.name" />
    <springProperty scope="context" name="ip"
        source="spring.cloud.client.ipAddress" />
    <!-- Formatted output: %d is the date, %thread the thread name, %-5level pads the
         level to 5 characters, %msg is the log message, %n is a newline -->
    <property name="CONSOLE_LOG_PATTERN"
        value="%d{yyyy-MM-dd HH:mm:ss.SSS} ${ip} ${appName} %highlight(%-5level) %yellow(%X{X-B3-TraceId}),%green(%X{X-B3-SpanId}),%blue(%X{X-B3-ParentSpanId}) %yellow(%thread) %green(%logger) %msg%n" />
    <!-- <logger name="org.springframework.web" level="DEBUG" /> -->
    <!-- show parameters for Hibernate SQL (Hibernate-specific loggers) -->
    <!-- <logger name="org.hibernate.type.descriptor.sql.BasicBinder" level="TRACE" /> -->
    <!-- <logger name="org.hibernate.type.descriptor.sql.BasicExtractor" level="DEBUG" /> -->
    <!-- <logger name="org.hibernate.engine.QueryParameters" level="DEBUG" /> -->
    <!-- <logger name="org.hibernate.engine.query.HQLQueryPlan" level="DEBUG" /> -->
    <!-- <logger name="org.hibernate.SQL" level="DEBUG" /> -->
    <!-- The logger name is the package itself; the logging.level. prefix belongs in
         application.properties, not in logback XML -->
    <logger name="com.italktv.platform" level="info" />
    <!-- Console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>debug</level>
        </filter>
    </appender>
    <!-- Roll the log file daily (and by size) -->
    <appender name="FILE"
        class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the log file currently being written -->
        <file>${LOG_HOME}/bigdata/data-api.log</file>
        <rollingPolicy
            class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- File name pattern for rolled-over log files -->
            <fileNamePattern>${LOG_HOME}/bigdata/data-api.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- Number of days to keep log files -->
            <maxHistory>30</maxHistory>
            <maxFileSize>1MB</maxFileSize>
            <totalSizeCap>10MB</totalSizeCap>
        </rollingPolicy>
        <encoder
            class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "level": "%level",
                        "trace": "%X{X-B3-TraceId:-}",
                        "requestId": "%X{requestId}",
                        "remoteIp": "%X{remoteIp}",
                        "span": "%X{X-B3-SpanId:-}",
                        "parent": "%X{X-B3-ParentSpanId:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%message",
                        "stack_trace": "%exception{10}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <!-- Maximum log file size (alternative size-only trigger):
        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <MaxFileSize>10KB</MaxFileSize>
        </triggeringPolicy> -->
    </appender>
    <!-- Alternative appender using a Logstash layout:
    <appender name="kafkaAppenderAnotherEncode"
        class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder
            class="com.github.danielwegener.logback.kafka.encoding.PatternLayoutKafkaMessageEncoder">
            <layout class="net.logstash.logback.layout.LogstashLayout">
                <includeMdc>true</includeMdc>
                <includeContext>true</includeContext>
                <includeCallerData>true</includeCallerData>
                <customFields>{"system":"test"}</customFields>
                <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames" />
            </layout>
        </encoder>
        <topic>tv_server_logstash_log</topic>
        <keyingStrategy
            class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
        <deliveryStrategy
            class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>bootstrap.servers=211.100.75.227:9092</producerConfig>
        <producerConfig>acks=0</producerConfig>
        <producerConfig>linger.ms=1000</producerConfig>
        <producerConfig>block.on.buffer.full=false</producerConfig>
        <appender-ref ref="STDOUT" />
    </appender>
    -->
    <!-- https://www.cnblogs.com/maxzhang1985/p/9522507.html
         https://logback.qos.ch/manual/layouts.html -->
    <appender name="kafkaAppender"
        class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder charset="UTF-8"
            class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <mdc />
                <context />
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "level": "%level",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "parent": "%X{X-B3-ParentSpanId:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%message",
                        "stack_trace": "%exception{10}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
        <topic>tv_server_logstash_log</topic>
        <keyingStrategy
            class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
        <deliveryStrategy
            class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>bootstrap.servers=127.0.0.1:9092</producerConfig>
        <!-- don't wait for a broker to ack the reception of a batch -->
        <producerConfig>acks=0</producerConfig>
        <!-- wait up to 1000 ms and collect log messages before sending them as a batch -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- even if the producer buffer runs full, do not block the application but
             start dropping messages; block.on.buffer.full was removed from newer
             kafka-clients versions, where max.block.ms=0 is the equivalent setting -->
        <!-- <producerConfig>max.block.ms=0</producerConfig> -->
        <producerConfig>block.on.buffer.full=false</producerConfig>
        <!-- If the Kafka connection fails, fall back to this appender for output -->
        <appender-ref ref="STDOUT" />
    </appender>
    <!-- AsyncAppender decouples logging from the caller; by default it discards
         TRACE/DEBUG/INFO events once its queue is 80% full (set discardingThreshold
         to 0 to keep them) -->
    <appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">
        <appender-ref ref="kafkaAppender" />
    </appender>
    <!-- Log output level -->
    <root level="INFO">
        <!-- Do not send stdout logs in production -->
        <appender-ref ref="STDOUT" />
        <!-- <appender-ref ref="FILE" /> -->
        <!-- <appender-ref ref="kafkaAppender" /> -->
        <appender-ref ref="ASYNC" />
    </root>
</configuration>
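With this configuration in place, an ordinary SLF4J call is all it takes to publish a JSON record to the tv_server_logstash_log topic. A minimal sketch (the demo class and message are illustrative, not part of the original setup):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class JsonLogDemo {

    private static final Logger LOGGER = LoggerFactory.getLogger(JsonLogDemo.class);

    public static void main(String[] args) {
        // Entries put into the MDC are exported as top-level JSON fields
        // by the <mdc /> provider of LoggingEventCompositeJsonEncoder.
        MDC.put("requestId", "demo-request-id");
        try {
            LOGGER.info("hello kafka");
        } finally {
            // Always clear MDC state so the value does not leak to other
            // work executed later on the same thread.
            MDC.remove("requestId");
        }
    }
}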
3, Add MDC entries for Logback via a Spring MVC interceptor
import java.util.UUID;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import org.springframework.stereotype.Component;
import org.springframework.web.servlet.HandlerInterceptor;
import org.springframework.web.servlet.ModelAndView;

@Component
public class LogInterceptor implements HandlerInterceptor {

    private static final String REQUEST_ID = "requestId";
    private static final Logger LOGGER = LoggerFactory.getLogger(LogInterceptor.class);

    @Override
    public boolean preHandle(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o) throws Exception {
        String xForwardedForHeader = httpServletRequest.getHeader("X-Forwarded-For");
        String remoteIp = httpServletRequest.getRemoteAddr();
        String uuid = UUID.randomUUID().toString();
        LOGGER.info("put requestId ({}) to logger", uuid);
        LOGGER.info("request id:{}, client ip:{}, X-Forwarded-For:{}", uuid, remoteIp, xForwardedForHeader);
        MDC.put(REQUEST_ID, uuid);
        MDC.put("remoteIp", remoteIp);
        return true;
    }

    @Override
    public void postHandle(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o,
            ModelAndView modelAndView) throws Exception {
    }

    @Override
    public void afterCompletion(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o, Exception e)
            throws Exception {
        // Clean up in afterCompletion rather than postHandle: postHandle is skipped
        // when the handler throws, which would leak MDC values into later requests
        // served by the same thread.
        String uuid = MDC.get(REQUEST_ID);
        LOGGER.info("remove requestId ({}) from logger", uuid);
        MDC.remove(REQUEST_ID);
        MDC.remove("remoteIp");
    }
}
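Once the interceptor has run, every log statement issued while handling the request carries these MDC fields. A record produced by the kafkaAppender above would look roughly like this (all values are illustrative):

{
  "@timestamp": "2019-05-01T12:00:00.000Z",
  "requestId": "0f8fad5b-d9cb-469f-a165-70867728950e",
  "remoteIp": "10.0.0.7",
  "level": "INFO",
  "thread": "http-nio-8080-exec-1",
  "class": "c.i.platform.api.UserController",
  "message": "query user list"
}

With the <context /> provider enabled, context properties such as appName and ip appear as additional fields; field order and extras vary with the configured providers.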
4, Register the interceptor
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter;

@Configuration
public class WebMvcConfigurer extends WebMvcConfigurerAdapter {

    // WebMvcConfigurerAdapter is deprecated since Spring 5; on newer versions,
    // implement org.springframework.web.servlet.config.annotation.WebMvcConfigurer directly.
    @Autowired
    private LogInterceptor logInterceptor;

    @Override
    public void addInterceptors(InterceptorRegistry registry) {
        registry.addInterceptor(logInterceptor);
        super.addInterceptors(registry);
    }
}
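With the interceptor registered, each request now produces JSON records on the topic. You can verify end to end with Kafka's console consumer, e.g. bin/kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9092 --topic tv_server_logstash_log (the script path assumes a stock Kafka distribution).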
References:
Logback manual: https://logback.qos.ch/manual/layouts.html
http://www.importnew.com/28541.html
https://www.jianshu.com/p/a26da0c55255
https://blog.csdn.net/Soinice/article/details/84033382
https://examples.javacodegeeks.com/enterprise-java/logback/logback-kafka-appender-example/
http://stevetarver.github.io/2016/04/20/whole-product-logging.html (detailed walkthrough)