<?xml version="1.0" encoding="UTF-8"?>
<!--
NOTE(review): the Maven coordinates below were pasted into this logback
configuration file; they belong in pom.xml, not here. Nothing (not even a
comment) may precede the XML declaration, so the declaration has been moved
to the top and the snippet is preserved as a comment until it is relocated:

<logback-kafka-appender.version>0.2.0-RC2</logback-kafka-appender.version>
<dependency>
<groupId>com.github.danielwegener</groupId>
<artifactId>logback-kafka-appender</artifactId>
<version>${logback-kafka-appender.version}</version>
</dependency>
-->
<configuration>
<!-- Pull in Spring Boot's logback defaults (conversion rules, base properties). -->
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<!-- Pipe-delimited line: app tag "pi" | host | timestamp | thread | level | logger | line number (%L) |
     MDC fields (orgCode, bizType, jobId, lid, fromUuid, toUuid, traceId) | message.
     NOTE(review): %L (caller line number) is expensive to compute and requires
     includeCallerData on async appenders - confirm it is really needed. -->
<property name="ENCODER_PATTERN"
value="pi|${HOSTNAME}|%d{yyyy-MM-dd HH:mm:ss.SSS}|[%thread]|%-5level|%logger{80}|%L|%X{orgCode}|%X{bizType}|%X{jobId}|%X{lid}|%X{fromUuid}|%X{toUuid}|%X{traceId}|%msg%n"/>
<!-- Values resolved from the Spring Environment at startup.
     NOTE(review): only "bootstrapServers" is referenced in this file; "service",
     "env" and "springAppName" appear unused here - confirm whether a parent or
     included config consumes them before removing. -->
<springProperty scope="context" name="service" source="spring.application.name" defaultValue="UnknownService"/>
<springProperty scope="context" name="env" source="elk.env" defaultValue="test"/>
<springProperty scope="context" name="bootstrapServers" source="elk.kafka.bootstrap.servers" defaultValue="10.12.0.33:9092,10.12.0.32:9092,10.12.0.34:9092"/>
<springProperty scope="context" name="springAppName" source="spring.application.name"/>
<!-- Synchronous console output using the shared pipe-delimited pattern. -->
<appender name="consoleAppender" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${ENCODER_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- Rolls daily, and within a day when the active file exceeds 100MB (%i index);
     keeps 30 days of history, capped at 50GB total.
     SizeAndTimeBasedRollingPolicy replaces the deprecated
     TimeBasedRollingPolicy + SizeAndTimeBasedFNATP combination (logback >= 1.1.7)
     with identical rolling behavior. -->
<appender name="rollingFileAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_PATH}/pi.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}/pi.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<maxFileSize>100MB</maxFileSize>
<maxHistory>30</maxHistory>
<totalSizeCap>50GB</totalSizeCap>
</rollingPolicy>
<encoder>
<pattern>${ENCODER_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!-- Asynchronous wrapper around the rolling file appender. -->
<appender name="ASYNC-rollingFileAppender" class="ch.qos.logback.classic.AsyncAppender">
<!-- Never discard events. By default, once the queue is 80% full, AsyncAppender
     drops TRACE/DEBUG/INFO events; 0 disables that discarding. -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; affects throughput. Logback's default is 256. -->
<queueSize>256</queueSize>
<!-- ENCODER_PATTERN uses %L (caller line number). AsyncAppender does NOT capture
     caller data by default, so without this flag %L is logged as "?". Capturing
     caller data is comparatively expensive. -->
<includeCallerData>true</includeCallerData>
<!-- AsyncAppender accepts at most one attached appender. -->
<appender-ref ref="rollingFileAppender"/>
</appender>
<!-- Ships each formatted log line to the "edgex" Kafka topic. -->
<appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
<encoder>
<pattern>${ENCODER_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
<topic>edgex</topic>
<!-- No message key: events are distributed across partitions with no ordering guarantee. -->
<keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
<!-- Deliver without blocking the logging thread. -->
<deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
<!-- acks=0: fire-and-forget - fastest, but log lines are silently lost on broker failure. -->
<producerConfig>acks=0</producerConfig>
<!-- Batch messages for up to 1s before sending. -->
<producerConfig>linger.ms=1000</producerConfig>
<!-- max.block.ms=0: never stall the application thread when Kafka is unreachable. -->
<producerConfig>max.block.ms=0</producerConfig>
<producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
</appender>
<!-- Asynchronous wrapper around the Kafka appender. -->
<appender name="ASYNC-kafkaAppender" class="ch.qos.logback.classic.AsyncAppender">
<!-- Never discard events. By default, once the queue is 80% full, AsyncAppender
     drops TRACE/DEBUG/INFO events; 0 disables that discarding. -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue depth; affects throughput. Logback's default is 256. -->
<queueSize>256</queueSize>
<!-- ENCODER_PATTERN uses %L (caller line number). AsyncAppender does NOT capture
     caller data by default, so without this flag %L is logged as "?". Capturing
     caller data is comparatively expensive. -->
<includeCallerData>true</includeCallerData>
<!-- AsyncAppender accepts at most one attached appender. -->
<appender-ref ref="kafkaAppender"/>
</appender>
<!-- Restrict com.netflix.* to WARN and above; additivity=false stops these events
     from also propagating to root (the same three appenders are attached here). -->
<logger name="com.netflix" level="WARN" additivity="false">
<appender-ref ref="consoleAppender"/>
<appender-ref ref="ASYNC-rollingFileAppender"/>
<appender-ref ref="ASYNC-kafkaAppender" />
</logger>
<!-- Enable DEBUG for the request/response logging interceptor only; additivity=false
     prevents duplicate emission through root. -->
<logger name="com.be.pi.interceptor.RequestResponseLoggingInterceptor" level="DEBUG" additivity="false">
<appender-ref ref="consoleAppender"/>
<appender-ref ref="ASYNC-rollingFileAppender"/>
<appender-ref ref="ASYNC-kafkaAppender" />
</logger>
<!-- Everything else: INFO and above to console, async rolling file, and async Kafka. -->
<root level="info">
<appender-ref ref="consoleAppender"/>
<appender-ref ref="ASYNC-rollingFileAppender"/>
<appender-ref ref="ASYNC-kafkaAppender" />
</root>
</configuration>