Releases: sndyuk/logback-more-appenders
1.8.8
1.8.7
1.8.6
1.8.5
1.8.4
1.8.1
1.7.1
1.5.6
1.5.4
Added JSON encoder
<appender ...>
<!-- The FastJsonEncoder treats the log as JSON. -->
<encoder class="ch.qos.logback.more.appenders.encoder.FastJsonEncoder">
<pattern><![CDATA[{
"date": "%date{HH:mm:ss.SSS}",
"thread": "%thread",
"marker": "%marker",
"level": "%-5level",
"logger": "%logger",
"line": "%line",
"requestURI": "%X{req.requestURI}",
"message": "%msg"
}\n]]></pattern>
<!-- Remove redundant spaces and line breaks. -->
<compressSpace>true</compressSpace>
</encoder>
</appender>
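The %X{req.requestURI} field in the pattern above is read from the SLF4J MDC. In a web application it is typically populated by a servlet filter such as logback's MDCInsertingServletFilter; as a minimal sketch (the URI value and LOG instance are illustrative), it can also be set manually:
// Populate the MDC key referenced by %X{req.requestURI} (value is illustrative).
MDC.put("req.requestURI", "/orders/42");
LOG.info("Order received");
MDC.remove("req.requestURI");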
Added KinesisStreamLogbackAppender
<appender name="KINESIS_SYNC"
class="ch.qos.logback.more.appenders.KinesisStreamLogbackAppender">
<awsConfig>
<!-- [Optional] Place AwsCredentials.properties on the classpath. -->
<credentialFilePath>AwsCredentials.properties</credentialFilePath>
<region>ap-northeast-1</region>
</awsConfig>
<streamName>test-stream</streamName>
<shardCount>2</shardCount>
<!-- [Optional] Defaults to RandomPartitionKey. You can create a custom partition key by implementing the PartitionKey interface.
Tip: If you don't have multiple shards, you can use this partition key for better performance:
ch.qos.logback.more.appenders.KinesisStreamLogbackAppender$SinglePartitionKey
-->
<partitionKey class="ch.qos.logback.more.appenders.KinesisStreamLogbackAppender$RandomPartitionKey" />
<!-- [Optional] Create the stream automatically. -->
<createStreamDestination>true</createStreamDestination>
<!-- The minimum interval in millis between Kinesis API calls. -->
<emitInterval>100</emitInterval>
<encoder>
<pattern><![CDATA[[%thread] %-5level %logger{15}#%line %msg]]></pattern>
</encoder>
</appender>
<appender name="KINESIS" class="ch.qos.logback.classic.AsyncAppender">
<!-- Max queue size of logs waiting to be sent (when the queue reaches the max size, new logs are discarded). -->
<queueSize>999</queueSize>
<!-- Never block when the queue becomes full. -->
<neverBlock>true</neverBlock>
<!-- The maximum queue flush time allowed during appender stop (default: 10000 millis).
If the worker takes longer than this, it exits and discards any remaining items in the queue.
-->
<maxFlushTime>100</maxFlushTime>
<appender-ref ref="KINESIS_SYNC" />
</appender>
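The two appenders above only define the destination; to actually route logs to Kinesis, a logger has to reference the asynchronous wrapper. A minimal sketch (the level is arbitrary):
<root level="INFO">
<appender-ref ref="KINESIS" />
</root>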
1.5.2
1. Added CloudWatchLogbackAppender. Please refer to the sample configuration:
<appender name="CLOUDWATCH_SYNC"
class="ch.qos.logback.more.appenders.CloudWatchLogbackAppender">
<awsConfig>
<!-- [Optional] Place AwsCredentials.properties on the classpath. -->
<credentialFilePath>AwsCredentials.properties</credentialFilePath>
<region>ap-northeast-1</region>
</awsConfig>
<logGroupName>test-log</logGroupName>
<!-- [Optional] Automatically roll the current log stream and resume logging in a new stream. -->
<logStreamRolling class="ch.qos.logback.more.appenders.CountBasedStreamName">
<!-- The limit is not checked exactly; the stream can exceed it depending on emitInterval and maxFlushTime. -->
<limit>100000</limit>
</logStreamRolling>
<!-- [Optional] Or you can specify a static stream name.
<logStreamName>my-log-stream</logStreamName>
-->
<!-- [Optional] Create the log group and stream automatically. -->
<createLogDestination>true</createLogDestination>
<!-- The minimum interval in millis between CloudWatch API calls. -->
<emitInterval>100</emitInterval>
<encoder>
<pattern><![CDATA[[%thread] %-5level %logger{15}#%line %msg]]></pattern>
</encoder>
</appender>
<appender name="CLOUDWATCH" class="ch.qos.logback.classic.AsyncAppender">
<!-- Max queue size of logs waiting to be sent (when the queue reaches the max size, new logs are discarded). -->
<queueSize>999</queueSize>
<!-- Never block when the queue becomes full. -->
<neverBlock>true</neverBlock>
<!-- The maximum queue flush time allowed during appender stop (default: 10000 millis).
If the worker takes longer than this, it exits and discards any remaining items in the queue.
-->
<maxFlushTime>100</maxFlushTime>
<appender-ref ref="CLOUDWATCH_SYNC" />
</appender>
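Application code does not need anything CloudWatch-specific: once a logger references the CLOUDWATCH appender, logging goes through the standard SLF4J API. A minimal sketch (class name and message are illustrative):
Logger log = LoggerFactory.getLogger(PaymentService.class);
// Delivered to the "test-log" log group through the CLOUDWATCH appender.
log.info("Payment processed");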
2. Accept a dynamic Map<String, ?> as additional log properties. #32
// Attach arbitrary key/value pairs to a single log event via a MapMarker.
Map<String, String> map = new HashMap<String, String>();
map.put("key1", "value1");
map.put("key2", "value2");
MapMarker mapMarker = new MapMarker("MAP_MARKER", map);
LOG.debug(mapMarker, "Test the marker map.");
3. Added a simple marker filter for appenders. #33
<appender ...>
<!-- A simple marker filter, instead of writing a complex configuration with EvaluatorFilter -->
<filter class="ch.qos.logback.more.appenders.filter.AppendersMarkerFilter">
<marker>SECURITY_ALERT</marker>
</filter>
...
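On the logging side, the marker matched by this filter is a plain SLF4J marker. A minimal sketch (the message is illustrative):
Marker securityAlert = MarkerFactory.getMarker("SECURITY_ALERT");
// Only events carrying this marker pass the AppendersMarkerFilter above.
LOG.warn(securityAlert, "Multiple failed login attempts detected");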