- 簡介
在分布式系統(tǒng)中,為了保證每個(gè)服務(wù)的可用性,通常會(huì)將每個(gè)服務(wù)以集群的方式部署,為了集中管理分布式系統(tǒng)下的日志,便于對日志的分析,引入ELK做日志采集分析,通常的做法是通過logstash監(jiān)聽日志文件,但此方式很難對每條日志具體的切割,針對不同的業(yè)務(wù)場景,我們需要對日志以不同的維度拆分開,所以引入logstash-logback-encoder并通過tcp的方式將日志發(fā)送到logstash。
logstash-logback-encoder項目地址: GITHUB
- 引入logstash-logback-encoder依賴
<!-- logstash-logback-encoder -->
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.3</version>
</dependency>
- 在logback配置文件中配置logstash對應(yīng)的appender
<!-- 為logstash輸出的Appender -->
<appender name="LOGSTASHEVENT" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- 填入logstash監(jiān)聽地址 -->
<destination>${logback.address1}</destination>
<destination>${logback.address2}</destination>
<!-- 負(fù)載均衡策略 輪詢 -->
<connectionStrategy>
<roundRobin>
<connectionTTL>5 minutes</connectionTTL>
</roundRobin>
</connectionStrategy>
<!-- keep alive -->
<keepAliveDuration>5 minutes</keepAliveDuration>
<!-- 連接失敗重試 -->
<reconnectionDelay>1 second</reconnectionDelay>
<!-- write buffer -->
<writeBufferSize>16384</writeBufferSize>
<!-- 日志監(jiān)聽器 -->
<listener class="connect.service.config.LogBackEventListener"/>
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
<!--<timeZone>UTC</timeZone>-->
<!-- 時(shí)間戳 -->
<timestampPattern>yyyy-MM-dd'T'HH:mm:ss.SSS</timestampPattern>
<!-- 添加額外字段 -->
<customFields>{"extraField":"value"}</customFields>
<!-- 不包含context字段 -->
<!--<includeContext>false</includeContext>-->
</encoder>
</appender>
- logstash配置
input {
tcp {
port => 4560
codec => json_lines
}
}
filter {
mutate {
remove_field => ["level_value", "index_name", "port"]
}
}
output {
elasticsearch {
hosts => ["http://120.26.233.25:9200"]
index => "%{[app_name]}"
# 模版配置文件地址
template => "/etc/logstash/app.json"
# 模版配置索引名稱
template_name => "templateName*"
template_overwrite => true
}
stdout {codec => rubydebug}
}
模版配置文件
{
"index_patterns": "templateName*",
"order" : 0,
"settings": {
"index.number_of_shards": 5,
"number_of_replicas": 0,
"index" : {
"refresh_interval" : "5s"
}
},
"mappings": {
"_default_": {
"dynamic_templates": [{
"message_field": {
"match": "message",
"match_mapping_type": "string",
"mapping": {
"type": "text"
}
}
}, {
"string_fields": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword"
}
}
}],
"properties": {
"@timestamp": {
"type": "date"
},
"@version": {
"type": "long"
},
"geoip": {
"dynamic": true,
"properties": {
"ip": {
"type": "ip"
},
"location": {
"type": "geo_point"
},
"latitude": {
"type": "float"
},
"longitude": {
"type": "float"
}
}
}
}
}
}
}
- 代碼中使用方式
import static net.logstash.logback.argument.StructuredArguments.*;
// 日志信息為: log message value,json列為: "name":"value"
logger.info("log message {}", value("name", "value"));
// 日志信息為: log message name=value,json列為: "name":"value"
logger.info("log message {}", keyValue("name", "value"));
// 更多使用方式可以參考github項(xiàng)目文檔