ELK Deployment
Elasticsearch installation steps
- Pull the image
docker pull docker.elastic.co/elasticsearch/elasticsearch:8.17.0
- Start a temporary container
docker run -d --name elasticsearch -p 9200:9200 -it -m 1GB docker.elastic.co/elasticsearch/elasticsearch:8.17.0
- Copy the files out and mount the directories
mkdir -p /docker/elasticsearch/data
mkdir -p /docker/elasticsearch/logs
mkdir -p /docker/elasticsearch/config
mkdir -p /docker/elasticsearch/plugins
# Permissions
sudo chown -R 1000:1000 /docker/elasticsearch
sudo chmod -R 775 /docker/elasticsearch
docker cp elasticsearch:/usr/share/elasticsearch/config/. /docker/elasticsearch/config/
docker cp elasticsearch:/usr/share/elasticsearch/data/. /docker/elasticsearch/data/
docker cp elasticsearch:/usr/share/elasticsearch/logs/. /docker/elasticsearch/logs/
docker cp elasticsearch:/usr/share/elasticsearch/plugins/. /docker/elasticsearch/plugins/
# Remove the original container
docker stop elasticsearch
docker rm elasticsearch
# Run again (port 9300 is for cluster communication and is not mapped here)
docker run -d \
--restart=always \
--name elasticsearch \
-p 9200:9200 \
-e "discovery.type=single-node" \
-m 1GB \
-v /docker/elasticsearch/data/:/usr/share/elasticsearch/data \
-v /docker/elasticsearch/logs/:/usr/share/elasticsearch/logs \
-v /docker/elasticsearch/config/:/usr/share/elasticsearch/config \
-v /docker/elasticsearch/plugins/:/usr/share/elasticsearch/plugins \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
docker.elastic.co/elasticsearch/elasticsearch:8.17.0
# Set the elastic password
docker exec -it elasticsearch bin/elasticsearch-reset-password -u elastic
# Edit elasticsearch.yml
# Comment out:
# xpack.security.transport.ssl:
#   enabled: true
#   verification_mode: certificate
#   keystore.path: certs/transport.p12
#   truststore.path: certs/transport.p12
# xpack.security.http.ssl:
#   enabled: true
#   keystore.path: certs/http.p12
# Add:
# Disable SSL on the HTTP API
xpack.security.http.ssl.enabled: false
# Disable encryption for inter-node transport
xpack.security.transport.ssl.enabled: false
# elastic uZsbkop*xxx-usR=Lh
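If the container exits shortly after starting, a common cause on Linux hosts is a too-low vm.max_map_count; Elasticsearch requires at least 262144:
sudo sysctl -w vm.max_map_count=262144
# Make the setting persistent across reboots
echo "vm.max_map_count=262144" | sudo tee -a /etc/sysctl.conf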
- Visit http://192.168.0.33:9200/ and enter the password to check that the installation succeeded
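The same check works from a shell, assuming curl is installed; it prompts for the elastic password and should return a JSON document with the cluster name and version:
curl -u elastic http://192.168.0.33:9200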
Kibana
# Keep the version in sync with ES:
docker pull docker.elastic.co/kibana/kibana:8.17.0
# Start a temporary container
docker run -d --name kibana -p 5601:5601 docker.elastic.co/kibana/kibana:8.17.0
# Create the directories
sudo mkdir -p /docker/kibana/data
sudo mkdir -p /docker/kibana/logs
sudo mkdir -p /docker/kibana/config
sudo mkdir -p /docker/kibana/plugins
# Permissions
sudo chown -R 1000:1000 /docker/kibana
sudo chmod -R 775 /docker/kibana
# Copy the data out
docker cp kibana:/usr/share/kibana/config/. /docker/kibana/config/
docker cp kibana:/usr/share/kibana/data/. /docker/kibana/data/
docker cp kibana:/usr/share/kibana/logs/. /docker/kibana/logs/
docker cp kibana:/usr/share/kibana/plugins/. /docker/kibana/plugins/
# Remove the original container
docker stop kibana
docker rm kibana
# Start again
docker run -d \
--name kibana \
--restart=always \
-p 5601:5601 \
-v /docker/kibana/config/:/usr/share/kibana/config \
-v /docker/kibana/data/:/usr/share/kibana/data \
-v /docker/kibana/plugins/:/usr/share/kibana/plugins \
-v /docker/kibana/logs/:/usr/share/kibana/logs \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
docker.elastic.co/kibana/kibana:8.17.0
# Edit the config at /docker/kibana/config/kibana.yml
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://192.168.0.33:9200" ]
elasticsearch.username: "elastic"
elasticsearch.password: "uZsbkop*XXX-usR=Lh"
monitoring.ui.container.elasticsearch.enabled: true
# Note: the password here must not belong to the superuser account
# Steps to create a dedicated Kibana account
docker exec -it elasticsearch /bin/bash
bin/elasticsearch-users useradd kibana_user -p althico123 -r kibana_system
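Once the dedicated account exists, point kibana.yml at it instead of the elastic superuser (the username and password below simply reuse the values from the useradd command above), then restart the container:
# /docker/kibana/config/kibana.yml
elasticsearch.username: "kibana_user"
elasticsearch.password: "althico123"
# Apply the change
docker restart kibana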
- Visit http://192.168.0.33:5601/ and log in to check that the installation succeeded
Logstash
# Keep the version in sync with ES:
docker pull docker.elastic.co/logstash/logstash:8.17.0
# Start a temporary container
docker run -d --name logstash -p 5000:5000 -p 9600:9600 docker.elastic.co/logstash/logstash:8.17.0
# Create the directories
mkdir -p /docker/logstash/data
mkdir -p /docker/logstash/pipeline
mkdir -p /docker/logstash/config
# Permissions
sudo chown -R 1000:1000 /docker/logstash
sudo chmod -R 775 /docker/logstash
# Copy the data out
docker cp logstash:/usr/share/logstash/pipeline/. /docker/logstash/pipeline/
docker cp logstash:/usr/share/logstash/config/. /docker/logstash/config/
docker cp logstash:/usr/share/logstash/data/. /docker/logstash/data/
# Remove the original container
docker stop logstash
docker rm logstash
# Start again
docker run -d \
--name logstash \
--restart=always \
-p 5000:5000 \
-p 9600:9600 \
-v /docker/logstash/pipeline:/usr/share/logstash/pipeline \
-v /docker/logstash/config:/usr/share/logstash/config \
-v /docker/logstash/data:/usr/share/logstash/data \
-v /etc/localtime:/etc/localtime:ro \
-v /etc/timezone:/etc/timezone:ro \
docker.elastic.co/logstash/logstash:8.17.0
# Write the pipeline config (it goes in the mounted /docker/logstash/pipeline/ directory)
logstash.conf
input {
  tcp {
    port => 5000
    codec => json_lines
    host => "0.0.0.0" # Use 0.0.0.0 to accept logs from any IP
  }
}
filter {
  # No need to add an app_name field here; Logstash uses the field supplied by Logback
  # mutate {
  #   add_field => {
  #     "server_host" => "%{host}"
  #   }
  # }
  # Parse the timestamp field as ISO8601 (when the event already carries one)
  if [timestamp] {
    date {
      match => ["timestamp", "ISO8601"]
      timezone => "Asia/Shanghai"
    }
  } else {
    # No timestamp field: fall back to the current event time
    mutate {
      add_field => { "timestamp" => "%{@timestamp}" }
    }
  }
  # Tag each event according to its log level
  if [level] {
    mutate {
      add_tag => ["level_%{level}"]
    }
  }
}
output {
  # Ship to Elasticsearch
  elasticsearch {
    hosts => ["http://192.168.0.33:9200"]
    index => "java-logs-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "uZsbkop*mK9Wr-usR=Lh"
  }
  # Also print events to the console
  stdout {
    codec => rubydebug
  }
}
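A quick smoke test of the pipeline, assuming nc (netcat) is available: the json_lines codec reads newline-delimited JSON, which echo provides, so one event can be pushed straight at the TCP input and then looked up in Elasticsearch:
echo '{"level":"INFO","message":"hello from nc","timestamp":"2025-01-01T12:00:00+08:00"}' | nc 192.168.0.33 5000
# Verify the event landed in today's index (prompts for the elastic password)
curl -u elastic "http://192.168.0.33:9200/java-logs-*/_search?pretty"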
Integrating Logstash with Java
logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Define properties -->
    <property name="log.path" value="logs"/>
    <property name="log.pattern"
              value="%d{yyyy-MM-dd HH:mm:ss.SSS} -%5p ${PID} --- [%15.15t] %-40.40logger{39} : %m%n"/>
    <contextListener class="com.althico.common.config.LoggerStartupListener"/>
    <!-- Logstash JSON output -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>192.168.0.33:5000</destination> <!-- Logstash listening address -->
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp />
                <pattern>
                    <pattern>
                        {
                        "level": "%level",
                        "logger": "%logger",
                        "thread": "%thread",
                        "message": "%message",
                        "app_name": "althico-auth-%property{localIP}",
                        "timestamp": "%date{ISO8601}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${log.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <File>${log.path}/log.log</File>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <FileNamePattern>${log.path}/%d{yyyy-MM-dd}.%i.log</FileNamePattern>
            <!-- Keep each rolled-over log file for 300 days -->
            <maxHistory>300</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <!-- maxFileSize: size limit of the active file (the default is 10MB) -->
                <maxFileSize>10MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${log.pattern}</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <!-- Development -->
    <springProfile name="dev">
        <root level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="LOGSTASH"/>
        </root>
    </springProfile>
    <!-- Test -->
    <springProfile name="test">
        <root level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="FILE"/>
        </root>
    </springProfile>
    <!-- Production -->
    <springProfile name="prod">
        <root level="INFO">
            <appender-ref ref="FILE"/>
            <appender-ref ref="STDOUT"/>
        </root>
    </springProfile>
</configuration>
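Which springProfile block takes effect follows Spring's active profile, set for example in application.yml:
spring:
  profiles:
    active: dev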
Pom
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
</dependency>
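Note that no <version> is declared, which only resolves if a parent POM or BOM manages it; otherwise pin a version explicitly. The one below is an illustrative released version, so check Maven Central for the latest:
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>7.4</version> <!-- example version, not prescribed by the original -->
</dependency>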
LoggerStartupListener
package com.althico.common.config;

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.LoggerContextListener;
import ch.qos.logback.core.Context;
import ch.qos.logback.core.spi.ContextAwareBase;
import ch.qos.logback.core.spi.LifeCycle;
import com.althico.common.utils.NetworkUtils;

/**
 * LoggerStartupListener implements LoggerContextListener and LifeCycle and
 * performs initialization when the logging system starts. Its main job is to
 * look up the local IP address and store it as a property on the logger
 * context, so logback patterns can reference it via %property{localIP}.
 */
public class LoggerStartupListener extends ContextAwareBase
        implements LoggerContextListener, LifeCycle {

    private boolean started = false;

    @Override
    public boolean isResetResistant() {
        return false;
    }

    @Override
    public void onStart(LoggerContext loggerContext) {
        // Invoked when the logger context starts
    }

    @Override
    public void onReset(LoggerContext loggerContext) {
        // Invoked when the logger context is reset
    }

    @Override
    public void onStop(LoggerContext loggerContext) {
        // Invoked when the logger context stops
    }

    @Override
    public void onLevelChange(Logger logger, Level level) {
        // Invoked when a logger's level changes
    }

    @Override
    public void start() {
        if (started) {
            return;
        }
        Context context = getContext();
        context.putProperty("localIP", NetworkUtils.getRealLocalIP());
        started = true;
    }

    @Override
    public void stop() {
        // Invoked on shutdown
    }

    @Override
    public boolean isStarted() {
        return started;
    }
}
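NetworkUtils.getRealLocalIP() is the author's utility and is not shown here; below is a minimal hypothetical sketch, assuming the goal is the first non-loopback IPv4 address with a loopback fallback:
package com.althico.common.utils;

import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Enumeration;

/**
 * Hypothetical sketch of the NetworkUtils helper referenced above: returns
 * the first non-loopback IPv4 address, falling back to 127.0.0.1.
 */
public final class NetworkUtils {

    private NetworkUtils() {
    }

    public static String getRealLocalIP() {
        try {
            Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
            while (interfaces.hasMoreElements()) {
                NetworkInterface nic = interfaces.nextElement();
                // Skip loopback and inactive interfaces
                if (nic.isLoopback() || !nic.isUp()) {
                    continue;
                }
                Enumeration<InetAddress> addresses = nic.getInetAddresses();
                while (addresses.hasMoreElements()) {
                    InetAddress address = addresses.nextElement();
                    if (address instanceof Inet4Address && !address.isLoopbackAddress()) {
                        return address.getHostAddress();
                    }
                }
            }
        } catch (Exception e) {
            // Fall through to the loopback default below
        }
        return "127.0.0.1";
    }
}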
