Integrating EFK 7.17.17 with Spring Boot 2.7.6 for Log Monitoring
1. Environment Setup
Packages to prepare on Ubuntu 22.04:
elasticsearch-7.17.17, filebeat-7.17.17, kibana-7.17.17, analysis-ik 7.17.17 (IK tokenizer plugin, friendly to Chinese word segmentation), logstash-7.17.17 (optional)
EFK and ELK differ only in whether Filebeat or Logstash collects the logs:
Filebeat: written in Go; lightweight, small memory footprint, simple feature set
Logstash: runs on the JVM; heavyweight, large memory footprint, full-featured (parsing, filtering, enrichment)
You can also combine the two (ELFK): Filebeat ships logs to Logstash, which then forwards them to ES. That already sounds like extra moving parts, so it is not recommended here.
Download page for the packages above: Past Releases of Elastic Stack Software | Elastic
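For convenience, the tarballs can also be pulled with wget. The URLs below follow Elastic's usual artifact naming scheme; verify them against the release page before relying on them:
# Assumed artifact URLs; confirm on the Elastic releases page
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.17-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.17-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.17-linux-x86_64.tar.gz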
IK tokenizer plugin download: index of analysis-ik/stable/. Install ES first by following the steps below, then unzip the plugin package into the plugins directory of the Elasticsearch installation folder, as sketched next.
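Once ES is unpacked (steps below), the plugin installation might look like this; the zip file name is an assumption based on the version above:
# Unzip the plugin into its own folder under plugins/, then verify it is listed
mkdir /elk/elasticsearch-7.17.17/plugins/ik
unzip elasticsearch-analysis-ik-7.17.17.zip -d /elk/elasticsearch-7.17.17/plugins/ik
/elk/elasticsearch-7.17.17/bin/elasticsearch-plugin list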
****************************** ES environment configuration ******************************************
# Logstash was tried first, so the directory ended up named elk
mkdir /elk
# Create a dedicated user to run ES (ES refuses to start as root)
useradd es
passwd es
chown -R es /elk
vim /etc/sysctl.conf
# Add the following:
vm.max_map_count = 262144
# Reload the /etc/sysctl.conf settings
sysctl -p
# Raise the max open-file limits for the ES process
vim /etc/security/limits.conf
# Add the following:
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
# End of file
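A quick way to confirm both settings took effect (the limits.conf change only applies to new login sessions):
# Should print vm.max_map_count = 262144
sysctl vm.max_map_count
# Should print 65536 when run in a fresh session of the es user
su - es -c 'ulimit -n'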
****************************** Install Elasticsearch ******************************************
tar -xzf elasticsearch-7.17.17-linux-x86_64.tar.gz -C /elk
cd /elk/elasticsearch-7.17.17
mkdir data
cd /elk/elasticsearch-7.17.17/config/
# Edit the ES configuration
vi elasticsearch.yml
# Add the following:
node.name: node-1
# ---------------------------------- Network -----------------------------------
# Local machine IP
# network.host: 192.168.0.1
network.host: 0.0.0.0
# Note: ES defaults to 9200; this guide uses 9100 throughout
http.port: 9100
path.data: /elk/elasticsearch-7.17.17/data
path.logs: /elk/elasticsearch-7.17.17/logs
# --------------------------------- Discovery ----------------------------------
# Single-node setup: only one master-eligible node
cluster.initial_master_nodes: ["node-1"]
# Disable security/TLS; this runs on an internal network only, so no certificates are needed
xpack.security.enabled: false
xpack.security.transport.ssl.enabled: false
# Configure the JVM heap (custom files under jvm.options.d must end in .options)
cd jvm.options.d
touch jvm.options
vi jvm.options
# Add the following:
-Xms1g
-Xmx1g
# Point ES at its bundled JDK
export ES_JAVA_HOME=/elk/elasticsearch-7.17.17/jdk/
echo "export ES_JAVA_HOME=$ES_JAVA_HOME" >> ~/.bashrc
source ~/.bashrc
echo $ES_JAVA_HOME
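A quick check that the variable points at the JDK bundled with the ES tarball:
# Should report the bundled OpenJDK version
"$ES_JAVA_HOME/bin/java" -version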
****************************** Install Kibana ******************************************
tar -xzf kibana-7.17.17-linux-x86_64.tar.gz -C /elk
# The es user must own the Kibana directory (Kibana writes log/kibana.log itself)
chown -R es /elk/kibana-7.17.17-linux-x86_64
cd /elk/kibana-7.17.17-linux-x86_64/config
vi kibana.yml
server.port: 5601
# "localhost" restricts access to this machine; use "0.0.0.0" to reach Kibana remotely
server.host: "localhost"
elasticsearch.hosts: ["http://localhost:9100"]
i18n.locale: "zh-CN"
****************************** Install Filebeat ******************************************
tar -xzf filebeat-7.17.17-linux-x86_64.tar.gz -C /elk
cd /elk/filebeat-7.17.17-linux-x86_64/
vi filebeat.yml
###################### Filebeat Configuration Example #########################
filebeat.inputs:
  # Application log: the Spring Boot app's nohup.out
  - type: log
    id: sd-log
    enabled: true
    paths:
      - /opt/sd/comf/nohup.out
    # document_type was removed from Beats inputs in 6.0; define it as a custom
    # field instead, which the output conditions below match as fields.document_type
    fields:
      document_type: efk_sd_log
    scan_frequency: 10s
  # Elasticsearch's own log file
  - type: log
    id: es-log
    enabled: true
    paths:
      - /elk/elasticsearch-7.17.17/logs/elasticsearch.log
    fields:
      document_type: efk_es_log
    scan_frequency: 5s
# ============================== Filebeat modules ==============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  #path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: true
# ======================= Elasticsearch template setting =======================
setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # If you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "localhost:5601"
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ILM must be off when writing to the custom index names below
setup.ilm.enabled: false
# Template name/pattern should cover the custom indices (efk_log, efk_sd_log, efk_es_log)
setup.template.name: "efk_log"
setup.template.pattern: "efk_*"
setup.template.overwrite: true
setup.template.enabled: false
# ---------------------------- Elasticsearch Output ----------------------------
# Mutually exclusive with the Logstash output below; configure one or the other
output.elasticsearch:
  hosts: ["http://127.0.0.1:9100"]
  # Fallback index for events matching none of the conditions below
  index: "efk_log"
  indices:
    - index: "efk_sd_log"
      when:
        equals:
          fields.document_type: "efk_sd_log"
    - index: "efk_es_log"
      when:
        equals:
          fields.document_type: "efk_es_log"
  # Max events per bulk request and forced flush interval
  bulk_max_size: 50
  flush_interval: 10s
  # Protocol - either `http` (default) or `https`.
  #protocol: "https"
  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"
# ------------------------------ Logstash Output -------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["127.0.0.1:9061"]
  # Optional SSL. By default it is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
# ================================= Processors =================================
processors:
  - add_host_metadata:
      when:
        not:
          contains:
            tags: "forwarded"
  #- add_cloud_metadata: ~
  #- add_docker_metadata: ~
  #- add_kubernetes_metadata: ~
# Silence filebeat's own monitoring and internal metrics logging
monitoring:
  enabled: false
logging:
  metrics:
    enabled: false
filebeat.modules:
  - module: system
    enabled: false
# After editing, check that the configuration parses
./filebeat test config -c filebeat.yml
# Expected output: Config OK
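Connectivity to the configured ES output can be probed the same way; each check should report OK (run this after ES is up):
./filebeat test output -c filebeat.yml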
****************************** Run ******************************************
# Start the three services; for long-running setups, systemd is recommended (see the sketch below)
# ES refuses to run as root, so start it as the es user from /elk/elasticsearch-7.17.17/bin
su - es
nohup ./elasticsearch > es.log 2>&1 &
# Stop (ES runs as a java process, so match on the full command line)
sudo pkill -f elasticsearch
nohup ./kibana > kibana.log 2>&1 &
# Stop
sudo pkill -f kibana
nohup ./filebeat -e -c filebeat.yml > filebeat.log 2>&1 &
# Stop
sudo pkill -f filebeat
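As a sketch of the systemd option mentioned above (the unit file is this guide's assumption, not something shipped by Elastic; Kibana and filebeat units would follow the same pattern):
sudo tee /etc/systemd/system/elasticsearch.service > /dev/null <<'EOF'
[Unit]
Description=Elasticsearch 7.17.17
After=network.target

[Service]
User=es
Environment=ES_JAVA_HOME=/elk/elasticsearch-7.17.17/jdk
ExecStart=/elk/elasticsearch-7.17.17/bin/elasticsearch
LimitNOFILE=65536
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl enable --now elasticsearch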
EFK query test
Open Kibana, go to Dev Tools, and query the indices created above. Note the index pattern must match the names configured in filebeat (efk_*, not elk-log):

GET efk_*/_search
{
  "query": {
    "fuzzy": {
      "message": {
        "value": "info",
        "fuzziness": "AUTO"
      }
    }
  },
  "sort": [
    { "@timestamp": { "order": "desc" } }
  ]
}
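Since the IK plugin was installed earlier, it can also be smoke-tested from the shell with the _analyze API; ik_smart is one of the two analyzers the plugin registers, and the sample text here is arbitrary:
# Expect whole Chinese words in the tokens, not single characters
curl -X POST "http://localhost:9100/_analyze" -H 'Content-Type: application/json' -d'
{
  "analyzer": "ik_smart",
  "text": "中文分词测试"
}'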
2. Spring Boot Integration
To be continued...
