ELK Installation and Deployment

 

Java environment

# Create the directory
mkdir /usr/local/java/

# Extract
tar -zxvf jdk-8u333-linux-x64.tar.gz -C /usr/local/java/

# Configure environment variables
vim /etc/profile
 
export JAVA_HOME=/usr/local/java/jdk1.8.0_333
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# Apply the environment variables
source /etc/profile

# Add a symlink
ln -sf /usr/local/java/jdk1.8.0_333/bin/java /usr/bin/java

# Check the Java version
java -version
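If the environment is wired up correctly, the output should look roughly like this (the exact build details depend on your JDK package):

java version "1.8.0_333"
Java(TM) SE Runtime Environment (build 1.8.0_333-bXX)
Java HotSpot(TM) 64-Bit Server VM (build 25.333-bXX, mixed mode)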

groupadd elsearch  # add a group for running the ELK components
useradd elsearch -g elsearch -p elsearch  # add a user: <username> -g <group> -p <password>
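Note that useradd -p expects an already-encrypted password, so the plain-text value above will not work as a login password. A simpler alternative is to set it interactively afterwards:

# set (or reset) the elsearch user's password interactively
passwd elsearch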

Install Elasticsearch

# Download
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.16.2-linux-x86_64.tar.gz

# Extract
tar -zxvf elasticsearch-7.16.2-linux-x86_64.tar.gz -C /usr/local/

# Change ownership to the elsearch user
chown -R elsearch:elsearch /usr/local/elasticsearch-7.16.2

Modify the configuration file

cd /usr/local/elasticsearch-7.16.2/config

vim elasticsearch.yml

# ========== Changes are as follows ==================

discovery.type: single-node
# Data path (set to whatever location suits your environment)
path.data: /usr/local/elasticsearch-7.16.2/data
# Log path (set to whatever location suits your environment)
path.logs: /usr/local/elasticsearch-7.16.2/logs
# Bind address (usually the internal address; 0.0.0.0 listens on all interfaces)
network.host: 0.0.0.0
# Port
http.port: 9200
# CORS (add these two entries manually)
http.cors.enabled: true
http.cors.allow-origin: "*"

Start ES

# Switch to the elsearch user
su elsearch

cd /usr/local/elasticsearch-7.16.2/bin

# Start in the background
./elasticsearch -d

# Check the log to confirm startup succeeded
# (the log file is named after cluster.name; with the default name it is elasticsearch.log)
tail -fn 100 /usr/local/elasticsearch-7.16.2/logs/my-elasticsearch.log
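Once the node is up, a quick way to confirm it is responding (host/port as configured above):

# should return a small JSON document with the node name and version
curl http://127.0.0.1:9200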

config/jvm.options controls the JVM heap size at startup (4G by default); adjust it to fit your server.
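For example, to set the heap to 2G, the relevant lines in config/jvm.options look like this (keep -Xms and -Xmx identical; the value here is only an illustration):

# minimum JVM heap size
-Xms2g
# maximum JVM heap size
-Xmx2g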

 

 

Install Logstash

# Download
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.16.2-linux-x86_64.tar.gz

# Extract
tar -zxvf logstash-7.16.2-linux-x86_64.tar.gz -C /usr/local/

Modify the configuration

# Edit the configuration, starting from logstash-sample.conf
cd /usr/local/logstash-7.16.2/config

# Copy logstash-sample.conf and name the copy logstash.conf
cp logstash-sample.conf logstash.conf

# Edit the configuration
vim logstash.conf

# ========== Changes are as follows ==================
input {
  beats{
    port => 5044    # must match the port in Filebeat's output.logstash hosts
  }
}

filter {
  # NOTE: the multiline filter plugin is not bundled with Logstash 7.x; either install it
  # (bin/logstash-plugin install logstash-filter-multiline) or, preferably, handle multiline
  # events on the Filebeat side before they reach Logstash.
  multiline {
    pattern => "^gateway|^service-1|^hn.kd.ny.adsl"   # split events on these leading keywords; adjust to your log format
    negate => true
    what => "previous"
  }

  

  if "crm-business" in [fields][project] {
    mutate {
      add_field => { "index_prefix" => "crm-business" }
    }
  }
  else if "crm-businessclue" in [fields][project] {
    mutate {
      add_field => { "index_prefix" => "crm-businessclue" }
    }
  }

  else {
    mutate {
      add_field => { "index_prefix" => "filebeat-default" }
      }
  }
}



# The output below writes each event to its per-project index, falling back to the default index


output {
  elasticsearch {
    hosts => ["http://localhost:9200"]
    #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    index => "%{[index_prefix]}-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "admin123"
    }
}
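Before starting, the pipeline file can be checked for syntax errors (optional, but handy):

cd /usr/local/logstash-7.16.2/bin
./logstash -f /usr/local/logstash-7.16.2/config/logstash.conf --config.test_and_exit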

 

Start Logstash

cd /usr/local/logstash-7.16.2/bin

# Start command
nohup ./logstash -f /usr/local/logstash-7.16.2/config/logstash.conf > ../logs/logstash.log 2>&1 &

# Use jps to check that the process is running
jps

# config/jvm.options controls Logstash's JVM heap size and can be tuned the same way as for Elasticsearch

Install Kibana

# Download
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.16.2-linux-x86_64.tar.gz

# Extract
tar -zxvf kibana-7.16.2-linux-x86_64.tar.gz -C /usr/local/

# Change ownership to the elsearch user
chown -R elsearch:elsearch /usr/local/kibana-7.16.2-linux-x86_64



cd /usr/local/kibana-7.16.2-linux-x86_64/config
vim kibana.yml

# ========== Changes are as follows ==================

# Server port (as needed)
server.port: 5601
# Server host (the server's internal address; 0.0.0.0 listens on all interfaces)
server.host: "0.0.0.0"
# Server name (as needed)
server.name: "kibana"
# Elasticsearch address
elasticsearch.hosts: ["http://127.0.0.1:9200"]

elasticsearch.username: "kibana_system"
elasticsearch.password: "admin123"

# Use Simplified Chinese for the UI
i18n.locale: "zh-CN"

Neither Elasticsearch nor Kibana can be started as root; run them as the elsearch user.

 

Start

cd /usr/local/kibana-7.16.2-linux-x86_64/bin

# Switch users
su elsearch

# Foreground start; exits when the shell window is closed
./kibana

# Background start
nohup ./kibana &

# Check that the port is listening
netstat -tunlp | grep 5601

 

If startup fails with "Kibana server is not ready yet", the common causes are:

Kibana and Elasticsearch versions do not match
The Elasticsearch address configured for Kibana is unreachable (elasticsearch.hosts wrong or the host not reachable)  ***  (this is the usual one)
elasticsearch.hosts not set in kibana.yml at all (easy to forget when running in a container)
Cross-origin access disabled in Elasticsearch
Firewall rules or a wrong port
The disk holding the Elasticsearch data is nearly full (ES disk watermarks kick in around 85-95% usage)

Two quick checks are sketched below.
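Quick checks, assuming the local addresses used above (adjust host/port to your environment):

# Can the Kibana host actually reach Elasticsearch?
curl http://127.0.0.1:9200

# How full are the data disks on the Elasticsearch nodes?
curl "http://127.0.0.1:9200/_cat/allocation?v"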

 

 

 

Install Filebeat

Download Filebeat from the official site and keep its version consistent with the rest of the stack (7.16.2 here); the relevant parts of filebeat.yml follow.
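A minimal download-and-extract sketch, assuming the standard artifacts.elastic.co URL layout:

# Download
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.16.2-linux-x86_64.tar.gz

# Extract
tar -zxvf filebeat-7.16.2-linux-x86_64.tar.gz -C /usr/local/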

- type: filestream

  # Unique ID among all inputs, an ID is required.
  id: my-filestream-id

  # Change to true to enable this input configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/null.log
    #- c:\programdata\elasticsearch\logs\*



- type: log
  enabled: true
  paths:
    - /var/log/service/oss-service-prod1.log
  fields:
    project: "oss-service-prod1"


- type: log
  enabled: true
  paths:
    - /var/log/service/oss-service-prod2.log
  fields:
    project: "oss-service-prod2"

# Filebeat uses the Logstash output here (the Elasticsearch output below stays commented out).
# Note: the fields.project values above must match the [fields][project] checks in logstash.conf,
# otherwise events fall through to the filebeat-default index.

# ---------------------------- Elasticsearch Output ----------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["localhost:9200"]

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"
# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["127.0.0.1:5044"]

# hosts is the Logstash address; mind internal vs. external addressing, and the port must match the beats input port in logstash.conf (5044 here)
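Filebeat itself also needs to be started; a minimal sketch, run from the extracted Filebeat directory (the log path is just an example):

# Start Filebeat in the background with the config above
nohup ./filebeat -e -c filebeat.yml > /tmp/filebeat.log 2>&1 &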

 


 

 

Cleaning up expired ES indices: two approaches. The first is an ILM policy that deletes data older than 7 days; the second is a script that deletes the indices from 30 days ago.

PUT /_ilm/policy/auto_delete_policy
{
    "policy": {
        "phases": {
            "delete": {
                "min_age": "7d",
                "actions": {
                    "delete": {}
                }
            }
        }
    }
}
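The policy above does nothing by itself; it has to be referenced by the indices (or an index template) it should manage. A sketch using a composable index template, where the template name and index patterns are assumptions to adapt to your own index names:

PUT /_index_template/auto_delete_template
{
  "index_patterns": ["crm-*", "filebeat-default-*"],
  "template": {
    "settings": {
      "index.lifecycle.name": "auto_delete_policy"
    }
  }
}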
#!/bin/bash
# Delete ELK indices from 30 days ago (intended to be run daily)
# Compute the date embedded in the index names, here in the form %Y.%m.%d (e.g. 2023.07.04)
DATE=`date -d "30 days ago" +%Y.%m.%d`

# List all indices; index names follow <prefix>-<date>, so grep the date to find the ones to delete
curl -s -XGET http://127.0.0.1:9200/_cat/indices?v | grep $DATE | awk -F '[ ]+' '{print $3}' > /tmp/elk.log

# Call the delete API for each matching index
for elk in `cat /tmp/elk.log`
do
  curl -XDELETE "http://127.0.0.1:9200/$elk"
done
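To run the script every day, a crontab entry like the following can be used (the script path is an assumption):

# run the index cleanup at 01:00 every day
0 1 * * * /bin/bash /usr/local/scripts/clean_es_indices.sh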

 
