ES (Elasticsearch / Kibana / Filebeat) deployment notes

Elasticsearch official downloads: https://www.elastic.co/cn/downloads/
Kibana official downloads: https://www.elastic.co/cn/downloads/

es1: 192.168.0.11

es2: 192.168.0.22


yum install java

tar -zxvf elasticsearch-7.9.3-linux-x86_64.tar.gz
mkdir /usr/elasticsearch
mv /root/elasticsearch-7.9.3/* /usr/elasticsearch/

vim /usr/elasticsearch/config/elasticsearch.yml

cluster.name: myes
node.name: node-1              # node.name must be unique on every ES node
path.data: /usr/elasticsearch/data
path.logs: /usr/elasticsearch/log
network.host: 192.168.0.11
http.port: 9200
#discovery.seed_hosts: ["192.168.0.11", "192.168.0.12"]   # for cluster discovery; uncomment when configuring the second node's yml
cluster.initial_master_nodes: ["node-1"]
http.cors.enabled: true
http.cors.allow-origin: "*"


vim /usr/elasticsearch/config/jvm.options

-Xms512m
-Xmx512m


useradd estest
passwd estest


chown -R estest /usr/elasticsearch/

vim /etc/sysctl.conf

vm.max_map_count=655360



sysctl -p

vim /etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
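
After re-login the new limits should be visible to the estest user; a quick check:

su - estest -c 'ulimit -n'   # open files, expect 65536
su - estest -c 'ulimit -u'   # max user processes, expect 4096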


su estest

cd /usr/elasticsearch
./bin/elasticsearch


curl ip:9200/_cat   # test
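
A few more read-only endpoints are handy for verifying the node and the cluster:

curl http://192.168.0.11:9200                  # node name, cluster name, version
curl http://192.168.0.11:9200/_cat/health?v    # cluster status: green/yellow/red
curl http://192.168.0.11:9200/_cat/nodes?v     # which nodes have joined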

#######################################################

mkdir /usr/kibana
mv /root/kibana*.*/* /usr/kibana/

chown -R estest /usr/kibana/

vim /usr/kibana/config/kibana.yml

server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://localhost:9200"]
i18n.locale: "zh-CN"


su estest
./bin/kibana

curl ip:5601
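
Kibana also exposes a status API once it is up:

curl -s http://192.168.0.11:5601/api/status    # overall and per-plugin status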


#######################

Download filebeat-7.9.3-linux-x86_64.tar.gz

tar zxvf filebeat-7.9.3-linux-x86_64.tar.gz

mkdir /usr/filebeat
mv ~/filebeat-7.9.3-linux-x86_64/* /usr/filebeat/

vim /usr/filebeat/filebeat.yml

output.elasticsearch:
  hosts: ["192.168.0.11:9200"]
  username: "estest"
  password: "111111"
setup.kibana:
  host: "192.168.0.11:5601"
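
Filebeat can validate the config and the output connection before anything is started (run from /usr/filebeat):

./filebeat test config    # check filebeat.yml syntax and settings
./filebeat test output    # check connectivity to Elasticsearch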

 ./filebeat modules enable system

The module's config file is modules.d/system.yml
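
To confirm which modules are on:

./filebeat modules list   # the Enabled section should now include system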

Load the dashboards with the setup command:

./filebeat setup

./filebeat -e


################################

ES deployment script

#!/bin/bash
yum install java -y

tar -zxvf elasticsearch-7.9.3-linux-x86_64.tar.gz
mkdir /usr/elasticsearch
mv /root/elasticsearch-7.9.3/* /usr/elasticsearch/

#vim /usr/elasticsearch/config/elasticsearch.yml
#node.name: node-1
#network.host: 0.0.0.0
#http.port: 9200
#cluster.initial_master_nodes: ["node-1"]

sed -i '/^\#node.name/c\node.name: node-1' /usr/elasticsearch/config/elasticsearch.yml
sed -i '/^\#network.host/c\network.host: 0.0.0.0' /usr/elasticsearch/config/elasticsearch.yml
sed -i '/^\#http.port/c\http.port: 9200' /usr/elasticsearch/config/elasticsearch.yml
sed -i '/^\#cluster.initial_master_nodes/c\cluster.initial_master_nodes: ["node-1"]' /usr/elasticsearch/config/elasticsearch.yml



#vim /usr/elasticsearch/config/jvm.options
#-Xms512m
#-Xmx512m
sed -i 's#Xms1g#Xms512m#' /usr/elasticsearch/config/jvm.options
sed -i 's#Xmx1g#Xmx512m#' /usr/elasticsearch/config/jvm.options

useradd estest
echo 111111|passwd --stdin estest


chown -R estest /usr/elasticsearch/

cat >> /etc/sysctl.conf<<EOF
vm.max_map_count=655360
EOF

sysctl -p

cat >>/etc/security/limits.conf<<EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
EOF
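
The script stops short of starting ES. Assuming it is saved as es_deploy.sh (the name is arbitrary), a possible follow-up:

bash es_deploy.sh
# start ES daemonized as the estest user, writing a pid file
runuser -l estest -c '/usr/elasticsearch/bin/elasticsearch -d -p /tmp/es.pid'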


ES systemd unit file

cat > /usr/lib/systemd/system/elastic.service << EOF
[Unit]
Description=elasticsearch service
After=syslog.target
After=network.target

[Service]
User=estest
Group=estest
LimitNOFILE=128000
LimitNPROC=128000
LimitMEMLOCK=infinity
Restart=on-failure
KillMode=process
ExecStart=/usr/elasticsearch/bin/elasticsearch
ExecReload=/bin/kill -HUP \$MAINPID
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
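
Reload systemd and enable the unit (the kibana and filebeat units below are enabled the same way):

systemctl daemon-reload
systemctl enable --now elastic
systemctl status elastic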


Some advanced ES settings, still to be studied:

cat elasticsearch.yml
# Cluster name. Nodes can join the same cluster only if cluster.name matches. Use a descriptive name, and do not reuse one cluster name across environments.
cluster.name: k8s-es
# Node name. By default Elasticsearch uses the first 7 characters of a randomly generated UUID. This value supports system variables.
node.name: ${HOSTNAME}
# Lock memory on startup and disable swap to improve ES performance. Other settings must be adjusted along with this one, discussed below.
bootstrap.memory_lock: true
# Disable SecComp
bootstrap.system_call_filter: false
# Address to listen on; the address clients use to reach this node.
network.host: 192.168.2.175
# HTTP port to listen on.
http.port: 9200
# Compress data on TCP transport
transport.tcp.compress: true
# Node discovery inside the cluster, scanning ports 9300-9305. List the addresses of all master-eligible nodes.
discovery.seed_hosts: ["192.168.2.175","192.168.2.176", "192.168.2.177"]
# The initial set of master-eligible nodes in a brand-new cluster. Defaults to empty, meaning this node expects to join an already bootstrapped cluster.
cluster.initial_master_nodes: ["192.168.2.175","192.168.2.176", "192.168.2.177"]
# How many nodes must be reachable during master election
discovery.zen.minimum_master_nodes: 2
# Start recovery as soon as this many nodes have joined the cluster
gateway.recover_after_nodes: 2
# If the expected node count is not reached, wait this long and then start shard recovery
gateway.recover_after_time: 10m
# Expected number of nodes in the cluster. Once this many have joined, local shard recovery starts immediately on each node. The default is 0, i.e. no waiting.
gateway.expected_nodes: 3
# Number of concurrent threads recovering initial primaries
cluster.routing.allocation.node_initial_primaries_recoveries: 8
# Maximum number of concurrent shard relocations per node
cluster.routing.allocation.node_concurrent_recoveries: 8
# Maximum bandwidth for data transfer between nodes
indices.recovery.max_bytes_per_sec: 100mb
# How many nodes may run on one machine
node.max_local_storage_nodes: 1
# Whether this node is eligible to become master: true on 192.168.2.175-177, false on 192.168.2.185, 187 and 3.62
node.master: true
# Whether this node stores data as a data node: false on 192.168.2.175-177 and 3.62, true on 192.168.2.185 and 187
node.data: false
# Fielddata cache memory limit
indices.fielddata.cache.size: 30%
# In-flight requests circuit breaker
network.breaker.inflight_requests.limit: 80%
# (paid feature; requires xpack.ml.enabled=true first, not covered here)
node.ml: false
xpack.ml.enabled: false
# Enable X-Pack monitoring
xpack.monitoring.enabled: true
# ES thread pool settings
thread_pool:
    write:
       queue_size: 200
# Enable ES security settings
xpack.security.enabled: true
# Enable SSL between cluster nodes; required when cluster credentials are configured
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: ./elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: ./elastic-certificates.p12
# Enable HTTPS on port 9200
#xpack.security.http.ssl.enabled: true
#xpack.security.http.ssl.keystore.path: ./elastic-certificates.p12
#xpack.security.http.ssl.truststore.path: ./elastic-certificates.p12
# Adjust jvm.options to fit your server


Kibana systemd unit file

cat > /usr/lib/systemd/system/kibana.service << EOF
[Unit]
Description=kibana service daemon
After=network.target
[Service]
User=estest
Group=estest
LimitNOFILE=65536
LimitNPROC=65536
ExecStart=/usr/kibana/bin/kibana
ExecReload=/bin/kill -HUP \$MAINPID
KillMode=process
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF


Filebeat systemd unit file

cat > /usr/lib/systemd/system/filebeat.service << EOF
[Unit]
Description=filebeat Server Daemon
After=network.target
[Service]
User=root
Group=root
ExecStart=/usr/filebeat/filebeat -e -c /usr/filebeat/filebeat.yml
ExecReload=/bin/kill -HUP \$MAINPID
KillMode=process
Restart=on-failure
RestartSec=5s
[Install]
WantedBy=multi-user.target
EOF


Proxy Kibana behind nginx and set up a login account

# First create a login account, admin
htpasswd -cm /etc/nginx/htpasswd admin
# In nginx.conf, proxy requests to Kibana and require the login:
location / {
    proxy_pass http://127.0.0.1:5601$request_uri;
    auth_basic "login";
    auth_basic_user_file /etc/nginx/htpasswd;
}
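
A quick check that the login is enforced, assuming nginx listens on port 80:

curl -sI http://127.0.0.1/ | head -n1                       # expect 401 without credentials
curl -sI -u admin:<password> http://127.0.0.1/ | head -n1   # expect 200 with them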


Filebeat ships nginx logs to redis

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
  host: "0.0.0.0:5601"
output.redis:
  hosts: ["localhost"]
  keys:
    - key: "nginx_access"
      when.contains:
        tags: "access"
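
To confirm events are being queued, check the list from redis-cli:

redis-cli llen nginx_access        # length grows as access-log lines arrive
redis-cli lrange nginx_access 0 0  # peek at the oldest queued event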


Logstash consumes the logs from redis; as they are consumed, the length of the nginx_access list in redis steadily shrinks

cat >/etc/logstash/conf.d/redis.conf <<EOF
input {
  redis {
    host => "127.0.0.1"
    port => "6379"
    db => "0"
    key => "nginx_access"
    data_type => "list"
  }
  redis {
    host => "127.0.0.1"
    port => "6379"
    db => "0"
    key => "nginx_error"
    data_type => "list"
  }
}

filter {
  mutate {
    convert => ["upstream_time", "float"]
    convert => ["request_time", "float"]
  }
}

output {
    stdout {}
    if "access" in [tags] {
      elasticsearch {
        hosts => "http://localhost:9200"
        manage_template => false
        index => "nginx_access-%{+yyyy.MM.dd}"
      }
    }
    if "error" in [tags] {
      elasticsearch {
        hosts => "http://localhost:9200"
        manage_template => false
        index => "nginx_error-%{+yyyy.MM.dd}"
      }
    }
}
EOF
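
The pipeline can be syntax-checked before starting; the path below assumes a package install of Logstash:

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis.conf --config.test_and_exit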


Reference article: https://blog.51cto.com/juestnow/2533134

#########################


Pitfall 1: es-head cannot connect; check for a CORS problem

In elasticsearch.yml:

http.cors.enabled: true

http.cors.allow-origin: "*"
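
A rough way to verify CORS is active: a request carrying an Origin header should get the allow-origin header back:

curl -sI -H "Origin: http://example.com" http://192.168.0.11:9200/ | grep -i access-control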


Pitfall 2: es-head operations fail with a 406 Not Acceptable error

Edit vendor.js:

Change contentType: "application/x-www-form-urlencoded" to contentType: "application/json;charset=UTF-8"

Change var inspectData = s.contentType === "application/x-www-form-urlencoded" && to var inspectData = s.contentType === "application/json;charset=UTF-8" &&

Pitfall 3: the nodes do not aggregate into one cluster; each becomes an independent cluster

discovery.seed_hosts: ["192.168.0.11", "192.168.0.12"]

Do not use this on the first node; otherwise each node bootstraps its own cluster and the two nodes can never be joined into one.


Setting cluster.initial_master_nodes: ["node-1","node-2"] should also solve this; still to be tested.

Pitfall 4: problems with the default template in the filebeat config

setup.ilm.enabled: false   # do not inherit the index template

Example config:

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  json.keys_under_root: true
  json.overwrite_keys: true
  tags: ["access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log
  tags: ["error"]

setup.ilm.enabled: false

setup.template.settings:
  index.number_of_shards: 3
setup.kibana:
  host: "211.149.144.72:5601"
output.elasticsearch:
  hosts: ["211.149.144.72:9200"]
  indices:
    - index: "nginx_access-%{[agent.version]}-%{+yyyy.MM.dd}"
      when.contains:
        tags: "access"
    - index: "nginx_error-%{[agent.version]}-%{+yyyy.MM.dd}"
      when.contains:
        tags: "error"
setup.template.name: "nginx"
setup.template.pattern: "nginx_*"
setup.template.enabled: false
setup.template.overwrite: true
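
Once logs flow, the per-day indices should appear:

curl -s http://211.149.144.72:9200/_cat/indices?v | grep nginx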
