EFK Installation
I. Architecture Diagram
This architecture addresses the high system-resource usage of running Logstash on every server node. Compared with Logstash, the CPU and memory footprint of Beats is almost negligible. In addition, Beats and Logstash support SSL/TLS-encrypted transport with mutual client/server authentication, which secures the communication channel.
This architecture is therefore a good fit for scenarios with high data-security requirements where the performance of the individual servers is also a concern.
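As a sketch, the data flow assembled in the rest of this document is:

Filebeat (on each business server)
  --> Logstash (10.168.178.159:5044)
  --> Elasticsearch cluster (10.168.178.159, ports 9200/9201/9202)
  --> Kibana (port 5601, fronted by nginx basic auth on port 8180)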
II. Install Elasticsearch (a 3-node cluster; here all three nodes run on one host, 10.168.178.159, on different ports)
1. cd /app
tar xvf elasticsearch-6.3.2.tar.gz
cp -r elasticsearch-6.3.2 elasticsearch1
cp -r elasticsearch-6.3.2 elasticsearch2
cp -r elasticsearch-6.3.2 elasticsearch3
2. vi /app/elasticsearch1/config/elasticsearch.yml
cluster.name: es_zxy
node.name: node-1
# Set to true if this node may act as a master node
node.master: true
# Set to true if this node stores data
node.data: true
path.data: /app/elasticsearch1/data
path.logs: /app/elasticsearch1/logs
network.host: 10.168.178.159
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts: ["10.168.178.159:9300", "10.168.178.159:9301","10.168.178.159:9302"]
# With three master-eligible nodes, a quorum of 2 (3/2 + 1) prevents split-brain
discovery.zen.minimum_master_nodes: 2
bootstrap.memory_lock: true
bootstrap.system_call_filter: false
http.cors.enabled: true
http.cors.allow-origin: "*"
3. vi /app/elasticsearch2/config/elasticsearch.yml
cluster.name: es_zxy
node.name: node-2
# Set to true if this node may act as a master node
node.master: true
# Set to true if this node stores data
node.data: true
path.data: /app/elasticsearch2/data
path.logs: /app/elasticsearch2/logs
network.host: 10.168.178.159
http.port: 9201
transport.tcp.port: 9301
discovery.zen.ping.unicast.hosts: ["10.168.178.159:9300", "10.168.178.159:9301","10.168.178.159:9302"]
# With three master-eligible nodes, a quorum of 2 (3/2 + 1) prevents split-brain
discovery.zen.minimum_master_nodes: 2
bootstrap.memory_lock: true
bootstrap.system_call_filter: false
http.cors.enabled: true
http.cors.allow-origin: "*"
4. vi /app/elasticsearch3/config/elasticsearch.yml
cluster.name: es_zxy
node.name: node-3
# Set to true if this node may act as a master node
node.master: true
# Set to true if this node stores data
node.data: true
path.data: /app/elasticsearch3/data
path.logs: /app/elasticsearch3/logs
network.host: 10.168.178.159
http.port: 9202
transport.tcp.port: 9302
discovery.zen.ping.unicast.hosts: ["10.168.178.159:9300", "10.168.178.159:9301","10.168.178.159:9302"]
# With three master-eligible nodes, a quorum of 2 (3/2 + 1) prevents split-brain
discovery.zen.minimum_master_nodes: 2
bootstrap.memory_lock: true
bootstrap.system_call_filter: false
http.cors.enabled: true
http.cors.allow-origin: "*"
5. vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
6. vi /etc/security/limits.d/90-nproc.conf
* soft nproc 10240
root soft nproc unlimited
* soft memlock unlimited
* hard memlock unlimited
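After a fresh login, the new limits can be verified with ulimit (values per the two files above):
ulimit -n   # max open files, expect 65536
ulimit -u   # max user processes; limits.d/90-nproc.conf takes precedence, so expect 10240 for non-root users
ulimit -l   # max locked memory, expect unlimited (required for bootstrap.memory_lock: true)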
7. useradd es
chown -R es:es /app/elasticsearch1
chown -R es:es /app/elasticsearch2
chown -R es:es /app/elasticsearch3
8. Install JDK 1.8 and configure the environment variables
vi /etc/profile
JAVA_HOME=/app/jdk1.8.0_101
CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME CLASSPATH PATH
source /etc/profile
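A quick check that the JDK is picked up:
java -version   # should report version 1.8.0_101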
9. Start Elasticsearch
su es
/app/elasticsearch1/bin/elasticsearch -d
/app/elasticsearch2/bin/elasticsearch -d
/app/elasticsearch3/bin/elasticsearch -d
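Once all three nodes have started, the cluster can be checked over the REST API; expect "number_of_nodes" : 3 and a green or yellow status:
curl 'http://10.168.178.159:9200/_cluster/health?pretty'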
III. Install Logstash
1. Install JDK 1.8 and configure the environment variables (same as on the Elasticsearch host)
vi /etc/profile
JAVA_HOME=/app/jdk1.8.0_101
CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME CLASSPATH PATH
2. cd /app
tar xvf logstash-6.3.2.tar.gz
mv logstash-6.3.2 logstash
3. Edit /app/logstash/bin/logstash and insert the following at the top of the file
export JAVA_CMD="/app/jdk1.8.0_101/bin"
export JAVA_HOME="/app/jdk1.8.0_101/"
4. Edit /app/logstash/bin/logstash.lib.sh and insert the following at the top of the file
export JAVA_CMD="/app/jdk1.8.0_101/bin"
export JAVA_HOME="/app/jdk1.8.0_101/"
5. The contents of /app/logstash/config/zxy.conf:
input {
  beats {
    host => "10.168.178.159"
    codec => plain { charset => "UTF-8" }
    port => 5044
  }
}
filter {
  grok {
    # Extract the timestamp and log level from application log lines, e.g.
    # 2018-08-02 11:07:08.130 INFO [http-nio-8080-exec-5] [com.zxy.product.message.web.util.JPushUtil.java:144]
    match => ["message", "%{TIMESTAMP_ISO8601:logdate}\s+(?<Level>(\S+)).*"]
  }
  # Conditionals are not valid inside a plugin block, so the level filtering
  # sits at the filter level: drop DEBUG, INFO and empty-level events.
  if [Level] == "DEBUG" {
    drop {}
  }
  if [Level] == "INFO" {
    drop {}
  }
  if [Level] == "" {
    drop {}
  }
  date {
    # Map the parsed application time onto @timestamp, the field Elasticsearch displays
    match => ["logdate", "yyyy-MM-dd HH:mm:ss.SSS"]
    target => "@timestamp"
  }
  mutate {
    # Hide working fields from the indexed document; note that _id, _type and
    # _score are Elasticsearch metadata rather than event fields, so removing
    # them here is effectively a no-op.
    remove_field => ["Level", "logdate", "@version", "_id", "tags", "_type", "_score"]
  }
}
output {
  elasticsearch {
    codec => plain { charset => "UTF-8" }
    hosts => "http://10.168.178.159:9200"
    manage_template => false
    index => "%{[fields][log_topics]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
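Before starting Logstash, the pipeline syntax can be validated; in Logstash 6.x:
/app/logstash/bin/logstash -f /app/logstash/config/zxy.conf --config.test_and_exit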
6. Edit /app/logstash/config/jvm.options, increase the heap size, and comment out -XX:+HeapDumpOnOutOfMemoryError
-Xms4G
-Xmx4G
#-XX:+HeapDumpOnOutOfMemoryError
7. Start Logstash
nohup sh /app/logstash/bin/logstash -f /app/logstash/config/zxy.conf --path.data /app/logstash/data > nohup.log &
8. Because the Elasticsearch logs grow quickly, a daily cron job deletes indices older than half a month. The script is as follows:
[root@iZ230uaejn4Z logstash]# cat /app/delete15dayAgoELK.sh
#! /bin/sh
for((i=15;i<=200;i++));
do
before15day=`date +%Y.%m.%d -d "$i days ago"`
echo $before15day
curl -XDELETE http://10.168.178.159:9200/10.81.49.214-${before15day}
curl -XDELETE http://10.168.178.159:9200/.monitoring-es-6-${before15day}
done
[root@iZ230uaejn4Z logstash]# crontab -l
0 0 * * * /app/delete15dayAgoELK.sh > /dev/null
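Note that the script must be executable for the crontab entry above to run it directly:
chmod +x /app/delete15dayAgoELK.sh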
IV. Install Filebeat
Perform the following on each business server to install Filebeat. Installer download: https://zxytest.zhixueyun.com/installer/filebeat-6.3.2-linux-x86_64.tar.gz
1. cd /app
tar xvf filebeat-6.3.2-linux-x86_64.tar.gz
mv filebeat-6.3.2-linux-x86_64 filebeat
cd /app/
# Clean out application log files older than one day before starting collection
find /app/contaniner_data/ -name "*.log" -mtime +1 | xargs rm -rf
2. vi /app/filebeat/filebeat.yml, with the following content (zxy9.zhixueyun.com is the system's domain name, 10.168.178.159 is the Logstash server IP)
filebeat.prospectors:
- type: log
  enabled: true
  # Check the paths for new or changed log files every 60 seconds
  scan_frequency: 60s
  paths:
    - /app/contaniner_data/*/*.log
  # Limit how many log files Filebeat harvests at once, to cap its memory usage
  harvester_limit: 10
  ignore_older: 3m
  close_inactive: 2m
  clean_inactive: 5m
  close_removed: true
  clean_removed: true
  close_renamed: true
  close_timeout: 2m
  # Do not ship INFO or DEBUG lines
  exclude_lines: [".+? INFO[^*].+", ".+? DEBUG[^*].+"]
  exclude_files: [".gz$", ".tmp"]
  # Lines starting with whitespace (e.g. stack traces) are continuations of the previous line
  multiline.pattern: '^[[:space:]]+'
  multiline.negate: false
  multiline.match: after
  fields:
    log_topics: zxy9.zhixueyun.com
logging.level: error
output.logstash:
  hosts: ["10.168.178.159:5044"]
3. Start Filebeat
nohup /app/filebeat/filebeat -c /app/filebeat/filebeat.yml &> /app/filebeat/filebeat.log &
4. After starting Filebeat, remember to update /app/delete15dayAgoELK.sh: Elasticsearch indices are now kept for only 3 days, and the line curl -XDELETE http://10.168.178.159:9200/saicmotor.zhixueyun.com-${before15day} is added, where saicmotor.zhixueyun.com is the newly added log_topics name:
[root@iZ230uaejn4Z app]# crontab -l
*/25 * * * * /app/delete15dayAgoELK.sh > /dev/null
[root@iZ230uaejn4Z app]# cat /app/delete15dayAgoELK.sh
#! /bin/sh
for((i=3;i<=50;i++));
do
before15day=`date +%Y.%m.%d -d "$i days ago"`
echo $before15day
curl -XDELETE http://10.168.178.159:9200/.monitoring-es-6-${before15day}
curl -XDELETE http://10.168.178.159:9200/.monitoring-kibana-6-${before15day}
curl -XDELETE http://10.168.178.159:9200/zxy9.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/prezxy9.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/demo9.zhixuey.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/gdttest.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/dj.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/zwydemo.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/zwytest.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/sdtest.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/cela.cbead.cn-${before15day}
curl -XDELETE http://10.168.178.159:9200/saicdev.zhixueyun.com-${before15day}
curl -XDELETE http://10.168.178.159:9200/saicmotor.zhixueyun.com-${before15day}
done
V. Install Kibana
1. cd /app
tar xvf kibana-6.3.2-linux-x86_64.tar.gz
mv kibana-6.3.2-linux-x86_64 kibana
2. vi /app/kibana/config/kibana.yml
server.port: 5601
server.host: "10.168.178.159"
elasticsearch.url: "http://10.168.178.159:9200"
3. Start Kibana
nohup /app/kibana/bin/kibana -H 10.168.178.159 &> run.log &
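To confirm Kibana is up, its status API can be queried:
curl http://10.168.178.159:5601/api/status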
VI. Install the head plugin
1. cd /app
wget https://github.com/mobz/elasticsearch-head/archive/master.zip
unzip master.zip
wget https://npm.taobao.org/mirrors/node/latest-v4.x/node-v4.4.7-linux-x64.tar.gz
tar xvf node-v4.4.7-linux-x64.tar.gz
2. Configure the environment variables
vi /etc/profile
export NODE_HOME=/app/node-v4.4.7-linux-x64
export PATH=$PATH:$NODE_HOME/bin
export NODE_PATH=$NODE_HOME/lib/node_modules
3. source /etc/profile
4. Install grunt
cd /app/elasticsearch-head-master
npm install -g grunt-cli
5. Modify the head plugin source file /app/elasticsearch-head-master/Gruntfile.js
connect: {
server: {
options: {
port: 9100,
base: '.',
keepalive: true,
hostname: '10.168.178.159'
}
}
}
6. Modify /app/elasticsearch-head-master/_site/app.js
init: function(parent) {
this._super();
this.prefs = services.Preferences.instance();
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://10.168.178.159:9200";
if( this.base_uri.charAt( this.base_uri.length - 1 ) !== "/" ) {
//XHR request fails if the URL is not ending with a "/"
this.base_uri += "/";
}
7. Download the files head needs to run (place them under /tmp)
cd /tmp
wget https://github.com/Medium/phantomjs/releases/download/v2.1.1/phantomjs-2.1.1-linux-x86_64.tar.bz2
yum -y install bzip2
8. Run head
cd /app/elasticsearch-head-master
npm install
nohup grunt server > /dev/null &
9. Verify in the browser
Visit http://10.168.178.159:9100
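If the page loads but cannot reach the cluster, verify that the CORS settings from the elasticsearch.yml files took effect; a CORS-enabled node replies with an Access-Control-Allow-Origin header:
curl -s -i -H "Origin: http://10.168.178.159:9100" http://10.168.178.159:9200/ | grep -i access-control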
VII. Configure the Logstash indices in Kibana
1. Visit http://10.168.178.159:5601 and click Management
2. In Index pattern, enter a wildcard index name such as zxy9.zhixueyun.com*, then click Next step
3. In Time Filter field name, select @timestamp and click Create index pattern
4. Click the Discover menu on the left to see the newly created index and its log data
VIII. Configure a username and password for Kibana
1. Install nginx
2. Edit /app/nginx/conf/nginx.conf with the following content
user root root;
worker_processes 2;
events {
worker_connections 65535;
}
http {
include mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 128;
client_header_buffer_size 32k;
large_client_header_buffers 4 32k;
client_max_body_size 20480m;
sendfile on;
tcp_nopush on;
keepalive_timeout 150;
tcp_nodelay on;
server_tokens off;
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
fastcgi_buffer_size 128k;
fastcgi_buffers 8 128k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
##cache##
proxy_connect_timeout 150;
proxy_read_timeout 150;
proxy_send_timeout 150;
proxy_buffer_size 32k;
proxy_buffers 8 128k;
proxy_busy_buffers_size 256k;
proxy_temp_file_write_size 256k;
proxy_temp_path /app/nginx/temp;
proxy_cache_path /app/nginx/cache levels=1:2 keys_zone=cache_one:200m inactive=1d max_size=30g;
##end##
access_log /dev/null;
error_log /dev/null;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 8k;
gzip_http_version 1.1;
gzip_types text/plain application/x-javascript text/css application/xml;
gzip_disable "MSIE [1-6]\.";
server {
listen 8180;
server_name 121.41.36.121;
location / {
auth_basic "secret";
auth_basic_user_file /app/nginx/conf/passwd.db;
proxy_pass http://10.168.178.159:5601;
proxy_set_header Host $host:5601;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Via "nginx";
}
access_log off;
}
}
3. Run the following command to set a password for the admin user (on CentOS-family systems, htpasswd is provided by the httpd-tools package)
htpasswd -c /app/nginx/conf/passwd.db admin
4. Start nginx
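A minimal sketch, assuming nginx was installed with prefix /app/nginx (consistent with the paths above; adjust if your layout differs): test the configuration, start the server, then confirm basic auth is enforced, where <password> is a placeholder for the admin password set in step 3:
/app/nginx/sbin/nginx -t
/app/nginx/sbin/nginx
curl -I http://121.41.36.121:8180/                     # expect 401 without credentials
curl -I -u admin:<password> http://121.41.36.121:8180/  # expect 200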




