
ELK - Part 2

Collect nginx logs and system logs, write them to Kafka, then use Logstash to read them back out and write them to Elasticsearch.

## node1: write nginx and system logs to Kafka
[root@node1 conf.d]# vim /etc/logstash/conf.d/nginx.conf

input {
  # nginx access log; codec "json" assumes nginx writes its access log in JSON format
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx-access-log-1105"
    start_position => "beginning"
    stat_interval => "2"
    codec => "json"
  }
  # system log, plain text
  file {
    path => "/var/log/messages"
    type => "system-log-1105"
    start_position => "beginning"
    stat_interval => "2"
  }
}


output {
  if [type] == "nginx-access-log-1105" {
    kafka {
      bootstrap_servers => "192.168.1.106:9092"
      topic_id => "nginx-accesslog-1105"
      codec => "json"
    }
  }
  if [type] == "system-log-1105" {
    kafka {
      bootstrap_servers => "192.168.1.106:9092"
      topic_id => "system-log-1105"
      codec => "json"
    }
  }
}
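
Before relying on the pipeline, it helps to syntax-check the config and confirm events actually reach Kafka. A minimal check, assuming the rpm install path /usr/share/logstash for Logstash and the /usr/local/kafka path used later in this post:

# syntax-check the new pipeline (a separate path.data avoids the lock held by the running service)
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf -t --path.data /tmp/logstash-check

systemctl restart logstash.service

# confirm the topics exist and events are arriving
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.106:9092 \
  --topic nginx-accesslog-1105 --from-beginning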


## node2: read from Kafka and write to Elasticsearch

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "nginx-accesslog-1105"
    group_id => "nginx-access-log"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "system-log-1105"
    group_id => "system-log"    # use a separate consumer group per topic
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
#  stdout {
#    codec => "rubydebug"
#  }
  if [type] == "nginx-access-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.105:9200"]
      index => "logstash-nginx-access-log-1105-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "system-log-1105" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "logstash-system-log-1105-%{+YYYY.MM.dd}"
    }
  }
}
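
After restarting Logstash on node2, the daily indices should show up in Elasticsearch. A quick shell check using the standard _cat/indices API:

curl -s 'http://192.168.1.105:9200/_cat/indices?v' | grep -E 'nginx-access-log|system-log'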

Then add the new indices to Kibana.

 

## Use Filebeat to collect logs and write them to Kafka

node1: upload filebeat-5.6.5-x86_64.rpm

yum install filebeat-5.6.5-x86_64.rpm  -y

systemctl stop logstash.service

[root@node1 tmp]# grep -v "#"  /etc/filebeat/filebeat.yml | grep -v "^$"
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/*.log
    - /var/log/messages
  exclude_lines: ["^DBG"]
  exclude_files: [".gz$"]
  document_type: "system-log-1105-filebeat"
output.file:
  path: "/tmp"
  filename: "filebeat.txt"

Filebeat 5.x allows only one output at a time, so comment out the output.file block above and use output.logstash instead:

output.logstash:
  hosts: ["192.168.1.105:5044"]  # Logstash server address; multiple hosts allowed
  enabled: true                  # enable output to Logstash (defaults to true)
  worker: 1                      # number of worker threads
  compression_level: 3           # compression level
  #loadbalance: true             # enable load balancing across multiple outputs
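
A sketch of validating and applying the change (-configtest is a Filebeat 5.x flag; if you first test with output.file, the events land under /tmp):

# syntax-check the YAML before (re)starting the service
filebeat -configtest -c /etc/filebeat/filebeat.yml

systemctl restart filebeat.service

# when testing with output.file, inspect the events written to /tmp
tail -f /tmp/filebeat.txt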

[root@node1 src]# vim /etc/logstash/conf.d/filebate.conf

input {
  beats {
    port => "5044"
    codec => "json"
  }
}
output {
  if [type] == "system-log-1105-filebeat" {
    kafka {
      bootstrap_servers => "192.168.1.105:9092"
      topic_id => "system-log-filebeat-1105"
      codec => "json"
    }
  }
}

 [root@node1 conf.d]# systemctl restart logstash.service

[root@node1 conf.d]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181,192.168.1.106:2181,192.168.1.101:2181
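
If the new topic appears in the list, tail it directly to confirm Filebeat events are flowing through Logstash into Kafka:

/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.105:9092 \
  --topic system-log-filebeat-1105 --from-beginning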

 

## node2

[root@node2 conf.d]# vim kafka-es.conf

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "system-log-filebeat-1105"
    group_id => "system-log-filebeat"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
#  stdout {
#    codec => "rubydebug"
#  }
  if [type] == "system-log-1105-filebeat" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "system-log-1105-filebeat-%{+YYYY.MM.dd}"
    }
  }
}

[root@node2 conf.d]# systemctl restart logstash.service

# Test: append something to /var/log/messages on node1, then check via port 9100 (elasticsearch-head); if the new data shows up, the pipeline works. Then add the index to Kibana.

## The flow: Filebeat reads from /var/log/messages and sends to 192.168.1.105:5044; Logstash on node1 reads from local port 5044 and writes to Kafka on port 9092; node2's Logstash then reads from 192.168.1.105:9092 in its input and ships the events to Elasticsearch in its output. An end-to-end check is sketched below.
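
A minimal end-to-end check matching that flow (the index name comes from kafka-es.conf above):

# on node1: generate a test line
echo "filebeat-pipeline-test $(date)" >> /var/log/messages

# wait a few seconds, then confirm the index has documents
curl -s 'http://192.168.1.106:9200/_cat/indices?v' | grep system-log-1105-filebeat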

 

### Collect nginx logs with Filebeat

[root@node1 conf.d]# vim /etc/filebeat/filebeat.yml

- input_type: log
  paths:
    - /var/log/nginx/access.log
  exclude_lines: ["^DBG"]
  exclude_files: [".gz$"]
  document_type: "nginx-accesslog-1105-filebeat"

output.logstash:
  hosts: ["192.168.1.105:5044"]  # Logstash server address; multiple hosts allowed
  enabled: true                  # enable output to Logstash (defaults to true)
  worker: 1                      # number of worker threads
  compression_level: 3           # compression level
  #loadbalance: true             # enable load balancing across multiple outputs

[root@node1 src]# vim /etc/logstash/conf.d/filebate.conf

output {
  if [type] == "nginx-accesslog-1105-filebeat" {
    kafka {
      bootstrap_servers => "192.168.1.105:9092"
      topic_id => "nginx-accesslog-filebeat-1105"
      codec => "json"
    }
  }
}
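
After editing both files, restart the services on node1 and confirm the new topic was created:

systemctl restart filebeat.service
systemctl restart logstash.service
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.105:2181 | grep nginx-accesslog-filebeat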

 

## node2

input {
  kafka {
    bootstrap_servers => "192.168.1.105:9092"
    topics => "nginx-accesslog-filebeat-1105"
    group_id => "nginx-accesslog-filebeat"
    codec => "json"
    consumer_threads => 1
    decorate_events => true
  }
}

output {
  if [type] == "nginx-accesslog-1105-filebeat" {
    elasticsearch {
      hosts => ["192.168.1.106:9200"]
      index => "logstash-nginx-accesslog-1105-filebeat-%{+YYYY.MM.dd}"
    }
  }
}

[root@node2 conf.d]# systemctl restart logstash.service
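
To verify, generate some traffic against nginx on node1 (assuming it listens on 192.168.1.105:80) and watch for the new index:

# produce a few access-log entries
for i in $(seq 1 10); do curl -s -o /dev/null http://192.168.1.105/; done

# the dated index should appear shortly afterwards
curl -s 'http://192.168.1.106:9200/_cat/indices?v' | grep nginx-accesslog-1105-filebeat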

 

## Collect Java logs

[root@node1 conf.d]# vim /etc/logstash/conf.d/java.conf

input {
  file {
    path => "/var/log/logstash/logstash-plain.log"
    type => "javalog"
    # merge lines that do NOT start with "[YYYY-MM-DD" into the previous event
    codec => multiline {
      pattern => "^\[(\d{4}-\d{2}-\d{2})"
      negate => true
      what => "previous"
    }
  }
}

output {
  elasticsearch {
    hosts => ["192.168.1.105:9200"]
    index => "javalog-1105-%{+YYYY.MM}"
  }
}
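
The multiline codec merges every line that does not start with a "[YYYY-MM-DD" timestamp into the previous event, so a multi-line Java stack trace becomes a single document. To inspect the grouping before shipping to Elasticsearch, run the same input against a rubydebug stdout (a sketch, assuming the rpm install path; --path.data avoids colliding with the running service):

/usr/share/logstash/bin/logstash --path.data /tmp/logstash-mltest -e '
  input {
    file {
      path => "/var/log/logstash/logstash-plain.log"
      start_position => "beginning"
      codec => multiline {
        pattern => "^\[(\d{4}-\d{2}-\d{2})"
        negate => true
        what => "previous"
      }
    }
  }
  output { stdout { codec => rubydebug } }'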

 
