Linux ELK Basics (3)

一、Collecting data into Redis with Logstash

1. Prepare the environment

Host    IP            Deployed services
web01 172.16.1.7 nginx,tomcat,logstash
db01 172.16.1.51 es,kibana,redis
db02 172.16.1.52 es
db03 172.16.1.53 es

2. Install Redis, ES, Kibana and Logstash
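
These components were already installed in earlier parts of this series; as a quick reminder, a minimal sketch (the rpm file names are assumptions, adjust to the versions you actually downloaded; Redis here comes from the EPEL repository):

[root@db01 ~]# yum install -y redis
[root@db01 ~]# yum localinstall -y elasticsearch-6.6.0.rpm kibana-6.6.0-x86_64.rpm
[root@db02 ~]# yum localinstall -y elasticsearch-6.6.0.rpm
[root@db03 ~]# yum localinstall -y elasticsearch-6.6.0.rpm
[root@web01 ~]# yum localinstall -y logstash-6.6.0.rpm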

3. Configure collecting Nginx logs into Redis

[root@web01 ~]# vim /etc/logstash/conf.d/nginx_to_redis.conf
input {
  file {
    path => "/var/log/nginx/access.log"
    start_position => "beginning"
    codec => "json"
  }
}
output {
  redis {
    host => "172.16.1.51"
    port => "6379"
    data_type => "list"
    db => "0"
    key => "nginx_log"
  }
}
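
The pipeline can be syntax-checked and then started; a minimal sketch, assuming the default rpm install path of the logstash binary:

[root@web01 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx_to_redis.conf -t
[root@web01 ~]# systemctl restart logstash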

4. Collect Nginx and Tomcat logs into Redis

[root@web01 ~]# vim /etc/logstash/conf.d/more_to_redis.conf
input {
  file {
    type => "nginx_log"
    path => "/var/log/nginx/access.log"
    start_position => "beginning"
    codec => "json"
  }
  file {
    type => "tomcat_log"
    path => "/usr/local/tomcat/logs/tomcat_access_json.*.log"
    start_position => "beginning"
    codec => "json"
  }
}
output {
  if [type] == "nginx_log" {
    redis {
      host => "172.16.1.51"
      port => "6379"
      data_type => "list"
      db => "0"
      key => "nginx_log"
    }
  }
  if [type] == "tomcat_log" {
    redis {
      host => "172.16.1.51"
      port => "6379"
      data_type => "list"
      db => "1"
      key => "tomcat_log"
    }
  }
}

#Verify: visit the Nginx and Tomcat pages, then check whether the keys appear in Redis
127.0.0.1:6379> LLEN nginx_log
(integer) 1
127.0.0.1:6379> LLEN nginx_log
(integer) 888
127.0.0.1:6379> LRANGE nginx_log 0 -1

5. Configure pulling the data out of Redis and writing it into ES

[root@db02 ~]# yum localinstall -y logstash-6.6.0.rpm
[root@db02 ~]# vim /etc/logstash/conf.d/redis_to_es.conf
input {
  redis {
    host => "172.16.1.51"
    port => "6379"
    db => "0"
    data_type => "list"
    key => "nginx_log"
  }
  redis {
    host => "172.16.1.51"
    port => "6379"
    db => "1"
    data_type => "list"
    key => "tomcat_log"
  }
}
output {
  if [type] == "nginx_log" {
    elasticsearch {
      hosts => ["10.0.0.51:9200"]
      index => "nginx_log_%{+YYYY-MM-dd}"
    }
  }
  if [type] == "tomcat_log" {
    elasticsearch {
      hosts => ["10.0.0.51:9200"]
      index => "tomcat_log_%{+YYYY-MM-dd}"
    }
  }
}
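
Note that the type field added on web01 is stored in Redis as part of each JSON event, so the conditionals above still match on db02. After starting Logstash here, the new indices can be checked directly against the ES API; a quick sketch (index names taken from the output section above):

[root@db02 ~]# systemctl start logstash
[root@db02 ~]# curl -s '10.0.0.51:9200/_cat/indices?v' | grep -E 'nginx_log|tomcat_log'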

二、Collecting logs over TCP/UDP

1. Configure log collection

[root@web01 ~]# vim /etc/logstash/conf.d/tcp.conf
input {
  tcp {
    port => "1234"
    mode => "server"
  }
}
output {
  stdout {}
} 
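
Because the output is stdout, it is easiest to run this pipeline in the foreground and watch events as they arrive; a minimal sketch (default rpm binary path assumed):

[root@web01 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf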

2. Test with telnet

[root@db02 ~]# telnet 172.16.1.7 1234
Trying 172.16.1.7...
Connected to 172.16.1.7.
Escape character is '^]'.
123
345

#Output on the Logstash side
{
    "@timestamp" => 2020-08-17T02:23:05.833Z,
          "host" => "172.16.1.52",
          "port" => 33002,
       "message" => "\r",
      "@version" => "1"
}
{
    "@timestamp" => 2020-08-17T02:23:32.562Z,
          "host" => "172.16.1.52",
          "port" => 33002,
       "message" => "123\r",
      "@version" => "1"
}
{
    "@timestamp" => 2020-08-17T02:23:38.300Z,
          "host" => "172.16.1.52",
          "port" => 33002,
       "message" => "345\r",
      "@version" => "1"
}

3. Test with nc

#Install
[root@db02 ~]# yum install -y nc

#Use the nc tool
[root@db02 ~]# nc 172.16.1.7 1234
123
456

#Use nc to ship a log file to the Logstash server
[root@web01 ~]# tail -f /var/log/nginx/access.log | nc 10.0.0.7 1234 &
[1] 29595

#Send data through the bash pseudo device
[root@web01 ~]# echo "pseudo-device test" > /dev/tcp/10.0.0.7/1234

4. Collect the logs into ES

[root@web01 ~]# vim /etc/logstash/conf.d/tcp.conf
input {
  tcp {
    port => "1234"
    mode => "server"
  }
}
output {
  elasticsearch {
    hosts => ["10.0.0.51:9200"]
    index => "tcp_log_%{+YYYY-MM-dd}"
  }
}
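
After switching the output to ES, restart Logstash and resend a few test lines; they should end up in the tcp_log index (nc usage as in step 3, index name from the config above):

[root@web01 ~]# systemctl restart logstash
[root@db02 ~]# echo "tcp to es test" | nc 172.16.1.7 1234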

三、Collecting HAProxy logs with Logstash and rsyslog

1. About rsyslog

On older CentOS releases the system logger was syslog; newer releases ship rsyslog instead. According to the official site, rsyslog (as of its 2013 releases) can forward on the order of one million log messages per second. Official site: http://www.rsyslog.com/

2. Install

[root@web01 ~]# yum install -y rsyslog

3. Configure rsyslog

[root@web01 ~]# vim /etc/rsyslog.conf
#Uncomment the following lines to enable the UDP and TCP listeners
$ModLoad imudp
$UDPServerRun 514
$ModLoad imtcp
$InputTCPServerRun 514
#Forward everything logged to the local6 facility to Logstash over TCP (@@ = TCP, @ = UDP)
local6.*       @@172.16.1.52:2222
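
Once rsyslog has been restarted (step 7), forwarding can be exercised without HAProxy by writing a test message to the local6 facility; a minimal sketch:

[root@web01 ~]# logger -p local6.info "rsyslog forward test"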

4. Install HAProxy

[root@web01 ~]# yum install -y haproxy

5. Configure HAProxy

[root@web01 ~]# vim /etc/haproxy/haproxy.cfg
global
maxconn 100000
chroot /var/lib/haproxy
uid 99
gid 99
daemon
nbproc 1
pidfile /var/run/haproxy.pid
log 127.0.0.1 local6 info

defaults
option http-keep-alive
option  forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client  300000ms
timeout server  300000ms

listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri     /haproxy-status
 stats auth    haadmin:123456

#frontend web_port
frontend web_port
        bind 0.0.0.0:80
        mode http
        option httplog
        log global
        option  forwardfor
###################ACL Setting##########################
        acl pc          hdr_dom(host) -i www.elk.com
        acl mobile      hdr_dom(host) -i m.elk.com
###################USE ACL##############################
        use_backend     pc_host        if  pc
        use_backend     mobile_host    if  mobile
########################################################

backend pc_host
        mode    http
        option  httplog
        balance source
        server web1  10.0.0.53:8081 check inter 2000 rise 3 fall 2 weight 1

backend mobile_host
        mode    http
        option  httplog
        balance source
        server web1  10.0.0.53:8080 check inter 2000 rise 3 fall 2 weight 1
        
        
Below is the same configuration again, this time with inline comments; the ACLs and backends are adjusted to the Nginx and Tomcat instances on web01 used in the rest of this lab.

[root@web01 ~]# vim /etc/haproxy/haproxy.cfg
#Global settings
global
#Maximum concurrent connections
maxconn 100000
#Chroot for safety
chroot /var/lib/haproxy
#User and group to run as
uid 99
gid 99
#Run as a daemon
daemon
#Number of HAProxy processes
nbproc 1
#PID file
pidfile /var/run/haproxy.pid
#Log to the local syslog, facility local6, level info
log 127.0.0.1 local6 info

#Default settings
defaults
#Enable HTTP keep-alive
option http-keep-alive
#Pass the real client IP to the backends
option  forwardfor
#Maximum connections
maxconn 100000
#HTTP mode
mode http
#Connection timeouts
timeout connect 300000ms
timeout client  300000ms
timeout server  300000ms

#Stats page
listen status
 #HTTP mode
 mode http
 #Listen port
 bind 0.0.0.0:9999
 #Enable the stats page
 stats enable
 #Use the global log settings
 log global
 #Stats page URI
 stats uri     /haproxy-status
 #Stats page username and password
 stats auth    haadmin:123456

#frontend web_port
frontend web_port
        bind 0.0.0.0:80
        mode http
        option httplog
        log global
        option  forwardfor
###################ACL Setting##########################
        acl nginx       hdr_dom(host) -i www.nginx.com
        acl tomcat      hdr_dom(host) -i www.tomcat.com
###################USE ACL##############################
        use_backend     nginx_host     if  nginx
        use_backend     tomcat_host    if  tomcat
########################################################

backend nginx_host
        mode    http
        option  httplog
        balance source
        server web01  10.0.0.7:8081 check inter 2000 rise 3 fall 2 weight 1

backend tomcat_host
        mode    http
        option  httplog
        balance source
        server web01  10.0.0.7:8080 check inter 2000 rise 3 fall 2 weight 1
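
Before starting HAProxy, the configuration can be validated; the -c flag only checks the syntax and does not start the service:

[root@web01 ~]# haproxy -f /etc/haproxy/haproxy.cfg -c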

6. Change the Nginx listen port

[root@web01 ~]# vim /etc/nginx/nginx.conf
    server {
        listen       8081 default_server;
        ...

7. Start the services

#Start haproxy
[root@web01 ~]# systemctl start haproxy.service

#Start rsyslog
[root@web01 ~]# systemctl start rsyslog

#Verify the listening ports
[root@web01 ~]# netstat -lntp
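
If everything started correctly, HAProxy should be listening on ports 80 and 9999 and rsyslog on 514; a quick filtered check (port numbers taken from the configs above):

[root@web01 ~]# netstat -lntup | egrep ':80 |:9999 |:514 '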

8. Access the stats page

http://10.0.0.7:9999/haproxy-status
haadmin
123456

9. Test access to Nginx and Tomcat

#Configure local hosts entries
10.0.0.7 www.nginx.com
10.0.0.7 www.tomcat.com

#Visit the pages
http://www.nginx.com/
http://www.tomcat.com/

10. Test collecting the HAProxy logs

[root@db02 ~]# vim /etc/logstash/conf.d/haproxy.conf
input {
  syslog {
    port => "2222"
  }
}
output {
  stdout {}
}

#Visit pages through HAProxy and check whether Logstash prints output
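
To watch that output, run the pipeline in the foreground on db02 while refreshing a page behind HAProxy (default rpm binary path assumed):

[root@db02 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/haproxy.conf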

11. Collect the HAProxy logs into ES

[root@db02 ~]# vim /etc/logstash/conf.d/haproxy_es.conf
input {
  syslog {
    port => "2222"
  }
}
output {
  elasticsearch {
    hosts => ["10.0.0.51:9200"]
    index => "haproxy_log_%{+YYYY-MM-dd}"
  }
}

四、Getting started with Filebeat

1. Introduction

Filebeat ships with prebuilt modules that contain the configuration needed to collect, parse, enrich and visualize data from common log file formats. Each Filebeat module consists of one or more filesets, which bundle ingest node pipelines, Elasticsearch templates, Filebeat prospector (input) configuration and Kibana dashboards.

The modules are a good way to get started. Filebeat itself is a lightweight, single-purpose log shipper meant for servers that do not have Java installed; it forwards logs to Logstash, Elasticsearch, Redis and similar destinations for further processing.

Filebeat serves the same purpose as Logstash:
Logstash runs on the JVM and therefore requires a Java environment
Filebeat is written in Go, needs no Java environment, and is very lightweight

2. Install Filebeat

#Upload the package
[root@web01 ~]# rz filebeat-6.6.0-x86_64.rpm

#Install
[root@web01 ~]# rpm -ivh filebeat-6.6.0-x86_64.rpm

3. Configuration file

[root@web01 ~]# rpm -qc filebeat
/etc/filebeat/filebeat.yml

五、Collecting local logs to a file with Filebeat

1. Configuration

[root@web01 ~]# vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
output.file:
  path: "/tmp"
  filename: "filebeat_messages.log"
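
Filebeat can validate its own configuration before being started; a quick sanity check using the binary installed by the rpm:

[root@web01 ~]# filebeat test config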

2. Start

[root@web01 ~]# systemctl start filebeat.service

#Verify
[root@web01 ~]# ps -ef | grep filebeat

3. Test

[root@web01 ~]# tail -f /tmp/filebeat_messages.log

#Append a test line to the source log
[root@web01 ~]# echo "123" >> /var/log/messages

六、Collecting local logs into ES with Filebeat

1. Configuration

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log

output.elasticsearch:
  hosts: ["10.0.0.51:9200"]
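
Connectivity to Elasticsearch can be probed as well; the test subcommand only checks the configured output and sends no events:

[root@web01 ~]# filebeat test output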

2. Start

[root@web01 ~]# systemctl restart filebeat.service

#Verify
[root@web01 ~]# ps -ef | grep filebeat

3. Test

#Access Nginx
http://10.0.0.7:8081/

#Check the new index in the ES page
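
By default Filebeat 6.x writes to indices named filebeat-<version>-<date>; a quick check against the ES API (host taken from the output config above):

[root@web01 ~]# curl -s '10.0.0.51:9200/_cat/indices?v' | grep filebeat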