ELK部署安装[一]
实验环境
linux-node1
[root@linux-node1 ~]# ip a | grep 172 inet 172.31.202.211/20 brd 172.31.207.255 scope global dynamic eth0 [root@linux-node1 ~]# cat /etc/redhat-release CentOS Linux release 7.2.1511 (Core) [root@linux-node1 ~]# uname -r 3.10.0-514.26.2.el7.x86_64
linux-node2
[root@linux-node2 ~]# ip a | grep 172 inet 172.31.202.212/20 brd 172.31.207.255 scope global dynamic eth0 [root@linux-node2 ~]# cat /etc/redhat-release CentOS Linux release 7.2.1511 (Core) [root@linux-node2 ~]# uname -r 3.10.0-514.26.2.el7.x86_64
在node1安装elasticsearch
[root@linux-node1 ~]# mkdir -pv /elk [root@linux-node1 ~]# chown -R elasticsearch.elasticsearch /elk [root@linux-node1 ~]# sed -n "38,40p" /usr/lib/systemd/system/elasticsearch.service # Specifies the maximum file descriptor number that can be opened by this process LimitNOFILE=65536 LimitMEMLOCK=infinity [root@linux-node1 ~]# grep "^[a-zA-Z]" /etc/elasticsearch/elasticsearch.yml cluster.name: elk-cluster1 node.name: elk-node-1 path.data: /elk/data path.logs: /elk/logs bootstrap.memory_lock: true network.host: 172.31.202.211 http.port: 9200 discovery.zen.ping.unicast.hosts: ["172.31.202.211", "172.31.202.212"] http.cors.enabled: true http.cors.allow-origin: "*"
在node1安装elasticsearch-head插件
[root@linux-node1 src]# tar xf elasticsearch-head.tar.gz [root@linux-node1 src]# cd elasticsearch-head/ [root@linux-node1 elasticsearch-head]# npm run start &
在node1安装logstash
[root@linux-node1 ~]# chown -R logstash.logstash /var/log/logstash/ [root@linux-node1 ~]# chmod 755 /var/log/logstash/ [root@linux-node1 ~]# chmod 644 /var/log/messages
日志格式
[root@linux-node1 ~]# sed -n "25,38p" /usr/local/nginx/conf/nginx.conf log_format access_json '{"@timestamp":"$time_iso8601",' '"host":"$server_addr",' '"clientip":"$remote_addr",' '"size":$body_bytes_sent,' '"responsetime":$request_time,' '"upstreamtime":"$upstream_response_time",' '"upstreamhost":"$upstream_addr",' '"http_host":"$host",' '"url":"$uri",' '"domain":"$host",' '"xff":"$http_x_forwarded_for",' '"referer":"$http_referer",' '"status":"$status"}'; access_log /var/log/nginx/access.log access_json;
[root@linux-node2 ~]# sed -n "135,137p" /usr/local/tomcat/conf/server.xml <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" prefix="tomcat_access_log" suffix=".log" pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>
如何收集日志
[root@linux-node1 ~]# cat /etc/logstash/conf.d/nginx-system.conf input { file { type => "nginx-log" path => "/var/log/nginx/access.log" start_position => "end" stat_interval => "2" codec => "json" } file { type => "system-log" path => "/var/log/messages" start_position => "beginning" stat_interval => "2" } } output { if [type] == "system-log" { elasticsearch { hosts => ["172.31.202.211:9200"] index => "logstash-system-log-%{+YYYY.MM.dd}" }} if [type] == "nginx-log" { elasticsearch { hosts => ["172.31.202.211:9200"] index => "logstash-nginx-log-%{+YYYY.MM.dd}" }} }
[root@linux-node2 ~]# cat /etc/logstash/conf.d/system-tomcat.conf input { file { type => "tomcat-log" path => "/usr/local/tomcat/logs/tomcat_access_log.*.log" start_position => "end" stat_interval => "2" codec => "json" } file { type => "tomcat-system-log" path => "/var/log/messages" start_position => "beginning" stat_interval => "2" } } output { if [type] == "tomcat-log" { elasticsearch { hosts => ["172.31.202.212:9200"] index => "logstash-tomcat-log-%{+YYYY.MM.dd}" }} if [type] == "tomcat-system-log" { elasticsearch { hosts => ["172.31.202.212:9200"] index => "logstash-tomcat-system-log-%{+YYYY.MM.dd}" }} }
在node1安装kibana
[root@linux-node1 ~]# grep "^[a-zA-Z]" /etc/kibana/kibana.yml server.port: 5601 server.host: "172.31.202.211" elasticsearch.url: "http://172.31.202.212:9200"

浙公网安备 33010602011771号