极客时间运维进阶训练营第六周作业
1、基于 logstash filter 功能将 nginx 默认的访问日志及 error log 转换为 json 格式并写入 elasticsearch
# Section 1: parse nginx's default-format access and error logs with grok,
# emit them as structured (JSON) events, and index them into Elasticsearch.
# The heredoc delimiter is quoted ("EOF"), so the config below is written
# verbatim — no shell expansion of $-signs inside the grok patterns.
tee /etc/logstash/conf.d/nginxlog-to-es.conf << "EOF"
input {
file {
path => "/apps/nginx/logs/access.log"
type => "nginx-accesslog"
stat_interval => "1"
start_position => "beginning"
}
file {
path => "/apps/nginx/logs/error.log"
type => "nginx-errorlog"
stat_interval => "1"
start_position => "beginning"
}
}
filter {
if [type] == "nginx-accesslog" {
grok {
match => { "message" => ["%{IPORHOST:clientip} - %{DATA:username} \[%{HTTPDATE:request-time}\] \"%{WORD:request-method} %{DATA:request-uri} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent_bytes} \"%{DATA:referrer}\" \"%{DATA:useragent}\""] }
remove_field => "message"
add_field => { "project" => "magedu"}
}
mutate {
convert => [ "[response_code]", "integer"]
}
}
if [type] == "nginx-errorlog" {
grok {
match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
remove_field => "message"
}
}
}
output {
if [type] == "nginx-accesslog" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "magedu-nginx-accesslog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}
if [type] == "nginx-errorlog" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "magedu-nginx-errorlog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}}
}
EOF
# NOTE(review): field names containing "-" (request-time, request-uri) are
# awkward to reference in ES/Kibana — underscores would be safer; confirm
# downstream dashboards before renaming.
# Restart so logstash picks up the new pipeline file.
systemctl restart logstash
2、基于 logstash 收集 json 格式的 nginx 访问日志
# Section 2 (part 1): switch nginx to a JSON access-log format so logstash
# can decode events with the json codec instead of grok.
# Back up the original config before overwriting it.
cp /apps/nginx/conf/nginx.conf{,.bak}
# Quoted "EOF": the $nginx_variables below must reach the file literally.
tee /apps/nginx/conf/nginx.conf << "EOF"
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format access_json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"clientip":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":$request_time,'
'"upstreamtime":"$upstream_response_time",'
'"upstreamhost":"$upstream_addr",'
'"http_host":"$host",'
'"uri":"$uri",'
'"domain":"$host",'
'"xff":"$http_x_forwarded_for",'
'"referer":"$http_referer",'
'"tcp_xff":"$proxy_protocol_addr",'
'"http_user_agent":"$http_user_agent",'
'"status":"$status"}';
access_log /apps/nginx/logs/access.log access_json;
sendfile on;
keepalive_timeout 65;
server {
listen 80;
server_name localhost;
location / {
root html;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}
EOF
# Validate the new config syntax.
# NOTE(review): only a syntax check — nginx is never reloaded/restarted here,
# so the JSON log format does not take effect yet. TODO confirm intent.
nginx -t
# Section 2 (part 2): ship the JSON-formatted access log to Elasticsearch.
# The json codec parses each line into fields — no grok filter needed.
tee /etc/logstash/conf.d/nginx-json-log-to-es.conf << "EOF"
input {
file {
path => "/apps/nginx/logs/access.log"
start_position => "end"
type => "nginx-json-accesslog"
stat_interval => "1"
codec => json
}
}
output {
if [type] == "nginx-json-accesslog" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "nginx-accesslog-2.107-%{+YYYY.MM.dd}"
user => "magedu"
password => "123456"
}}
}
EOF
# NOTE(review): start_position "end" here (vs "beginning" in section 1) skips
# pre-existing log lines — presumably deliberate; confirm.
systemctl restart logstash
3、基于 logstash 收集 java 日志并实现多行合并
# Section 3: collect elasticsearch (java) service logs, merging multi-line
# entries (e.g. stack traces) into single events with the multiline codec:
# any line NOT starting with a "[YYYY-MM-DD" timestamp is appended to the
# previous event (negate => true, what => previous).
tee /etc/logstash/conf.d/eslog2es.conf << "EOF"
input {
file {
path => "/data/eslogs/magedu-es-cluster1.log"
type => "eslog"
stat_interval => "1"
start_position => "beginning"
codec => multiline {
#pattern => "^\["
pattern => "^\[[0-9]{4}\-[0-9]{2}\-[0-9]{2}"
negate => "true"
what => "previous"
}
}
}
output {
if [type] == "eslog" {
elasticsearch {
hosts => ["192.168.56.113:9200"]
index => "magedu-eslog-%{+YYYY.ww}"
user => "magedu"
password => "123456"
}}
}
EOF
# NOTE(review): this pipeline targets 192.168.56.113 while other sections use
# .111 — verify the intended ES node.
systemctl restart logstash.service
4、基于 logstash 收集 syslog 类型日志 (以 haproxy 替代网络设备)
# Section 4 setup: install haproxy as a stand-in for a network device that
# emits syslog. (Original note dated 2022-12-03 13:49:55.)
apt update && apt install haproxy
# Back up the stock config before appending to it.
cp /etc/haproxy/haproxy.cfg{,.bak}
## Append a frontend/backend so haproxy generates log traffic to forward.
tee -a /etc/haproxy/haproxy.cfg << "EOF"
listen kibana-5601
bind 192.168.56.117:5601
mode http
log global
server kibana-server1 192.168.56.111:5601 check inter 3s fall 3 rise 3
EOF
systemctl restart haproxy.service
# Redirect haproxy's rsyslog output from the local file to a remote collector.
cp /etc/rsyslog.d/49-haproxy.conf{,.bak}
# "@@host:port" means forward via TCP (single "@" would be UDP).
# NOTE(review): port 514 is privileged — the logstash listener on the remote
# side must be able to bind it (root or capability); confirm.
sed -i 's#/var/log/haproxy.log#@@192.168.56.116:514#g' /etc/rsyslog.d/49-haproxy.conf
systemctl restart rsyslog.service
# Section 4 pipeline: receive the forwarded syslog stream (haproxy via
# rsyslog) and index it into Elasticsearch, bucketed by ISO week (%{+YYYY.ww}).
tee /etc/logstash/conf.d/syslog-haproxy-to-es.conf << "EOF"
input{
syslog {
type => "rsyslog-haproxy"
port => "514" #监听一个本地的端口
}}
output{
if [type] == "rsyslog-haproxy" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "magedu-rsyslog-haproxy-%{+YYYY.ww}"
user => "magedu"
password => "123456"
}}
}
EOF
# NOTE(review): the logstash service user normally cannot bind port 514
# (< 1024); a common workaround is a higher port (e.g. 1514) plus a matching
# change on the rsyslog side — confirm how this runs as-is.
systemctl restart logstash
5、logstash 收集日志并写入 Redis、再通过其它 logstash 消费至 elasticsearch 并保持 json 格式日志的解析
# Section 5 setup: install and configure redis as the log buffer between the
# collecting logstash and the consuming logstash.
apt update
apt-cache madison redis
apt install -y redis
# Keep a pristine copy of the stock config before editing it in place.
cp /etc/redis/redis.conf{,.bak}
# Listen on all interfaces and drop the default RDB snapshot schedule lines.
sed -i -e 's/bind 127.*/bind 0.0.0.0/g' -e '/save.*0/d' /etc/redis/redis.conf
# Require a password and explicitly disable persistence.
printf '%s\n' 'requirepass 123456' 'save ""' >> /etc/redis/redis.conf
systemctl restart redis
# Interactive smoke test of the new instance:
telnet 127.0.0.1 6379
# auth 123456
# keys *
# info
# lpop magedu-nginx-accesslog
# select 1 # switch to another redis logical database
# Section 5 (producer): collect nginx logs and buffer them in redis lists.
# Redis acts as the cache/queue in front of Elasticsearch.
tee /etc/logstash/conf.d/nginx-log-to-redis.conf << "EOF"
input {
file {
path => "/apps/nginx/logs/access.log"
type => "magedu-nginx-accesslog"
start_position => "beginning"
stat_interval => "1"
codec => "json" #对json格式日志进行json解析
}
file {
path => "/apps/nginx/logs/error.log"
type => "magedu-nginx-errorlog"
start_position => "beginning"
stat_interval => "1"
}
}
filter {
if [type] == "magedu-nginx-errorlog" {
grok {
match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
remove_field => "message" #删除源日志
}
}
}
output {
if [type] == "magedu-nginx-accesslog" {
redis {
data_type => "list"
key => "magedu-nginx-accesslog"
host => "192.168.56.115"
port => "6379"
db => "0"
password => "123456"
}
}
if [type] == "magedu-nginx-errorlog" {
redis {
data_type => "list"
key => "magedu-nginx-errorlog"
host => "192.168.56.115"
port => "6379"
db => "0"
password => "123456"
}
}
}
EOF
systemctl restart logstash
# Section 5 (consumer): a second logstash pops events from the redis lists
# and indexes them into Elasticsearch. The [type] set by the producer side
# survives the redis round-trip and is used for routing here.
tee /etc/logstash/conf.d/redis-to-es.conf << "EOF"
input {
redis {
data_type => "list"
key => "magedu-nginx-accesslog"
host => "192.168.56.115"
port => "6379"
db => "0"
password => "123456"
}
redis {
data_type => "list"
key => "magedu-nginx-errorlog"
host => "192.168.56.115"
port => "6379"
db => "0"
password => "123456"
}
}
output {
if [type] == "magedu-nginx-accesslog" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "magedu-nginx-redis-accesslog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}
}
if [type] == "magedu-nginx-errorlog" {
elasticsearch {
hosts => ["192.168.56.111:9200"]
index => "magedu-nginx-redis-errorlog-%{+yyyy.MM.dd}"
user => "magedu"
password => "123456"
}
}
}
EOF
systemctl restart logstash
6、基于 docker-compose 部署单机版本 ELK
# Section 6: deploy a single-node ELK stack with docker-compose.
# (The original was mangled onto one line: commands ran together, a broken
# "&&\ " continuation, and everything after the first "#" was swallowed as a
# comment. Re-split into the intended sequence.)
install -d /data/es
cd /data/es && git clone https://gitee.com/jiege-gitee/elk-docker-compose.git
# Start elasticsearch first so passwords can be initialized before the rest.
cd /data/es/elk-docker-compose && \
docker-compose up -d elasticsearch
# Set the built-in account passwords interactively (password used: magedu123).
docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-setup-passwords interactive
# If you chose a different password, update it in the service configs first:
# vim kibana/config/kibana.yml
# vim logstash/config/logstash.conf
# Bring up the remaining services (kibana, logstash).
docker-compose up -d

浙公网安备 33010602011771号