centos stream 9安装kafka,连通ELK

1.centos stream 9安装kafka

1.安装 Java 和基础工具
# Kafka 4.x requires Java 17+; the headless JRE is sufficient for a broker.
dnf install -y java-17-openjdk-headless wget tar

2.创建运行用户和目录
# Dedicated non-login system account; '|| true' keeps re-runs idempotent
# if the user already exists.
useradd --system --home-dir /opt/kafka --shell /sbin/nologin kafka || true
# /etc/kafka holds the broker config; /var/lib/kafka/data is the KRaft log dir.
mkdir -p /opt /etc/kafka /var/lib/kafka/data

3.下载并解压 Kafka
# Download and unpack Kafka, then point /opt/kafka at the versioned
# directory so a future upgrade is just a symlink swap (ln -sfn).
# NOTE(review): no checksum/signature verification of the tarball —
# consider validating the SHA512 published on downloads.apache.org.
cd /usr/local/src
wget https://downloads.apache.org/kafka/4.2.0/kafka_2.13-4.2.0.tgz
tar -xzf kafka_2.13-4.2.0.tgz -C /opt
ln -sfn /opt/kafka_2.13-4.2.0 /opt/kafka
chown -R kafka:kafka /opt/kafka_2.13-4.2.0 /opt/kafka /etc/kafka /var/lib/kafka

4.配置kafka broker /etc/kafka/server.properties
# Single-node KRaft configuration: this process is both broker and
# controller (combined mode), so no ZooKeeper is needed.
process.roles=broker,controller
node.id=1
# Broker listener on all interfaces; controller listener stays on
# loopback since this one node is the whole quorum.
listeners=PLAINTEXT://0.0.0.0:9092,CONTROLLER://127.0.0.1:9093
# Address handed to clients (filebeat/logstash) in metadata responses.
advertised.listeners=PLAINTEXT://10.10.10.1:9092
listener.security.protocol.map=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
controller.listener.names=CONTROLLER
inter.broker.listener.name=PLAINTEXT
# Single-voter quorum: node 1 reachable on the loopback controller port.
controller.quorum.voters=1@127.0.0.1:9093
log.dirs=/var/lib/kafka/data
num.partitions=2
# Replication factor 1 everywhere — acceptable only because there is a
# single broker; no redundancy.
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
group.initial.rebalance.delay.ms=0
# Topics are created explicitly with kafka-topics.sh; a mistyped producer
# topic then fails loudly instead of silently creating a new topic.
auto.create.topics.enable=false
# Retain data for 7 days.
log.retention.hours=168
# Stamp records with broker receive time instead of producer time.
log.message.timestamp.type=LogAppendTime

5.初始化 KRaft 存储
# Generate a cluster id and format the KRaft metadata/log directory.
export KAFKA_CLUSTER_ID=$(/opt/kafka/bin/kafka-storage.sh random-uuid)
/opt/kafka/bin/kafka-storage.sh format -t "$KAFKA_CLUSTER_ID" -c /etc/kafka/server.properties
# Formatting ran as root; hand ownership back to the service account.
chown -R kafka:kafka /var/lib/kafka/data

6.配置kafka为系统服务 /etc/systemd/system/kafka.service
# systemd unit for Kafka in KRaft mode (no ZooKeeper dependency).
[Unit]
Description=Apache Kafka (KRaft)
After=network.target
[Service]
Type=simple
User=kafka
Group=kafka
# NOTE(review): assumes the /usr/lib/jvm/jre-17 symlink provided by
# java-17-openjdk-headless on CentOS/RHEL — verify on the target host.
Environment="JAVA_HOME=/usr/lib/jvm/jre-17"
ExecStart=/opt/kafka/bin/kafka-server-start.sh /etc/kafka/server.properties
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
Restart=on-failure
RestartSec=5
# Kafka keeps many segment files and sockets open; raise the fd limit.
LimitNOFILE=100000
[Install]
WantedBy=multi-user.target

7.启动kafka,开放防火墙端口
# Pick up the new unit file, enable at boot, start now, and verify.
systemctl daemon-reload
systemctl enable --now kafka
systemctl status kafka --no-pager -l
# Open only the broker port 9092; the controller port 9093 is bound to
# loopback (see server.properties) and needs no firewall rule.
firewall-cmd --permanent --add-port=9092/tcp
firewall-cmd --reload

8.kafka创建两个topic
# Create the two per-module topics PLUS the fallback topic used by the
# filebeat "topic:" default. The broker runs with
# auto.create.topics.enable=false, so every topic filebeat may write to
# must exist up front — without "filebeat-misc", any event matching
# neither the suricata nor the zeek rule would fail to publish.
for t in filebeat-suricata filebeat-zeek filebeat-misc; do
  /opt/kafka/bin/kafka-topics.sh --create \
    --topic "$t" \
    --bootstrap-server 10.10.10.1:9092 \
    --partitions 2 \
    --replication-factor 1
done
# Confirm all topics exist.
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 10.10.10.1:9092

2.logstash新增kafka pipeline

1.新增/etc/logstash/conf.d/kafka-suricata-in.conf
# Consume suricata events from Kafka and hand them to the existing
# "suricata" pipeline via logstash pipeline-to-pipeline communication.
input {
  kafka {
    bootstrap_servers => "10.10.10.1:9092"
    topics => ["filebeat-suricata"]
    group_id => "logstash-suricata"
    client_id => "logstash-suricata-10.10.10.1"
    # On first run (no committed offset) start from the oldest message
    # so no backlog is lost.
    auto_offset_reset => "earliest"
    # One consumer thread per partition (the topic has 2 partitions).
    consumer_threads => 2
    # Attach topic/partition/offset metadata under [@metadata][kafka].
    decorate_events => "basic"
    # Filebeat publishes JSON; decode it back into event fields.
    codec => json
  }
}
output {
  # Virtual address — the downstream pipeline must declare
  # input { pipeline { address => "suricata" } }.
  pipeline { send_to => "suricata" }
}

2.新增/etc/logstash/conf.d/kafka-zeek-in.conf
# Consume zeek events from Kafka and hand them to the existing "zeek"
# pipeline via logstash pipeline-to-pipeline communication.
input {
  kafka {
    bootstrap_servers => "10.10.10.1:9092"
    topics => ["filebeat-zeek"]
    group_id => "logstash-zeek"
    client_id => "logstash-zeek-10.10.10.1"
    # On first run (no committed offset) start from the oldest message
    # so no backlog is lost.
    auto_offset_reset => "earliest"
    # One consumer thread per partition (the topic has 2 partitions).
    consumer_threads => 2
    # Attach topic/partition/offset metadata under [@metadata][kafka].
    decorate_events => "basic"
    # Filebeat publishes JSON; decode it back into event fields.
    codec => json
  }
}
output {
  # Virtual address — the downstream pipeline must declare
  # input { pipeline { address => "zeek" } }.
  pipeline { send_to => "zeek" }
}

3.修改 pipelines.yml,添加kafka配置
- pipeline.id: beats_in
  path.config: "/etc/logstash/conf.d/beats-in.conf"
- pipeline.id: kafka_suricata_in
  path.config: "/etc/logstash/conf.d/kafka-suricata-in.conf"
- pipeline.id: kafka_zeek_in
  path.config: "/etc/logstash/conf.d/kafka-zeek-in.conf"
# The kafka input pipelines send to the virtual addresses "suricata" and
# "zeek", so one sink pipeline per address must also be registered here.
# The original entry below had no path.config (logstash rejects a
# pipeline without one) and the zeek sink was missing entirely.
# NOTE(review): the paths are assumed from the naming scheme — point
# them at whichever files declare
#   input { pipeline { address => "suricata" } }  /  address => "zeek".
- pipeline.id: suricata-beats
  path.config: "/etc/logstash/conf.d/suricata.conf"
- pipeline.id: zeek-beats
  path.config: "/etc/logstash/conf.d/zeek.conf"

4.检验并重启logstash
# Validate every pipeline config (-t) as the logstash user before
# restarting, then tail the journal for startup errors.
sudo -u logstash /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t
systemctl restart logstash
journalctl -u logstash -n 100 --no-pager

3.修改filebeat,按topic进行分流

1.修改/etc/filebeat/filebeat.yml
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
output.kafka:
  hosts: ["10.10.10.1:9092"]
  # Kafka protocol version the client assumes.
  # NOTE(review): Kafka 4.x brokers dropped support for clients below
  # protocol 2.1, so this is the floor — confirm against the deployed
  # broker version.
  version: "2.1.0"
  # Wait for the partition leader's ack only (not the full ISR).
  required_acks: 1
  compression: gzip
  client_id: "filebeat-10.10.10.2"
  # Fallback topic for events matching none of the rules below.
  # NOTE(review): auto.create.topics.enable=false on the broker, so this
  # topic must be created explicitly or such events fail to publish.
  topic: "filebeat-misc"
  topics:
    # Route by the module that produced the event.
    - topic: "filebeat-suricata"
      when.equals:
        event.module: "suricata"
    - topic: "filebeat-zeek"
      when.equals:
        event.module: "zeek"
  codec.json:
    pretty: false

2.检验并重启filebeat
# Syntax-check the filebeat config before restarting the service, then
# tail the journal for errors.
filebeat test config -e -c /etc/filebeat/filebeat.yml
systemctl restart filebeat
journalctl -u filebeat -n 100 --no-pager

4.filebeat加载suricata、zeek的ingest pipeline、index management、dashboard

1.加载ingest pipelines
# Load the suricata/zeek ingest pipelines directly into Elasticsearch.
# The configured kafka output must be disabled for setup and an
# elasticsearch output supplied via -E overrides instead.
# NOTE(review): passing the password on the command line exposes it in
# shell history and `ps` output — prefer the beats keystore or an env
# var in production.
filebeat setup --pipelines --modules suricata,zeek \
  --force-enable-module-filesets \
  -c /etc/filebeat/filebeat.yml \
  -E output.kafka.enabled=false \
  -E output.elasticsearch.hosts='["https://10.10.10.1:9200"]' \
  -E output.elasticsearch.username='elastic' \
  -E output.elasticsearch.password='jq5SSXIrIUXW=xDUWQRP' \
  -E output.elasticsearch.ssl.certificate_authorities='["/etc/filebeat/certs/http_ca.crt"]'

2.加载index template 和 dashboards
# Load the index template (via Elasticsearch) and the dashboards (via
# Kibana). Same -E override pattern as the pipeline setup above.
# NOTE(review): credentials on the command line leak into shell history
# and `ps` — prefer the beats keystore in production.
filebeat setup --index-management --dashboards \
  -c /etc/filebeat/filebeat.yml \
  -E output.kafka.enabled=false \
  -E output.elasticsearch.hosts='["https://10.10.10.1:9200"]' \
  -E output.elasticsearch.username='elastic' \
  -E output.elasticsearch.password='jq5SSXIrIUXW=xDUWQRP' \
  -E output.elasticsearch.ssl.certificate_authorities='["/etc/filebeat/certs/http_ca.crt"]' \
  -E setup.kibana.host='10.10.10.1:5601' \
  -E setup.kibana.username='elastic' \
  -E setup.kibana.password='jq5SSXIrIUXW=xDUWQRP'

5.检验数据通路filebeat -> kafka -> logstash -> ElasticSearch

1.检验kafka是否收到两类事件
# Pull the first 5 messages from each topic to confirm filebeat events
# are arriving; the consumer exits after --max-messages.
for topic in filebeat-suricata filebeat-zeek; do
  /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 10.10.10.1:9092 --topic "$topic" --from-beginning --max-messages 5
done

2.logstash是否成功消费
# Follow the logstash journal and surface kafka/consumer related lines.
# NOTE(review): `egrep` is deprecated in recent GNU grep; `grep -Ei` is
# the equivalent.
journalctl -u logstash -f | egrep -i 'kafka|suricata|zeek|error|exception'

3.ElasticSearch是否入库
# Confirm the target indices exist and hold documents.
# NOTE(review): the index name pattern depends on the downstream
# suricata/zeek pipeline outputs (not shown here) — verify the pattern
# those pipelines actually write to.
curl --cacert /etc/logstash/certs/http_ca.crt -u elastic:jq5SSXIrIUXW=xDUWQRP \
  "https://10.10.10.1:9200/_cat/indices/filebeat-suricata*,filebeat-zeek*?v"

 

posted @ 2026-03-09 16:18  岐岐卡卡西  阅读(0)  评论(0)    收藏  举报