部署 KRaft 模式下的 Kafka

环境介绍

节点说明

kafka1: 192.168.174.100
kafka2: 192.168.174.101
kafka3: 192.168.174.102

安装 java

# yum -y install java

or

apt -y install openjdk-17-jdk

创建 kafka 用户

# groupadd kafka && useradd -M -N -g kafka  -s /bin/false -c "kafka Server"  kafka

下载 kafka

# wget https://dlcdn.apache.org/kafka/3.8.0/kafka_2.13-3.8.0.tgz

解压 kafka

# mkdir -p /data/kafka && tar xf kafka_2.13-3.8.0.tgz -C /data/kafka
# ln -sv /data/kafka/kafka_2.13-3.8.0 /data/kafka/kafka

配置 kafka

配置文件路径:config/kraft/server.properties
############################# Server Basics #############################

# The role of this server. Setting this puts us in KRaft mode
process.roles=broker,controller

# The node id associated with this instance's roles
# 注意:每个节点的 node.id 必须唯一,且与 controller.quorum.voters 中的编号对应
# (kafka1 为 1,kafka2 为 2,kafka3 为 3;本例为 kafka2 节点)
node.id=2

# The connect string for the controller quorum
controller.quorum.voters=1@192.168.174.100:9093,2@192.168.174.101:9093,3@192.168.174.102:9093

listeners=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
inter.broker.listener.name=PLAINTEXT
# advertised.listeners 必须使用其他节点和客户端可达的本机地址,不能写 localhost(本例为 kafka2 节点)
advertised.listeners=PLAINTEXT://192.168.174.101:9092,EXTERNAL://192.168.174.101:9094
listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,EXTERNAL:PLAINTEXT


log.dirs=/data/kafka/tmp/kraft-combined-logs

数据目录授权

# chown -R kafka:kafka /data/kafka/

部署 kafka

生成集群 UUID

$ KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"

设置日志目录的格式(注意:集群内所有节点必须使用同一个 KAFKA_CLUSTER_ID 进行格式化)

$ bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties

命令行启动 Kafka 服务器

$ bin/kafka-server-start.sh -daemon config/kraft/server.properties

查看服务端口

# netstat -tnlp
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp6       0      0 :::9092                :::*                    LISTEN      619535/java         
tcp6       0      0 :::9093                :::*                    LISTEN      619535/java   

firewalld 放行端口(需为集群内每个对端节点的地址分别放行 9092/9093 端口,以下仅以 192.168.174.100 为例)

firewall-cmd --permanent --zone=public --add-rich-rule='rule family="ipv4" source address="192.168.174.100" port protocol="tcp" port="9092" accept'
firewall-cmd --permanent --zone=public --add-rich-rule='rule family="ipv4" source address="192.168.174.100" port protocol="tcp" port="9093" accept'
firewall-cmd --reload

测试 kafka

创建 topic

# bin/kafka-topics.sh --create --topic wgs-test-event --bootstrap-server 192.168.174.100:9092 --partitions 3 --replication-factor 2
Created topic wgs-test-event.

查看 topic

# bin/kafka-topics.sh --list --bootstrap-server 192.168.174.100:9092
__consumer_offsets
wgs-test-event

产生消息 

# bin/kafka-console-producer.sh --topic wgs-test-event --bootstrap-server 192.168.174.100:9092

消费消息

# bin/kafka-console-consumer.sh --topic wgs-test-event --from-beginning --bootstrap-server 192.168.174.100:9092

删除 topic

# bin/kafka-topics.sh --delete --topic wgs-test-event --bootstrap-server 192.168.174.100:9092

systemctl 启动 kafka

cat > /lib/systemd/system/kafka.service << EOF
[Unit]
Description=Apache Kafka 
Documentation=http://kafka.apache.org/
After=network.target 

[Service]
Type=simple
User=kafka 
ExecStart=/data/kafka/kafka/bin/kafka-server-start.sh /data/kafka/kafka/config/kraft/server.properties
ExecStop=/data/kafka/kafka/bin/kafka-server-stop.sh
LimitNOFILE=1000000
TimeoutStopSec=180
Restart=on-failure

[Install]
WantedBy=multi-user.target                   
EOF

kafka 常用命令

对已有 topic 增加分区

bin/kafka-topics.sh --alter --topic your_topic_name --partitions new_partition_count --bootstrap-server your_kafka_broker
your_topic_name:要增加分区的 topic 名称。
new_partition_count:新的总分区数(例如,如果原来有 3 个分区,且想增加到 5 个分区,指定为 5)。
your_kafka_broker:Kafka 代理的地址(如 localhost:9092)

重置 offset

按时间点重置Offset

# 示例:重置到2023年11月1日下午3点30分
kafka-consumer-groups.sh --bootstrap-server kafka1:9092,kafka2:9092 \
--group collect_network_device_sn_group \
--topic network_device_metrics \
--reset-offsets \
--to-datetime "2023-11-01T15:30:00.000" \
--execute

重置所有 topic

kafka-consumer-groups.sh --bootstrap-server <Kafka地址:端口> \
--group collect_network_device_sn_group \
--all-topics \
--reset-offsets \
--to-datetime "YYYY-MM-DDTHH:MM:SS.sss" \
--execute

指定分区重置

kafka-consumer-groups.sh --bootstrap-server <Kafka地址:端口> \
--group collect_network_device_sn_group \
--topic <主题名称> \
--partition 0,1,2 \
--reset-offsets \
--to-datetime "YYYY-MM-DDTHH:MM:SS.sss" \
--execute

验证重置结果

kafka-consumer-groups.sh --bootstrap-server <Kafka地址:端口> \
--group collect_network_device_sn_group \
--describe

参考文档

https://kafka.apache.org/documentation/

posted @ 2024-10-25 13:45  小吉猫  阅读(395)  评论(0)    收藏  举报