Kafka 配置

| 参数名称 | 作用 | 默认值 |
| ----------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------------------------------------- |
| advertised.host.name | Kafka broker 的主机名或 IP 地址,用于向客户端报告自己的位置 | 无 |
| advertised.listeners | Kafka broker 的监听器列表,用于向客户端报告其可用的网络地址和端口 | 无 |
| advertised.port | Kafka broker 的端口号,已弃用,建议使用 `advertised.listeners` | 无 |
| alter.config.policy.class.name | 用于改变配置的策略类 | 无 |
| alter.log.dirs.replication.quota.window\.num | 更改日志目录复制配额的窗口数量 | 11 |
| alter.log.dirs.replication.quota.window\.size.seconds | 更改日志目录复制配额的窗口大小(秒) | 1 |
| authorizer.class.name | 授权器类,用于访问控制 | kafka.security.auth.SimpleAclAuthorizer |
| auto.create.topics.enable | 是否启用自动创建主题 | true |
| auto.leader.rebalance.enable | 是否启用自动 leader 重新平衡 | true |
| background.threads | 后台线程数 | 10 |
| broker.id | Kafka broker 的唯一标识符 | 1 |
| broker.id.generation.enable | 是否启用 broker ID 自动生成 | true |
| broker.rack | Kafka broker 的机架标识符,用于机架感知 | null |
| client.quota.callback.class | 客户端配额回调类 | null |
| compression.type | 压缩类型 | producer |
| connection.failed.authentication.delay.ms | 连接失败后重新认证的延迟(毫秒) | 100 |
| connections.max.idle.ms | 最大空闲连接时间(毫秒) | 600000 |
| connections.max.reauth.ms | 最大重新认证时间(毫秒) | 0 |
| control.plane.listener.name | 控制平面监听器名称 | null |
| controlled.shutdown.enable | 是否启用受控关闭 | true |
| controlled.shutdown.max.retries | 受控关闭的最大重试次数 | 3 |
| controlled.shutdown.retry.backoff.ms | 受控关闭重试的退避时间(毫秒) | 5000 |
| controller.socket.timeout.ms | 控制器套接字超时时间(毫秒) | 30000 |
| create.topic.policy.class.name | 创建主题的策略类 | null |
| default.replication.factor | 创建主题时的默认副本数 | 2 |
| delegation.token.expiry.check.interval.ms | 委托令牌过期检查间隔(毫秒) | 3600000 |
| delegation.token.expiry.time.ms | 委托令牌过期时间(毫秒) | 86400000 |
| delegation.token.master.key | 委托令牌主密钥 | null |
| delegation.token.max.lifetime.ms | 委托令牌最大生命周期(毫秒) | 604800000 |
| delete.records.purgatory.purge.interval.requests | 删除记录净化间隔(请求次数) | 1 |
| delete.topic.enable | 是否启用删除主题 | true |
| fetch.purgatory.purge.interval.requests | 获取请求净化间隔(请求次数) | 1000 |
| group.initial.rebalance.delay.ms | 消费者组初始重新平衡延迟(毫秒) | 3000 |
| group.max.session.timeout.ms | 消费者组最大会话超时时间(毫秒) | 300000 |
| group.max.size | 消费者组最大大小 | 2147483647 |
| group.min.session.timeout.ms | 消费者组最小会话超时时间(毫秒) | 6000 |
| host.name | Kafka broker 的主机名 | 无 |
| inter.broker.listener.name | broker 之间通信的监听器名称 | null |
| inter.broker.protocol.version | broker 之间通信的协议版本 | 2.2-IV1 |
| kafka.metrics.polling.interval.secs | Kafka 指标轮询间隔(秒) | 10 |
| kafka.metrics.reporters | Kafka 指标报告器列表 | \[] |
| leader.imbalance.check.interval.seconds | 检查 leader 不平衡的间隔(秒) | 300 |
| leader.imbalance.per.broker.percentage | 每个 broker 的 leader 不平衡百分比 | 10 |
| listener.security.protocol.map | 监听器安全协议映射 | PLAINTEXT:PLAINTEXT,SSL:SSL,SASL\_PLAINTEXT:SASL\_PLAINTEXT,SASL\_SSL:SASL\_SSL |
| listeners | Kafka broker 的监听器列表 | SASL\_PLAINTEXT://0.0.0.0:29092 |
| log.cleaner.backoff.ms | 日志清理回退时间(毫秒) | 15000 |
| log.cleaner.dedupe.buffer.size | 日志清理去重缓冲区大小 | 134217728 |
| log.cleaner.delete.retention.ms | 日志清理删除保留时间(毫秒) | 86400000 |
| log.cleaner.enable | 是否启用日志清理 | true |
| log.cleaner.io.buffer.load.factor | 日志清理 I/O 缓冲区加载因子 | 0.9 |
| log.cleaner.io.buffer.size | 日志清理 I/O 缓冲区大小 | 524288 |
| log.cleaner.io.max.bytes.per.second | 日志清理 I/O 最大字节数(每秒) | 1.7976931348623157E308 |
| log.cleaner.min.cleanable.ratio | 日志清理最小可清理比率 | 0.5 |
| log.cleaner.min.compaction.lag.ms | 日志清理最小压缩延迟(毫秒) | 0 |
| log.cleaner.threads | 日志清理线程数 | 1 |
| log.cleanup.policy | 日志清理策略 | \[delete] |
| log.dir | 日志存储目录 | /tmp/kafka-logs |
| log.dirs | 日志存储目录列表 | /qaxdata/s/services/kafka/kafka\_29092/data |
| log.flush.interval.messages | 日志刷新间隔(消息数) | 9223372036854775807 |
| log.flush.interval.ms | 日志刷新间隔(毫秒) | null |
| log.flush.offset.checkpoint.interval.ms | 日志刷新偏移检查点间隔(毫秒) | 60000 |
| log.flush.scheduler.interval.ms | 日志刷新调度间隔(毫秒) | 9223372036854775807 |
| log.flush.start.offset.checkpoint.interval.ms | 日志刷新起始偏移检查点间隔(毫秒) | 60000 |
| log.index.interval.bytes | 日志索引间隔(字节数) | 4096 |
| log.index.size.max.bytes | 日志索引最大大小(字节数) | 10485760 |
| log.message.downconversion.enable | 是否启用消息降级转换 | true |
| log.message.format.version | 日志消息格式版本 | 2.2-IV1 |
| log.message.timestamp.difference.max.ms | 消息时间戳差异最大值(毫秒) | 9223372036854775807 |
| log.message.timestamp.type | 消息时间戳类型 | CreateTime |
| log.preallocate | 是否预分配日志文件 | false |
| log.retention.bytes | 日志保留大小(字节数),-1 表示不限制 | -1 |
| log.retention.check.interval.ms | 日志保留检查间隔(毫秒) | 300000 |
| log.retention.hours | 日志保留时间(小时) | 72 |
| log.retention.minutes | 日志保留时间(分钟) | null |
| log.retention.ms | 日志保留时间(毫秒) | null |
| log.roll.hours | 日志滚动时间(小时) | 168 |
| log.roll.jitter.hours | 日志滚动随机延迟(小时) | 0 |
| log.roll.jitter.ms | 日志滚动随机延迟(毫秒) | null |
| log.roll.ms | 日志滚动时间(毫秒) | null |
| log.segment.bytes | 日志段大小(字节数) | 1073741824 |
| log.segment.delete.delay.ms | 日志段删除延迟(毫秒) | 60000 |
| max.connections.per.ip | 每个 IP 地址的最大连接数 | 2147483647 |
| max.connections.per.ip.overrides | 每个 IP 地址的最大连接数覆盖列表 | 无 |
| max.incremental.fetch.session.cache.slots | 最大增量获取会话缓存槽位数 | 1000 |
| message.max.bytes | 消息最大大小(字节数) | 10485760 |
| metric.reporters | 指标报告器列表 | \[] |
| metrics.num.samples | 指标样本数量 | 2 |
| metrics.recording.level | 指标记录级别 | INFO |
| metrics.sample.window\.ms | 指标样本窗口时间(毫秒) | 30000 |
| min.insync.replicas | 最小同步副本数 | 1 |
| num.io.threads | I/O 线程数 | 8 |
| num.network.threads | 网络线程数 | 3 |
| num.partitions | 默认分区数 | 32 |
| num.recovery.threads.per.data.dir | 每个数据目录的恢复线程数 | 1 |
| num.replica.alter.log.dirs.threads | 更改复制日志目录的线程数 | null |
| num.replica.fetchers | 复制获取器线程数 | 3 |
| offset.metadata.max.bytes | 偏移元数据最大大小(字节数) | 4096 |
| offsets.commit.required.acks | 偏移提交所需确认数 | -1 |
| offsets.commit.timeout.ms | 偏移提交超时时间(毫秒) | 5000 |
| offsets.load.buffer.size | 加载偏移的缓冲区大小(字节数) | 5242880 |
| offsets.retention.check.interval.ms | 偏移保留检查间隔(毫秒) | 600000 |
| offsets.retention.minutes | 偏移保留时间(分钟) | 315360000 |
| offsets.topic.compression.codec | 偏移主题压缩 codec | 0 |
| offsets.topic.num.partitions | 偏移主题分区数 | 50 |
| offsets.topic.replication.factor | 偏移主题副本数 | 3 |
| offsets.topic.segment.bytes | 偏移主题段大小(字节数) | 104857600 |
| password.encoder.cipher.algorithm | 密码编码器密码算法 | AES/CBC/PKCS5Padding |
| password.encoder.iterations | 密码编码器迭代次数 | 4096 |
| password.encoder.key.length | 密码编码器密钥长度 | 128 |
| password.encoder.keyfactory.algorithm | 密码编码器密钥工厂算法 | null |
| password.encoder.old.secret | 密码编码器旧密钥 | null |
| password.encoder.secret | 密码编码器密钥 | null |
| port | Kafka broker 的端口号,已弃用,建议使用 `listeners` | 29092 |
| principal.builder.class | principal 构建器类 | null |
| producer.purgatory.purge.interval.requests | 生产者请求净化间隔(请求次数) | 1000 |
| queued.max.request.bytes | 队列中最大请求字节数,-1 表示不限制 | -1 |
| queued.max.requests | 队列中最大请求数 | 500 |
| quota.consumer.default | 默认消费者配额 | 9223372036854775807 |
| quota.producer.default | 默认生产者配额 | 9223372036854775807 |
| quota.window\.num | 配额窗口数量 | 11 |
| quota.window\.size.seconds | 配额窗口大小(秒) | 1 |
| replica.fetch.backoff.ms | 复制获取回退时间(毫秒) | 1000 |
| replica.fetch.max.bytes | 复制获取最大字节数 | 1048576 |
| replica.fetch.min.bytes | 复制获取最小字节数 | 1 |
| replica.fetch.response.max.bytes | 复制获取响应最大字节数 | 10485760 |
| replica.fetch.wait.max.ms | 复制获取等待最大时间(毫秒) | 500 |
| replica.high.watermark.checkpoint.interval.ms | 复制高水位标记检查点间隔(毫秒) | 5000 |
| replica.lag.time.max.ms | 复制滞后最大时间(毫秒) | 10000 |
| replica.socket.receive.buffer.bytes | 复制套接字接收缓冲区大小(字节数) | 65536 |
| replica.socket.timeout.ms | 复制套接字超时时间(毫秒) | 30000 |
| replication.quota.window\.num | 复制配额窗口数量 | 11 |
| replication.quota.window\.size.seconds | 复制配额窗口大小(秒) | 1 |
| request.timeout.ms | 请求超时时间(毫秒) | 30000 |
| reserved.broker.max.id | 预留的最大 broker ID | 1000 |
| sasl.client.callback.handler.class | SASL 客户端回调处理器类 | null |
| sasl.enabled.mechanisms | 启用的 SASL 机制列表 | \[PLAIN] |
| sasl.jaas.config | SASL JAAS 配置 | null |
| sasl.kerberos.kinit.cmd | Kerberos kinit 命令路径 | /usr/bin/kinit |
| sasl.kerberos.min.time.before.relogin | Kerberos 最小重新登录时间(毫秒) | 60000 |
| sasl.kerberos.principal.to.local.rules | Kerberos principal 到本地规则列表 | \[DEFAULT] |
| sasl.kerberos.service.name | Kerberos 服务名称 | null |
| sasl.kerberos.ticket.renew\.jitter | Kerberos 票据更新抖动 | 0.05 |
| sasl.kerberos.ticket.renew\.window\.factor | Kerberos 票据更新窗口因子 | 0.8 |
| sasl.login.callback.handler.class | SASL 登录回调处理器类 | null |
| sasl.login.class | SASL 登录类 | null |
| sasl.login.refresh.buffer.seconds | SASL 登录刷新缓冲时间(秒) | 300 |
| sasl.login.refresh.min.period.seconds | SASL 登录刷新最小周期(秒) | 60 |
| sasl.login.refresh.window\.factor | SASL 登录刷新窗口因子 | 0.8 |
| sasl.login.refresh.window\.jitter | SASL 登录刷新窗口抖动 | 0.05 |
| sasl.mechanism.inter.broker.protocol | broker 之间通信的 SASL 机制 | PLAIN |
| sasl.server.callback.handler.class | SASL 服务器回调处理器类 | null |
| socket.receive.buffer.bytes | 套接字接收缓冲区大小(字节数) | 102400 |
| socket.request.max.bytes | 套接字请求最大字节数 | 104857600 |
| socket.send.buffer.bytes | 套接字发送缓冲区大小(字节数) | 102400 |
| ssl.cipher.suites | SSL 密码套件列表 | \[] |
| ssl.client.auth | SSL 客户端认证模式(可选值:required / requested / none) | none |

 

 

[2025-06-27 17:43:18,722] INFO KafkaConfig values: 
	advertised.host.name = xiantest04v.xian.zzt.qianxin-inc.cn
	advertised.listeners = SASL_PLAINTEXT://xiantest04v.xian.zzt.qianxin-inc.cn:29092
	advertised.port = null
	alter.config.policy.class.name = null
	alter.log.dirs.replication.quota.window.num = 11
	alter.log.dirs.replication.quota.window.size.seconds = 1
	authorizer.class.name = kafka.security.auth.SimpleAclAuthorizer
	auto.create.topics.enable = true
	auto.leader.rebalance.enable = true
	background.threads = 10
	broker.id = 1
	broker.id.generation.enable = true
	broker.rack = null
	client.quota.callback.class = null
	compression.type = producer
	connection.failed.authentication.delay.ms = 100
	connections.max.idle.ms = 600000
	connections.max.reauth.ms = 0
	control.plane.listener.name = null
	controlled.shutdown.enable = true
	controlled.shutdown.max.retries = 3
	controlled.shutdown.retry.backoff.ms = 5000
	controller.socket.timeout.ms = 30000
	create.topic.policy.class.name = null
	default.replication.factor = 2
	delegation.token.expiry.check.interval.ms = 3600000
	delegation.token.expiry.time.ms = 86400000
	delegation.token.master.key = null
	delegation.token.max.lifetime.ms = 604800000
	delete.records.purgatory.purge.interval.requests = 1
	delete.topic.enable = true
	fetch.purgatory.purge.interval.requests = 1000
	group.initial.rebalance.delay.ms = 3000
	group.max.session.timeout.ms = 300000
	group.max.size = 2147483647
	group.min.session.timeout.ms = 6000
	host.name = 
	inter.broker.listener.name = null
	inter.broker.protocol.version = 2.2-IV1
	kafka.metrics.polling.interval.secs = 10
	kafka.metrics.reporters = []
	leader.imbalance.check.interval.seconds = 300
	leader.imbalance.per.broker.percentage = 10
	listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
	listeners = SASL_PLAINTEXT://0.0.0.0:29092
	log.cleaner.backoff.ms = 15000
	log.cleaner.dedupe.buffer.size = 134217728
	log.cleaner.delete.retention.ms = 86400000
	log.cleaner.enable = true
	log.cleaner.io.buffer.load.factor = 0.9
	log.cleaner.io.buffer.size = 524288
	log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
	log.cleaner.min.cleanable.ratio = 0.5
	log.cleaner.min.compaction.lag.ms = 0
	log.cleaner.threads = 1
	log.cleanup.policy = [delete]
	log.dir = /tmp/kafka-logs
	log.dirs = /qaxdata/s/services/kafka/kafka_29092/data
	log.flush.interval.messages = 9223372036854775807
	log.flush.interval.ms = null
	log.flush.offset.checkpoint.interval.ms = 60000
	log.flush.scheduler.interval.ms = 9223372036854775807
	log.flush.start.offset.checkpoint.interval.ms = 60000
	log.index.interval.bytes = 4096
	log.index.size.max.bytes = 10485760
	log.message.downconversion.enable = true
	log.message.format.version = 2.2-IV1
	log.message.timestamp.difference.max.ms = 9223372036854775807
	log.message.timestamp.type = CreateTime
	log.preallocate = false
	log.retention.bytes = -1
	log.retention.check.interval.ms = 300000
	log.retention.hours = 72
	log.retention.minutes = null
	log.retention.ms = null
	log.roll.hours = 168
	log.roll.jitter.hours = 0
	log.roll.jitter.ms = null
	log.roll.ms = null
	log.segment.bytes = 1073741824
	log.segment.delete.delay.ms = 60000
	max.connections.per.ip = 2147483647
	max.connections.per.ip.overrides = 
	max.incremental.fetch.session.cache.slots = 1000
	message.max.bytes = 10485760
	metric.reporters = []
	metrics.num.samples = 2
	metrics.recording.level = INFO
	metrics.sample.window.ms = 30000
	min.insync.replicas = 1
	num.io.threads = 8
	num.network.threads = 3
	num.partitions = 32
	num.recovery.threads.per.data.dir = 1
	num.replica.alter.log.dirs.threads = null
	num.replica.fetchers = 3
	offset.metadata.max.bytes = 4096
	offsets.commit.required.acks = -1
	offsets.commit.timeout.ms = 5000
	offsets.load.buffer.size = 5242880
	offsets.retention.check.interval.ms = 600000
	offsets.retention.minutes = 315360000
	offsets.topic.compression.codec = 0
	offsets.topic.num.partitions = 50
	offsets.topic.replication.factor = 3
	offsets.topic.segment.bytes = 104857600
	password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
	password.encoder.iterations = 4096
	password.encoder.key.length = 128
	password.encoder.keyfactory.algorithm = null
	password.encoder.old.secret = null
	password.encoder.secret = null
	port = 29092
	principal.builder.class = null
	producer.purgatory.purge.interval.requests = 1000
	queued.max.request.bytes = -1
	queued.max.requests = 500
	quota.consumer.default = 9223372036854775807
	quota.producer.default = 9223372036854775807
	quota.window.num = 11
	quota.window.size.seconds = 1
	replica.fetch.backoff.ms = 1000
	replica.fetch.max.bytes = 1048576
	replica.fetch.min.bytes = 1
	replica.fetch.response.max.bytes = 10485760
	replica.fetch.wait.max.ms = 500
	replica.high.watermark.checkpoint.interval.ms = 5000
	replica.lag.time.max.ms = 10000
	replica.socket.receive.buffer.bytes = 65536
	replica.socket.timeout.ms = 30000
	replication.quota.window.num = 11
	replication.quota.window.size.seconds = 1
	request.timeout.ms = 30000
	reserved.broker.max.id = 1000
	sasl.client.callback.handler.class = null
	sasl.enabled.mechanisms = [PLAIN]
	sasl.jaas.config = null
	sasl.kerberos.kinit.cmd = /usr/bin/kinit
	sasl.kerberos.min.time.before.relogin = 60000
	sasl.kerberos.principal.to.local.rules = [DEFAULT]
	sasl.kerberos.service.name = null
	sasl.kerberos.ticket.renew.jitter = 0.05
	sasl.kerberos.ticket.renew.window.factor = 0.8
	sasl.login.callback.handler.class = null
	sasl.login.class = null
	sasl.login.refresh.buffer.seconds = 300
	sasl.login.refresh.min.period.seconds = 60
	sasl.login.refresh.window.factor = 0.8
	sasl.login.refresh.window.jitter = 0.05
	sasl.mechanism.inter.broker.protocol = PLAIN
	sasl.server.callback.handler.class = null
	security.inter.broker.protocol = SASL_PLAINTEXT
	socket.receive.buffer.bytes = 102400
	socket.request.max.bytes = 104857600
	socket.send.buffer.bytes = 102400
	ssl.cipher.suites = []
	ssl.client.auth = none

 

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from 
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
#listeners=PLAINTEXT://:9092

# Hostname and port the broker will advertise to producers and consumers. If not set, 
# it uses the value for "listeners" if configured.  Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
#log.dirs=/da2/data/kafka-sec/data,/da1/s/data/kafka-sec-nfs/vrs18/data
log.dirs=/da2/data/kafka-sec/data

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=32
default.replication.factor=2
num.replica.fetchers=3

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=3

############################# Internal Topic Settings  #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

#message.max.bytes=5242880
message.max.bytes=20971520
#replica.fetch.max.bytes=6291456
replica.fetch.max.bytes=21971520
# NOTE(review): fetch.message.max.bytes is an (old) consumer-side config and is
# ignored when placed in the broker's server.properties — confirm whether it was
# intended for a consumer config file instead.
fetch.message.max.bytes=21971520

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
offsets.retention.minutes=315360000

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=vrs18.ops.zzyc.360es.cn:12181,vrs20.ops.zzyc.360es.cn:12181,vrs21.ops.zzyc.360es.cn:12181/kafka

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000


############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0

listeners=SASL_PLAINTEXT://0.0.0.0:19092
advertised.listeners=SASL_PLAINTEXT://vrs18.ops.zzyc.360es.cn:19092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
allow.everyone.if.no.acl.found=true

  

posted on 2025-07-01 13:02  吃草的青蛙  阅读(22)  评论(0)    收藏  举报

导航