Spring Boot: initializing topics and expanding partitions with KafkaAdminClient
Topic and partition handling
If topics are not created with an explicit partition count, Kafka auto-creates them on first use with the broker defaults (auto.create.topics.enable=true, num.partitions=1), so each topic ends up with a single partition, which limits consumer parallelism and hurts performance.
import com.ahsz.uomp.common.enums.ExporterTypeEnum;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
 * @author: huang wei
 * @create: 2024-08-12 15:57
 */
@Slf4j
@Component
public class InitKafkaTopic implements InitializingBean {
    @Autowired
    private AdminClient adminClient;
    @Value("${spring.kafka.listener.concurrency:5}")
    private Integer concurrency;
    @Override
    public void afterPropertiesSet() {
        initCollectTask();
    }
    /**
     * @Description: Topic initialization: create missing topics and expand partitions as needed
     * @author: hw
     * @date: 2025/3/11 15:46
     */
    private void initCollectTask() {
        log.info("开始初始化topic任务……");
        try {
            // 获取已经存在的topic
            Set<String> topics = getAllTopic();
            // 获取要初始化的topic
            ExporterTypeEnum[] values = ExporterTypeEnum.values();
            for (ExporterTypeEnum value : values) {
                String name = value.getName();
                try {
                    // 判断topic是否存在,不存在则创建
                    if(!topics.contains(name)){
                        testCreateTopic(name, concurrency);
                        log.info("初始化topic: {},numPartitions: {}", name, concurrency);
                    }else {
                        // 判断分区数
                        DescribeTopicsResult describeTopics = adminClient.describeTopics(Collections.singleton(name));
                        TopicDescription topicDescription = describeTopics.values().get(name).get(); // 获取主题描述信息
                        int partitionCount = topicDescription.partitions().size(); // 获取分区数
                        // 分区扩容
                        if(partitionCount < concurrency){
                            NewPartitions newPartitions = NewPartitions.increaseTo(concurrency);
                            CreatePartitionsResult result = adminClient.createPartitions(Collections.singletonMap(name, newPartitions));
                            result.all().get(30, TimeUnit.SECONDS); // 设置超时时间,同步等待操作完成
                            log.info("扩容topic: {}, oldPartition:{},newPartition: {}", name, partitionCount, concurrency);
                        }
                    }
                }catch (Exception e){
                    log.error("topic:{}初始化出错", name, e);
                }
            }
        } catch (Exception e) {
            log.error("初始化topic任务出错", e);
        } finally {
            adminClient.close();
        }
    }
    /***
     * Create a topic and wait for the creation to complete.
     * numPartitions : number of partitions
     * replicationFactor : replication factor (hard-coded to 1 here; raise it on multi-broker clusters)
     */
    public void createTopic(String topicName, Integer numPartitions) throws Exception {
        NewTopic topic = new NewTopic(topicName, numPartitions, (short) 1);
        // createTopics is asynchronous; block briefly so the topic exists before it is used
        adminClient.createTopics(Collections.singletonList(topic)).all().get(30, TimeUnit.SECONDS);
    }
    /**
     * Fetch the names of all existing topics
     * @throws Exception
     */
    public Set<String> getAllTopic() throws Exception {
        ListTopicsResult listTopics = adminClient.listTopics();
        Set<String> topics = listTopics.names().get();
        log.info("获取已存在topic:{}", JSON.toJSONString(topics));
        if(topics == null || topics.isEmpty()){
            return new HashSet<>();
        }
        return topics;
    }
}
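If the set of topics is known up front, Spring Kafka can also do this declaratively: KafkaAdmin creates any NewTopic beans it finds at startup, and recent Spring Kafka versions will also add partitions when a bean declares more than the existing topic has. A minimal sketch, assuming Spring Kafka 2.3+ for TopicBuilder; the topic name "demo-topic" and its settings are illustrative:
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class TopicDeclarationConfig {
    // KafkaAdmin picks up NewTopic beans and creates the topics on context startup
    @Bean
    public NewTopic demoTopic() {
        return TopicBuilder.name("demo-topic") // hypothetical topic name
                .partitions(5)                 // align with listener concurrency
                .replicas(1)
                .build();
    }
}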
Kafka configuration file
spring:
  # Kafka configuration
  kafka:
    bootstrap-servers: 127.0.0.1:10004
    listener:
      # Number of consumer threads in the listener container; keep (threads per instance * instance count) <= the topic's partition count
      concurrency: 3
    # Kafka consumer configuration
    consumer:
      # Unique identifier for the consumer group; all consumers in the group share its workload
      group-id: ${spring.application.name}
      enable-auto-commit: true
      auto-offset-reset: latest
      auto-commit-interval: 1000
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    # Kafka producer configuration
    producer:
      # Number of retries after a send failure; must be greater than 0 when transactions are enabled
      retries: 0
      batch:
        size: 16384
      linger: 1
      buffer:
        memory: 1024000
      # Serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
Kafka producer configuration class. Note that producer.batch.size, producer.linger and producer.buffer.memory above are custom keys bound here via @Value; they are not standard Spring Boot spring.kafka.producer.* properties.
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.*;
import java.util.HashMap;
import java.util.Map;
/**
 * @author: huang wei
 * @create: 2024-08-07 15:25
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;
    @Value("${spring.kafka.producer.retries}")
    private int retries;
    @Value("${spring.kafka.producer.batch.size}")
    private int batchSize;
    @Value("${spring.kafka.producer.linger}")
    private int linger;
    @Value("${spring.kafka.producer.buffer.memory}")
    private int bufferMemory;
    @Value("${spring.kafka.producer.key-serializer}")
    private String keyDeserializer;
    @Value("${spring.kafka.producer.value-serializer}")
    private String valueDeserializer;
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
        return props;
    }
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        // Clear the context class loader so the Kafka client falls back to its own
        // class loader when loading serializers (a common fat-jar workaround)
        Thread.currentThread().setContextClassLoader(null);
        return new KafkaTemplate<String, String>(producerFactory());
    }
    @Bean
    public KafkaAdmin kafkaAdmin() {
        KafkaAdmin admin = new KafkaAdmin(producerConfigs());
        return admin;
    }
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfigurationProperties());
    }
}
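Reusing the producer property map for the AdminClient works, but the AdminClient will log warnings about producer-only keys such as batch.size. A minimal alternative sketch that passes only the connection settings the AdminClient actually needs (uses org.apache.kafka.clients.admin.AdminClientConfig):
    @Bean
    public AdminClient adminClient() {
        Map<String, Object> props = new HashMap<>();
        // AdminClient only needs connection settings, not producer tuning
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        return AdminClient.create(props);
    }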
Kafka consumer configuration class
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
/**
 * @author: huang wei
 * @create: 2024-08-07 15:25
 */
@Configuration
public class KafkaConsumerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;
    @Value("${spring.kafka.consumer.key-deserializer}")
    private String keyDeserializer;
    @Value("${spring.kafka.consumer.value-deserializer}")
    private String valueDeserializer;
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.listener.concurrency}")
    private String concurrency;
    public Map<String, Object> getCommonProperties() {
        Map<String, Object> properties = new HashMap<String, Object>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        // default group id
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return properties;
    }
    /**
     * Kafka listener container factory
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory());
        if (StringUtils.isNotBlank(concurrency)) {
            factory.setConcurrency(Integer.parseInt(concurrency));
        } else {
            // fall back to 5 consumer threads when no concurrency is configured
            factory.setConcurrency(5);
        }
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(getCommonProperties());
    }
}
Sending messages to Kafka
@Resource
private KafkaTemplate<String, String> kafkaTemplate;

public void metricPushKafka(String message, String topic) {
    try {
        ListenableFuture<SendResult<String, String>> listenableFuture = kafkaTemplate.send(topic, message);
        listenableFuture.addCallback(new SuccessCallback<SendResult<String, String>>() {
            @Override
            public void onSuccess(SendResult<String, String> result) {
                log.info("[{}] metric pushed to Kafka", topic);
            }
        }, new FailureCallback() {
            @Override
            public void onFailure(Throwable ex) {
                log.error("[{}] metric push to Kafka failed", topic, ex);
            }
        });
    } catch (Exception e) {
        log.error("[{}] exception while pushing to Kafka", topic, e);
    }
}
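The ListenableFuture callback above is the Spring Kafka 2.x API. From Spring Kafka 3.0 (Spring Boot 3), KafkaTemplate.send() returns a CompletableFuture instead, so the same logic would look roughly like this sketch:
public void metricPushKafka(String message, String topic) {
    kafkaTemplate.send(topic, message).whenComplete((result, ex) -> {
        if (ex == null) {
            log.info("[{}] metric pushed to Kafka", topic);
        } else {
            log.error("[{}] metric push to Kafka failed", topic, ex);
        }
    });
}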
Kafka message listener
@KafkaListener(id = "消费id",  topics = "topic", containerFactory = "kafkaListenerContainerFactory")
    public void receiveQueue(String message) {
        log.info("kafka消费数据:{}", message);
        if (StringUtils.isNotBlank(message)) {
            try {
                //数据处理
                BaseHandle.exporterHandlerExecutor.execute(new HandleThread(message));
            } catch (Exception e) {
                log.error("snmp_exporter数据处理:{}出错,", message, e);
            }
        }
    }
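To verify that messages actually spread across the expanded partitions, the listener can ask for the partition each record arrived on. A minimal sketch; the header constant is KafkaHeaders.RECEIVED_PARTITION_ID in Spring Kafka 2.x (renamed to RECEIVED_PARTITION in 3.0), and the listener id and topic are placeholders:
@KafkaListener(id = "partition-check", topics = "topic", containerFactory = "kafkaListenerContainerFactory")
public void receiveWithPartition(String message,
                                 @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
    // log the source partition to confirm the expansion took effect
    log.info("Consumed message from partition {}: {}", partition, message);
}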