Spring Boot 整合 Kafka:实现消费者使用 Redis 存储消费偏移量(offset)

一. 环境

springboot 2.0.4

二. pom添加依赖

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>

<dependency>
	<groupId>redis.clients</groupId>
	<artifactId>jedis</artifactId>
	<version>3.3.0</version>
</dependency>

三. 添加配置文件

spring:
  kafka:
    #集群输入集群IP,这里展示单机
    bootstrap-servers: cdh1:9092
    listener:
      ack-mode: manual
    consumer:
      # 自动提交的时间间隔 在spring boot 2.X 版本中这里采用的是值的类型为Duration 需要符合特定的格式,如1S,1M,2H,5D
      auto-commit-interval: 10S
      # 该属性指定了消费者在读取一个没有偏移量的分区或者偏移量无效的情况下该作何处理:
      # latest(默认值)在偏移量无效的情况下,消费者将从最新的记录开始读取数据(在消费者启动之后生成的记录)
      # earliest :在偏移量无效的情况下,消费者将从起始位置读取分区的记录
      auto-offset-reset: latest
      # 是否自动提交偏移量,默认值是true,为了避免出现重复数据和数据丢失,可以把它设置为false,然后手动提交偏移量
      enable-auto-commit: false
      # 键的反序列化方式
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # 值的反序列化方式
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer

四. 创建配置文件

@Configuration
public class KafkaConf {

    @Bean(name = "kafkaConsumerRebalanceListener")
    public KafkaConsumerRebalanceListener getKafkaConsumerRebalanceListener(){
        // Expose the rebalance listener as a named bean so the container factory below can inject it.
        KafkaConsumerRebalanceListener rebalanceListener = new KafkaConsumerRebalanceListener();
        return rebalanceListener;
    }

    /**
     * Builds the listener container factory used with MANUAL ack mode: each batch
     * returned by poll() is handed to the listener, which must call
     * Acknowledgment.acknowledge() before the offset is committed to Kafka.
     *
     * @param consumerFactory the Spring-managed Kafka consumer factory
     * @param listener        rebalance listener that seeks to / persists offsets in Redis
     * @return a batch-mode, manual-ack listener container factory
     */
    @Bean("manualListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> manualListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory, @Qualifier("kafkaConsumerRebalanceListener") KafkaConsumerRebalanceListener listener) {
        ConcurrentKafkaListenerContainerFactory<String, String> containerFactory = new ConcurrentKafkaListenerContainerFactory<>();
        containerFactory.setConsumerFactory(consumerFactory);
        // Deliver each poll() as one List to the listener instead of record-by-record.
        containerFactory.setBatchListener(true);
        containerFactory.getContainerProperties().setPollTimeout(1500);
        // Manual offset commit: the listener calls Acknowledgment.acknowledge() itself.
        containerFactory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
        containerFactory.getContainerProperties().setConsumerRebalanceListener(listener);
        return containerFactory;
    }

再均衡监听器(KafkaConsumerRebalanceListener),负责在分区再均衡时从 Redis 读取/写入偏移量

public class KafkaConsumerRebalanceListener implements ConsumerAwareRebalanceListener {

    // Redis connection settings. Hardcoded here — consider externalizing to configuration.
    private static final String REDIS_HOST = "localhost";
    private static final int REDIS_PORT = 6379;

    // Latest processed offset per partition, filled in by the consumer and flushed
    // to Redis via commitOffset(). NOTE(review): plain HashMap — assumes single-threaded
    // access from the listener container; use ConcurrentHashMap if concurrency > 1.
    private Map<TopicPartition, Long> currentOffset = new HashMap<>();

    /**
     * Called right after partitions are revoked, before any pending offsets are committed.
     * Nothing to do at this point.
     */
    @Override
    public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
        // Intentionally empty: offsets are persisted in onPartitionsRevokedAfterCommit.
    }

    /**
     * Called after partitions are revoked and any pending offsets have been committed.
     * Persists every offset processed so far to Redis so the next owner can resume.
     */
    @Override
    public void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
        commitOffset(currentOffset);
    }

    /**
     * On (re)assignment: discard stale local state and seek each assigned partition
     * to the position stored in Redis, so consumption resumes where it left off.
     */
    @Override
    public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
        currentOffset.clear();
        for (TopicPartition partition : partitions) {
            // 定位到最近提交的 offset 位置继续消费
            consumer.seek(partition, getOffset(partition));
        }
    }

    /**
     * Reads the next offset to consume for the given partition from Redis.
     *
     * @param partition the topic-partition to look up
     * @return stored offset + 1 (the next record to read), or 0 if nothing is stored yet
     */
    public long getOffset(TopicPartition partition) {
        // try-with-resources: the original leaked one Jedis connection per call.
        try (Jedis jedis = new Jedis(REDIS_HOST, REDIS_PORT)) {
            String offset = jedis.get(partition.toString());
            return offset == null ? 0L : Long.parseLong(offset) + 1;
        }
    }

    /**
     * Persists the offset of every tracked partition to Redis.
     * Uses one connection for the whole batch; the original opened a new,
     * never-closed Jedis per partition and ignored the SET reply.
     *
     * @param currentOffset map of partition to its last processed offset
     */
    public void commitOffset(Map<TopicPartition, Long> currentOffset) {
        try (Jedis jedis = new Jedis(REDIS_HOST, REDIS_PORT)) {
            for (Map.Entry<TopicPartition, Long> entry : currentOffset.entrySet()) {
                jedis.set(entry.getKey().toString(), entry.getValue().toString());
            }
        }
    }

    public Map<TopicPartition, Long> getCurrentOffset() {
        return currentOffset;
    }

    public void setCurrentOffset(Map<TopicPartition, Long> currentOffset) {
        this.currentOffset = currentOffset;
    }
}

五. 创建消费者

@Service
public class KafkaConsumer {

    // Fixed: was a mutable package-private instance field; loggers should be
    // private static final per SLF4J convention.
    private static final Logger log = LoggerFactory.getLogger(KafkaConsumer.class);

    @Autowired
    private KafkaConsumerRebalanceListener listen;

    /**
     * Batch listener with manual acknowledgement. Records each record's offset in
     * the rebalance listener's map, persists the offsets to Redis, then acknowledges
     * the batch so Kafka's own committed offset advances as well.
     *
     * @param messageList the batch of records returned by one poll()
     * @param ack         handle used to manually commit the batch to Kafka
     */
    @KafkaListener(containerFactory = "manualListenerContainerFactory" , topics = "mytopic", groupId = "consumetest")
    public void onMessageManual(List<ConsumerRecord<?, ?>> messageList, Acknowledgment ack){
        log.info("manualListenerContainerFactory 处理数据量:{}",messageList.size());
        // Plain loop instead of stream().forEach: the body is pure side effects.
        for (ConsumerRecord<?, ?> record : messageList) {
            System.out.println(record);
            listen.getCurrentOffset().put(new TopicPartition(record.topic(), record.partition()), record.offset());
        }
        // Persist offsets to Redis first, then acknowledge to commit to Kafka.
        listen.commitOffset(listen.getCurrentOffset());
        ack.acknowledge();//直接提交offset
    }
}
posted @ 2021-07-30 15:51  cxylm  阅读(1213)  评论(0)    收藏  举报