/**
 * Inbound topic
*/
@Value("${kafka.ob.topic}")
private String OB_SIGN_TOPIC;
public void sendToCloudWmsTpc(ObSignMq obSignMq) {
    // Serialize once and reuse the payload for both the send and the logs.
    String payload = JSONObject.toJSONString(obSignMq);
    kafkaTemplate.send(OB_SIGN_TOPIC, payload).addCallback(
            success -> log.info("[WMS][WMS_OB_SIGN_PROD] message delivered successfully: {}", payload),
            failure -> log.error("[WMS][WMS_OB_SIGN_PROD] message delivery failed: {}", failure.getMessage(), failure));
}
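The snippet above assumes a KafkaTemplate<String, String> bean already exists. For completeness, here is a minimal sketch of what the producer-side configuration could look like; the class name and the acks setting are illustrative assumptions, and only the spring.kafka.bootstrap-servers property is taken from the consumer configuration below.

package com.sijibao.miniwms.config;

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

/**
 * Hypothetical producer configuration backing the KafkaTemplate used above.
 */
@Configuration
public class KafkaProducerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String kafkaServer;

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // "all" waits for the full in-sync replica set to acknowledge,
        // trading latency for durability.
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        return new DefaultKafkaProducerFactory<>(props);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}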
package com.sijibao.miniwms.config;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;
import org.springframework.util.backoff.FixedBackOff;
/**
* @author yuan.zhu
* @since 2021.10.29 10:59
*/
@Configuration
public class KafkaConsumerConfig {
    @Value("${spring.kafka.bootstrap-servers}")
    private String kafkaServer;

    @Autowired
    KafkaTemplate<String, String> kafkaTemplate;
    @Bean
    KafkaListenerContainerFactory<?> batchFactory() {
        // Batch listener factory: the listener method receives a List of records per poll.
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3); // three concurrent consumer threads
        factory.getContainerProperties().setPollTimeout(100);
        factory.setBatchListener(true);
        return factory;
    }
    @Bean
    KafkaListenerContainerFactory<?> singleFactory() {
        // Single-record listener factory: the listener method receives one record at a time.
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.getContainerProperties().setPollTimeout(100);
        factory.setBatchListener(false);
        return factory;
    }
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        // Generics aligned with the String deserializers configured below.
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
        // Caution: the Spring Kafka reference guide recommends setting
        // enable.auto.commit to false and letting the listener container
        // commit offsets, especially when an error handler replays records.
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.TRUE);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.CLIENT_ID_CONFIG, "wms_consumer_client_id");
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, "wms_consumer_group_id");
        // Start from the latest offset when no committed offset exists.
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        return propsMap;
    }
    @Bean
    KafkaListenerContainerFactory<?> retryKafkaFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Retry up to 3 times at 10-second intervals; once the retries are exhausted,
        // the record is published to the dead-letter topic.
        // Note: automatic topic creation is currently disabled, so the dead-letter topic
        // must be created manually in advance. Naming convention: <original topic> + ".DLT"
        factory.setErrorHandler(new SeekToCurrentErrorHandler(
                new DeadLetterPublishingRecoverer(
                        Collections.singletonMap(Object.class, kafkaTemplate),
                        (cr, e) -> new TopicPartition(cr.topic() + ".DLT", cr.partition())),
                new FixedBackOff(10 * 1000L, 3L)));
        return factory;
    }
}
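Since the comment in retryKafkaFactory notes that automatic topic creation is disabled, the dead-letter topic must exist before the first failed record is published. One way to provision it, assuming the deployment allows the application to create topics at startup (otherwise use the broker's admin tooling), is a NewTopic bean; the bean name and the partition/replica counts below are placeholders:

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.kafka.config.TopicBuilder;

// Placed in any @Configuration class; Spring Boot's auto-configured KafkaAdmin
// creates the topic on startup if it does not already exist.
@Bean
public NewTopic obSignDeadLetterTopic(@Value("${kafka.ob.topic}") String obTopic) {
    // The recoverer targets cr.partition() on the ".DLT" topic, so the dead-letter
    // topic needs at least as many partitions as the source topic.
    return TopicBuilder.name(obTopic + ".DLT")
            .partitions(3) // placeholder: match the source topic's partition count
            .replicas(1)   // placeholder: match your cluster's replication policy
            .build();
}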
@KafkaListener(id = MqConstant.CONSUMER_WMS_OB_ID, groupId = MqConstant.GROUP_ID,
        topics = {"${kafka.ob.topic}"}, containerFactory = "retryKafkaFactory")
public void consumerMsg(ConsumerRecord<String, String> record) {
    log.info("[CreateObSignConsumer] Kafka record consumed, value: {}, key: {}, topic: {}, partition: {}, offset: {}",
            record.value(), record.key(), record.topic(), record.partition(), record.offset());
}
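Records that exhaust all retries end up on the dead-letter topic, where nothing reads them by default. A minimal companion listener for surfacing them might look like the following sketch; the listener id, group id, and the choice of singleFactory are illustrative assumptions, not part of the original code:

@KafkaListener(id = "wms-ob-dlt-consumer", groupId = "wms_dlt_group_id",
        topics = "${kafka.ob.topic}.DLT", containerFactory = "singleFactory")
public void consumeDeadLetter(ConsumerRecord<String, String> record) {
    // A dead-lettered record has already failed the initial delivery plus three
    // retries, so it is logged for manual follow-up rather than retried again.
    log.error("[CreateObSignConsumer][DLT] dead-lettered record, value: {}, topic: {}, partition: {}, offset: {}",
            record.value(), record.topic(), record.partition(), record.offset());
}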