kafka集成springBoot实例

1、Maven准备

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.3</version>
</dependency>

2、配置文件

#============== kafka ===================

spring:
  kafka:
    bootstrap-servers: ${cust.tams.kafka}:9092
    producer:
      # Number of times a message send is retried after an error.
      retries: 16
      # Messages bound for the same partition are grouped into one batch;
      # this is the batch size in bytes.
      batch-size: 16384
      # Size of the producer's memory buffer, in bytes.
      buffer-memory: 33554432
      # Key serializer.
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Value serializer.
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # acks=0  : the producer does not wait for any broker response.
      # acks=1  : the producer gets a success response once the partition leader has the message.
      # acks=all: the producer gets a success response only after all in-sync replicas have the message.
      acks: 1
    consumer:
      # Auto-commit interval. In Spring Boot 2.x this binds to a Duration and
      # must use formats such as 1S, 1M, 2H, 5D.
      auto-commit-interval: 1S
      # What to do when there is no committed offset, or the offset is invalid:
      # latest (default): start from records produced after the consumer started.
      # earliest        : start from the beginning of the partition.
      auto-offset-reset: earliest
      # Whether offsets are committed automatically (default true). Set to false
      # and commit manually to avoid duplicate or lost records.
      enable-auto-commit: false
      # Key deserializer.
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Value deserializer.
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Number of threads running in the listener container.
      concurrency: 5
      # The listener acks; each acknowledge() call commits immediately.
      ack-mode: manual_immediate
      missing-topics-fatal: false

3、发送者

 

/**
 * Base message body shared by all Kafka message DTOs.
 *
 * <p>The accessor names {@code getMSG_UUID}/{@code setMSG_UUID} are part of the
 * serialized JSON contract and are therefore kept unchanged.
 */
public class BaseMsgDto {

    /** Globally unique identifier of the message, used for de-duplication. */
    private String msgUuid;

    public String getMSG_UUID() {
        return msgUuid;
    }

    public void setMSG_UUID(String MSG_UUID) {
        this.msgUuid = MSG_UUID;
    }
}
/**
 * Kafka helper bean that sends string messages asynchronously and logs the outcome.
 */
@Component
public class ApplicationKafkaUtils {

    private static final Logger log = LoggerFactory.getLogger(ApplicationKafkaUtils.class);

    @Autowired
    KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Sends {@code msgDto} to {@code topic} asynchronously.
     *
     * <p>Note: the send is asynchronous, so a failure cannot be reported to the
     * caller via an exception — throwing from the callback (as the original code
     * did) only kills the callback thread and the exception is lost. Failures
     * are logged with the full stack trace instead.
     *
     * @param topic  target Kafka topic
     * @param msgDto message payload (already serialized)
     */
    public void sendMsg(String topic, String msgDto) {
        kafkaTemplate.send(topic, msgDto)
                .addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
                    @Override
                    public void onFailure(Throwable e) {
                        // Pass the throwable as the last argument (no placeholder)
                        // so SLF4J prints the stack trace.
                        log.warn("ApplicationKafkaUtils..sendMsg..send msg fail..msgDto:{}", msgDto, e);
                    }

                    @Override
                    public void onSuccess(SendResult<String, String> result) {
                        log.info("ApplicationKafkaUtils..sendMsg..send msg success..msgDto:{}", msgDto);
                    }
                });
    }
}


/**
 * Defers an action until the caller's surrounding transaction commits
 * successfully; the action strictly depends on the commit.
 *
 * <p>If no transaction is active the action is silently discarded.
 * NOTE(review): confirm all callers invoke this inside an active transaction,
 * otherwise their actions are dropped without any log trace.
 */
@Component
public class TransactionCommitHandler {

    /**
     * Registers {@code action} to run after the current transaction commits.
     *
     * @param action work to run post-commit (e.g. sending a Kafka message)
     */
    public void handle(Runnable action) {
        if (TransactionSynchronizationManager.isActualTransactionActive()) {
            // TransactionSynchronizationAdapter is deprecated since Spring 5.3;
            // TransactionSynchronization now provides default methods, so the
            // interface can be implemented directly.
            TransactionSynchronizationManager.registerSynchronization(new TransactionSynchronization() {
                @Override
                public void afterCommit() {
                    // Runs only after a successful commit.
                    action.run();
                }
            });
        }
    }
}

 

 

@Autowired
ApplicationKafkaUtils applicationKafkaUtils;

@Autowired
TransactionCommitHandler transactionCommitHandler;

// 同步 dm更新数据
// synchronousOrderData(order);
// 异步 dm更新数据

transactionCommitHandler.handle(() -> synchronousOrderData(order));

/**
 * Pushes the order's customer data to Kafka so the downstream consumer can
 * sync it into the dm_delivermanage table.
 *
 * @param order source order; must be non-null with a non-null orderId
 * @throws NullPointerException if {@code order} or its orderId is null
 */
private void synchronousOrderData(Order order) {
    log.info("OrderServiceImpl..synchronousOrderData..start!!");
    Objects.requireNonNull(order, "order must not be null!");
    Objects.requireNonNull(order.getOrderId(), "orderId must not be null!");

    // Assemble the message payload; MSG_UUID lets the consumer de-duplicate.
    DeliverymanageUpdateMsgDto deliverymsgDto = new DeliverymanageUpdateMsgDto();
    deliverymsgDto.setMSG_UUID(UUID.randomUUID().toString());
    deliverymsgDto.setOrderId(order.getOrderId());
    deliverymsgDto.setManagername(order.getCmManagerName());
    deliverymsgDto.setCmCustomerOwnershipId(order.getCmCustomerOwnershipId());
    deliverymsgDto.setCmCustomerOwnershipName(order.getCmCustomerOwnershipName());
    deliverymsgDto.setCmCustomerSigningCompanyId(order.getCmCustomerSigningCompanyId());
    deliverymsgDto.setCmCustomerSigningCompanyName(order.getCmCustomerSigningCompanyName());

    // Push asynchronously. The callback only logs: the send has already
    // returned to the caller, so throwing here (as the original did) cannot
    // propagate — it would just be swallowed by the callback executor.
    kafkaTemplate.send(KafkaConstants.DM_OM_CUSTOMER, JSON.toJSONString(deliverymsgDto))
            .addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
                @Override
                public void onFailure(Throwable e) {
                    // Throwable as last argument (no placeholder) so the
                    // stack trace is logged.
                    log.warn("OrderServiceImpl..synchronousOrderData..send msg fail..customer:{}..msgDto:{}", order, deliverymsgDto, e);
                }

                @Override
                public void onSuccess(SendResult<String, String> result) {
                    log.info("OrderServiceImpl..synchronousOrderData..send msg success..msgDto:{}", deliverymsgDto);
                }
            });
    log.info("OrderServiceImpl..synchronousOrderData..end!!");
}

4、接收者

/**
 * Redis cache helper.
 *
 * <p>Raw template + class-level unchecked suppression kept from the original;
 * NOTE(review): the original snippet used {@code redisTemplate} without ever
 * declaring it — confirm the template type against the real code base.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Component
public class RedisUtils {

    private static final Logger log = LoggerFactory.getLogger(RedisUtils.class);

    @Autowired
    private RedisTemplate redisTemplate;

    /**
     * Returns all keys matching the given pattern.
     *
     * @param pattern Redis key pattern (e.g. "odm:kafka:*")
     * @return matching keys
     */
    public Set<String> getKeys(String pattern) {
        return redisTemplate.keys(pattern);
    }

    /**
     * SET-if-absent with an expiry, used as a lightweight distributed lock.
     *
     * @param key        lock key
     * @param value      value to store
     * @param expireTime expiry in seconds
     * @return true if the key was newly set (lock acquired), false otherwise
     */
    public boolean setnx(final String key, Object value, Long expireTime) {
        try {
            ValueOperations<Serializable, Object> operations = redisTemplate.opsForValue();
            Boolean setResult = operations.setIfAbsent(key, value, expireTime, TimeUnit.SECONDS);
            return Boolean.TRUE.equals(setResult);
        } catch (Exception e) {
            // Log instead of printStackTrace(); best-effort semantics kept:
            // a Redis failure means the lock was not acquired.
            log.warn("RedisUtils..setnx..failed..key:{}", key, e);
            return false;
        }
    }

    /**
     * Deletes the given key; used to release the de-duplication lock
     * (see KafkaMsgOrder). Missing from the original snippet although called there.
     *
     * @param key key to delete
     * @return true if the key existed and was removed
     */
    public boolean remove(final String key) {
        return Boolean.TRUE.equals(redisTemplate.delete(key));
    }
}

 

/**
 * Kafka topic and consumer-group constants.
 */
public class KafkaConstants {

    private KafkaConstants() {
    }

    /** Default consumer group. */
    public static final String DEFAULT_MSG_GROUP = "default-msg-group";

    /**
     * OM system consumer group.
     * NOTE: "oderManger" is misspelled but kept as-is — renaming a group id
     * would orphan the group's committed offsets.
     */
    public static final String OM_MSG_GROUP = "oderManger-msg-group";

    /**
     * Topic carrying OM customer updates for the dm_delivermanage sync.
     * TODO(review): this constant is referenced by the producer and listener
     * but was never defined in the original snippet — confirm the real topic name.
     */
    public static final String DM_OM_CUSTOMER = "dm-om-customer";
}



**  监听消费者
* @Description: om数据更新对象

*/
@Component
public class KafkaMsgOrder {

private static final Logger log = LoggerFactory.getLogger(KafkaMsgOrder.class);

private static final String key_prefix = "odm:kafka:%s:%s";

private static final String key_prefix1 = "tm:kafka:%s:%s";

private static final String key_prefix2 = "tm:kafka:%s:%s";

private static final String key_prefix3 = "cm:kafka:%s:%s";


@Autowired
RedisUtils redisUtils;

/**
* 监听om更新信息
*/
@KafkaListener(topics = KafkaConstants.DM_OM_CUSTOMER, groupId = KafkaConstants.DEFAULT_MSG_GROUP)
public void consumerMsg(ConsumerRecord<String, String> record, Acknowledgment ack) {
if (StringUtils.isBlank(record.value())) {
log.warn("KafkaMsgOrder..consumerMsg..received an exception message!!..record:{}", record);
return;
}

// 解析数据
DeliverymanageUpdateMsgDto msgDto = JSON.parseObject(record.value(), DeliverymanageUpdateMsgDto.class);
if (msgDto.getOrderId() == null || StringUtils.isBlank(msgDto.getMSG_UUID())) {
log.warn("KafkaMsgOrder..consumerMsg..abnormal data check!!..record:{}", record);
return;
}
// 重复校验
String redisKey = String.format(key_prefix, KafkaConstants.DM_OM_CUSTOMER, msgDto.getMSG_UUID());
if (!redisUtils.setnx(redisKey, msgDto.getMSG_UUID(), 30L)) {
log.info("KafkaMsgOrder..consumerMsg..this message in processing!!!..msgDto:{}", msgDto);
return;
}

try {
// 1. 更新数据
// 1.1 取出待更新的数据
List<CustomerDelivery> updateCusDeliveryList = customerDeliveryService.list(
new LambdaQueryWrapper<CustomerDelivery>()
.eq(CustomerDelivery::getOrderId, msgDto.getOrderId())
);
// 没有要更新的数据
if (CollectionUtils.isEmpty(updateCusDeliveryList)) {
return;
}

//替换数据
updateCusDeliveryList.forEach(customerDelivery -> {
customerDelivery.setSigningCompanyId(msgDto.getCmCustomerSigningCompanyId());
customerDelivery.setSigningCompanyName(msgDto.getCmCustomerSigningCompanyName());
customerDelivery.setManagerName(msgDto.getManagername());
customerDelivery.setOwnershipId(msgDto.getCmCustomerOwnershipId());
customerDelivery.setOwnershipName(msgDto.getCmCustomerOwnershipName());
});


// 2.3更新数据
customerDeliveryService.updateBatchById(updateCusDeliveryList);
// 2.4 确认消息已被消费
ack.acknowledge();

} finally {
redisUtils.remove(redisKey);
}

}
}
}

5、测试消息


posted @ 2022-04-18 18:43  htmlx  阅读(329)  评论(0)    收藏  举报