
Kafka is widely used on large websites, mainly for log collection and messaging. It is a publish-subscribe messaging system: producers act as publishers and consumers as subscribers.

We start with the properties class that binds the producer configuration:

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Producer settings bound from the kafka.producer.* properties.
 * Getters and setters are generated by Lombok's @Data (a simple use of the Lombok plugin).
 */
@ConfigurationProperties(prefix = "kafka.producer")
@Data
public class KafkaProducerProperties {

    /** Comma-separated broker list (bootstrap.servers). */
    private String servers;
    /** Number of send retries (retries). */
    private int retries;
    /** Batch size in bytes (batch.size). */
    private int batchSize;
    /** Producer buffer memory in bytes (buffer.memory). */
    private int bufferMemory;
    /** Not used by the producer configuration below. */
    private String autoCommitInterval;
    /** Not used by the producer configuration below. */
    private String sessionTimeout;
    /** Not used by the producer configuration below. */
    private String autoOffsetReset;
    /** Not used by the producer configuration below. */
    private String groupId;
    /** Not used by the producer configuration below. */
    private int concurrency;
    /** Not used by the producer configuration below. */
    private int pollTimeout;

}
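For reference, here is a minimal application.properties sketch that would populate this class; the values are illustrative assumptions, not taken from the original deployment:

kafka.producer.servers=localhost:9092
kafka.producer.retries=3
kafka.producer.batchSize=16384
kafka.producer.bufferMemory=33554432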

The producer configuration itself:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
@EnableConfigurationProperties(KafkaProducerProperties.class)
@EnableKafka
public class KafkaProducerConfig {

    /** Externalized producer settings. */
    @Autowired
    KafkaProducerProperties properties;

    /** Listener that records the outcome of every send (shown below). */
    @Autowired
    private KafkaSendResultHandler kafkaSendResultHandler;

    /**
     * Builds the native producer configuration from the bound properties.
     *
     * @return the producer config map
     */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getServers());
        props.put(ProducerConfig.RETRIES_CONFIG, properties.getRetries());
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, properties.getBatchSize());
        // acks=all: wait for the full in-sync replica set to acknowledge each record
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, properties.getBufferMemory());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    /**
     * Creates the producer factory backing the template.
     *
     * @return a DefaultKafkaProducerFactory built from producerConfigs()
     */
    private ProducerFactory<Object, Object> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Exposes the KafkaTemplate used to send messages, with the
     * send-result listener attached.
     *
     * @return the shared KafkaTemplate bean
     */
    @Bean(name = "kafkaTemplate")
    public KafkaTemplate<Object, Object> kafkaTemplate() {
        KafkaTemplate<Object, Object> kafkaTemplate = new KafkaTemplate<>(producerFactory());
        kafkaTemplate.setProducerListener(kafkaSendResultHandler);
        return kafkaTemplate;
    }
}

The KafkaSendResultHandler attached to the template records the outcome of every send, persisting failures to a database table so they can be retried later:

import java.util.Date;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.stereotype.Component;

// Project classes (CmbsKafkaSendErr, CmbsKafkaSendErrMapper, CommonUtil,
// KafkaSendErrStatus, KafkaErrTypeStatus, MktCampaignConst) are not shown.
@Component
public class KafkaSendResultHandler implements ProducerListener<Object, Object> {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSendResultHandler.class);

    @Autowired
    private CmbsKafkaSendErrMapper cmbsKafkaSendErrMapper;

    @Override
    public void onSuccess(String topic, Integer partition, Object key, Object value, RecordMetadata recordMetadata) {
        // Success details (topic, partition, key, value, metadata) could be logged here.
        if (CommonUtil.isNotNull(key)) {
            // A non-null key identifies a retried record: mark it successful and
            // increment the send count. The original error row is kept on file.
            CmbsKafkaSendErr kafkaErr = new CmbsKafkaSendErr();
            kafkaErr.setId(Long.parseLong(String.valueOf(key)));
            kafkaErr.setSendStatus(KafkaSendErrStatus.SUCCESS.getStatus());
            cmbsKafkaSendErrMapper.updateStatusAndSendTimesById(kafkaErr);
        }
    }

    @Override
    public void onError(String topic, Integer partition, Object key, Object value, Exception exception) {
        String json = "";
        if (CommonUtil.isNotNull(value)) {
            json = String.valueOf(value);
        }
        // Failure details (topic, partition, key, value, exception) could be logged here.
        if (CommonUtil.isNull(key)) {
            // First failure: persist the message so it can be resent later.
            try {
                CmbsKafkaSendErr cmbsKafkaSendErr = new CmbsKafkaSendErr();
                cmbsKafkaSendErr.setTopic(topic);
                cmbsKafkaSendErr.setMsgContent(json);
                cmbsKafkaSendErr.setFailDesc(StringUtils.substring(exception.getMessage(), 0, 65535));
                cmbsKafkaSendErr.setSendStatus(KafkaSendErrStatus.FAIL.getStatus());
                cmbsKafkaSendErr.setSendTimes(MktCampaignConst.Number.ZERO);
                cmbsKafkaSendErr.setGmtCreate(new Date());
                cmbsKafkaSendErr.setGmtModified(new Date());
                cmbsKafkaSendErr.setState(MktCampaignConst.DataStates.NORMAL_STATE);
                cmbsKafkaSendErr.setType(KafkaErrTypeStatus.SEND_ERR.getStatus());
                cmbsKafkaSendErrMapper.insert(cmbsKafkaSendErr);
            } catch (Exception e) {
                LOGGER.error("KafkaSendResultHandler.onError error:", e);
            }
        } else {
            // A retried record failed again: mark it failed and increment the count.
            CmbsKafkaSendErr kafkaErr = new CmbsKafkaSendErr();
            kafkaErr.setId(Long.parseLong(String.valueOf(key)));
            kafkaErr.setSendStatus(KafkaSendErrStatus.FAIL.getStatus());
            kafkaErr.setFailDesc(StringUtils.substring(exception.getMessage(), 0, 256));
            cmbsKafkaSendErrMapper.updateStatusAndSendTimesById(kafkaErr);
        }
    }

    @Override
    public boolean isInterestedInSuccess() {
        // Without returning true, the template never invokes onSuccess.
        return true;
    }

}
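Something still has to resend the persisted failures. The original post does not include that job, but a minimal hypothetical sketch could look like the following; the mapper method findUnsentRecords() and the scheduling interval are assumptions. Note that the error-row id is sent as the record key, which is exactly what KafkaSendResultHandler checks to decide between inserting a new row and updating an existing one.

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

// Hypothetical retry job; findUnsentRecords() is an assumed mapper method.
// Requires @EnableScheduling on a configuration class.
@Component
public class KafkaSendRetryJob {

    @Autowired
    private CmbsKafkaSendErrMapper cmbsKafkaSendErrMapper;

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    @Scheduled(fixedDelay = 60000)
    public void resendFailedMessages() {
        for (CmbsKafkaSendErr err : cmbsKafkaSendErrMapper.findUnsentRecords()) {
            // Send with the error-row id as the key so the result handler
            // updates this row instead of inserting a new one.
            kafkaTemplate.send(err.getTopic(), String.valueOf(err.getId()), err.getMsgContent());
        }
    }
}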

Next, the consumer side:

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Consumer settings bound from the kafka.consumer.* properties.
 * Getters and setters are generated by Lombok's @Data.
 */
@ConfigurationProperties(prefix = "kafka.consumer")
@Data
public class KafkaConsumerProperties {

    /** Comma-separated broker list (bootstrap.servers). */
    private String servers;
    /** Not used by the consumer configuration below. */
    private int retries;
    /** Not used by the consumer configuration below. */
    private int batchSize;
    /** Not used by the consumer configuration below. */
    private int bufferMemory;
    /** Auto-commit interval in ms (auto.commit.interval.ms). */
    private String autoCommitInterval;
    /** Session timeout in ms (session.timeout.ms). */
    private String sessionTimeout;
    /** Offset reset policy, e.g. earliest or latest (auto.offset.reset). */
    private String autoOffsetReset;
    /** Consumer group id (group.id). */
    private String groupId;
    /** Number of concurrent listener containers. */
    private int concurrency;
    /** Poll timeout in ms for the listener container. */
    private int pollTimeout;

}
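A matching application.properties sketch for the consumer side, again with assumed values:

kafka.consumer.servers=localhost:9092
kafka.consumer.groupId=demo-group
kafka.consumer.autoCommitInterval=1000
kafka.consumer.sessionTimeout=15000
kafka.consumer.autoOffsetReset=earliest
kafka.consumer.concurrency=3
kafka.consumer.pollTimeout=3000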

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@Configuration
@EnableConfigurationProperties(KafkaConsumerProperties.class)
@EnableKafka
public class KafkaConsumerConfig {

    /** Externalized consumer settings. */
    @Autowired
    KafkaConsumerProperties properties;

    /**
     * Builds the consumer factory from the bound properties.
     *
     * @return a DefaultKafkaConsumerFactory
     */
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> configs = new HashMap<>();
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getServers());
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Offsets are committed manually (see AckMode.MANUAL below), so
        // auto-commit is disabled and the interval setting is effectively unused.
        configs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        configs.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, properties.getAutoCommitInterval());
        configs.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, properties.getSessionTimeout());
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, properties.getGroupId());
        configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, properties.getAutoOffsetReset());
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    /**
     * Creates the listener container factory: concurrent, batch-mode,
     * with manual offset acknowledgment.
     *
     * @return the container factory used by @KafkaListener methods
     */
    @Bean(name = "kafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(properties.getConcurrency());
        factory.getContainerProperties().setPollTimeout(properties.getPollTimeout());
        // Listeners receive an Acknowledgment and must commit offsets themselves.
        factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
        // Deliver records to the listener in batches (List<ConsumerRecord>).
        factory.setBatchListener(true);
        return factory;
    }

}
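Because the container factory enables batch mode with manual acknowledgment, a listener must accept a list of records plus an Acknowledgment and commit offsets itself. A minimal sketch; the topic name demo-topic is an assumption:

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class DemoBatchListener {

    // containerFactory must match the bean name defined above.
    @KafkaListener(topics = "demo-topic", containerFactory = "kafkaListenerContainerFactory")
    public void onMessages(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            // Process each record; keep processing idempotent in case of redelivery.
            System.out.println(record.partition() + ": " + record.value());
        }
        // Commit offsets only after the whole batch has been handled.
        ack.acknowledge();
    }
}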



To use it, inject the kafkaTemplate bean defined above and call its send methods. A minimal sketch, with illustrative service and topic names:
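import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

// Hypothetical sender; DemoMessageService and "demo-topic" are illustrative names.
@Service
public class DemoMessageService {

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    public void publish(String payload) {
        // Sending without a key means KafkaSendResultHandler inserts a
        // new error row on failure (see onError above).
        kafkaTemplate.send("demo-topic", payload);
    }
}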

Extension: Jafka was incubated from Kafka and is essentially an upgraded version of it. Its main characteristics: fast persistence, with messages persisted at O(1) overhead; high throughput, reaching around 100,000 messages per second on an ordinary server; a fully distributed system in which brokers, producers, and consumers all natively support distribution and load-balance automatically; and support for parallel data loading into Hadoop, making it a practical option for systems that, like Hadoop, handle log data and offline analysis but also need real-time processing.
