Kafka client example (with Kerberos authentication enabled)
1. pom configuration
<!-- Kafka support -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.3.0</version>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka-test</artifactId>
    <scope>test</scope>
</dependency>
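Note that spring-kafka above carries no explicit version, which assumes the project inherits Spring Boot's dependency management. If it does not, you need to pin a version yourself that matches kafka-clients 2.3.0. The snippet below is an illustration under that assumption (Spring Kafka 2.3.x is built against kafka-clients 2.3.x), not taken from the original post:

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- explicit version shown only for projects without a Spring Boot parent -->
    <version>2.3.0.RELEASE</version>
</dependency>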
2. Kafka configuration
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
/**
 * Kafka consumer configuration
 * @author op
 */
@Configuration
public class KafkaConfig {
@Value("${spring.kafka.bootstrap-servers}")
private String bootstrapServers;
@Value("${spring.kafka.consumer.enable-auto-commit}")
private Boolean autoCommit;
@Value("${spring.kafka.consumer.auto-commit-interval-ms}")
private Integer autoCommitInterval;
@Value("3000")
private Integer maxNum;
@Value("1048576")//最大数据大小为10M
private Integer maxBytes;
@Value("${spring.kafka.consumer.auto-offset-reset}")
private String autoOffsetReset;
@Value("${spring.kafka.consumer.key-deserializer}")
private String keyDeserializer;
@Value("${spring.kafka.consumer.value-deserializer}")
private String valDeserializer;
/**
 * Listener container factory for batch consumption
 * @return the batch listener container factory
 */
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>> kafkaListenerFactory() {
ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
// Enable batch consumption; the batch size is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG below
factory.setBatchListener(true);
// Number of concurrent consumer threads (10 consumers here)
factory.setConcurrency(10);
return factory;
}
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000"); // hard-coded here; overrides application.properties for this factory
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valDeserializer);
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxBytes); // max bytes fetched per partition per request
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxNum); // max records returned per poll()
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
//Kerberos (SASL/GSSAPI) authentication
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
props.put(SaslConfigs.SASL_MECHANISM, "GSSAPI");
props.put(SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka");
return props;
}
}
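The post only configures the consumer side; a producer against the same cluster needs the same three Kerberos properties. The sketch below is a minimal, hypothetical counterpart (the class/bean names and the StringSerializer choice are assumptions, not from the original):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;

@Configuration
public class KafkaProducerConfig {

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port"); // same cluster as the consumer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Kerberos settings mirror the consumer configuration above
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
        props.put(SaslConfigs.SASL_MECHANISM, "GSSAPI");
        props.put(SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka");
        return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(props));
    }
}

With this bean in place, sending a test message is a one-liner: kafkaTemplate.send("test", "hello").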
3. Consumer
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import lombok.extern.slf4j.Slf4j;
/**
 * Kafka consumer listener
 * @author op
 */
@Slf4j
@Component
public class KafkaConsumerListener {
// Topic, consumer group, and container factory for this listener
@KafkaListener(
groupId = "your-group-id",
topics = "test",
containerFactory = "kafkaListenerFactory"
)
public void newbulkWebsiteAlert(List<ConsumerRecord<?, ?>> records){
log.debug("received {} records", records.size());
if (CollectionUtils.isEmpty(records)) {
return;
}
try {
for (ConsumerRecord<?, ?> record : records) {
String value = record.value().toString();
// business logic goes here
}
} catch (Exception e) {
log.error("Exception while consuming Kafka messages: " + e.getMessage(), e);
}
}
}
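With enable-auto-commit=true the client commits offsets on a timer, so a crash mid-batch can lose messages. A common at-least-once variant (not shown in the original post; the names here are hypothetical) disables auto-commit, switches the factory to manual ack mode via factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL), and acknowledges only after the batch is processed:

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import lombok.extern.slf4j.Slf4j;

@Slf4j
@Component
public class ManualAckListener {

    // Assumes enable-auto-commit=false and AckMode.MANUAL on the container factory
    @KafkaListener(groupId = "your-group-id", topics = "test", containerFactory = "kafkaListenerFactory")
    public void onMessages(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
        try {
            for (ConsumerRecord<?, ?> record : records) {
                // process record.value() here
            }
            ack.acknowledge(); // commit offsets only after the whole batch succeeded
        } catch (Exception e) {
            log.error("Kafka batch processing failed; offsets not committed", e);
        }
    }
}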
4. Configuration (application.properties)
spring.kafka.bootstrap-servers=ip:port
spring.kafka.consumer.properties.group.id=your-group-id
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval-ms=1000
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.properties.session.timeout.ms=120000
spring.kafka.consumer.properties.request.timeout.ms=180000
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.listener.missing-topics-fatal=false
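As an alternative to hard-coding the Kerberos settings in KafkaConfig, Spring Boot can pass raw client properties through via spring.kafka.properties.*. The entries below are the equivalent of the three props set in Java above (a sketch; not part of the original configuration):

spring.kafka.properties.security.protocol=SASL_PLAINTEXT
spring.kafka.properties.sasl.mechanism=GSSAPI
spring.kafka.properties.sasl.kerberos.service.name=kafka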
5. When starting the service, point the JVM at the Kerberos configuration files via system properties, for example:
java -jar -Djava.security.krb5.conf=xxx/krb5.conf -Djava.security.auth.login.config=xxx/jaas.conf xxx.jar &
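If you cannot change the launch command, the same two system properties can also be set programmatically before any Kafka client is created. A minimal sketch, assuming the same file paths as above and a standard Spring Boot entry point:

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class Application {
    public static void main(String[] args) {
        // Must run before the first Kafka client attempts its SASL/GSSAPI handshake
        System.setProperty("java.security.krb5.conf", "xxx/krb5.conf");
        System.setProperty("java.security.auth.login.config", "xxx/jaas.conf");
        SpringApplication.run(Application.class, args);
    }
}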
Note that serviceName and principal in jaas.conf are read together, and the keytab credentials live under the Kafka directory. If the serviceName field is present, principal only needs to contain the principal itself; if serviceName is absent, principal must be written as kafka/<principal>.
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    serviceName="kafka"
    keyTab="D:/logs/kafka.keytab"
    principal="<your-principal>";
};
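For the case described above where serviceName is omitted, the service name moves into the principal itself. A sketch of that variant (the principal value is a placeholder):

KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="D:/logs/kafka.keytab"
    principal="kafka/<your-principal>";
};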