Integrating Kafka with Spring Boot
1. pom
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.1.6.RELEASE</version>
</parent>

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <!-- kafka -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>2.2.14.RELEASE</version>
    </dependency>
</dependencies>
2. Configuration file (application.yml)
server:
  port: 8888

spring:
  kafka:
    bootstrap-servers: 122.9.143.126:9092
    producer:
      retries: 0
      acks: 1
      batch-size: 16384
      properties:
        linger.ms: 0
        max.request.size: 5242880
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      bootstrap-servers: 122.9.143.126:9092
    consumer:
      properties:
        group.id: defaultConsumerGroup
        session.timeout.ms: 120000
        request.timeout.ms: 180000
      auto-commit-interval: 1000
      auto-offset-reset: latest
      bootstrap-servers: 122.9.143.126:9092
#
# ###########[ Consumer configuration ]###########
# # Default consumer group ID
# spring.kafka.consumer.properties.group.id=defaultConsumerGroup
# # Whether to auto-commit offsets
# spring.kafka.consumer.enable-auto-commit=true
# # Offset commit delay (how long after a message is received the offset is committed)
# spring.kafka.consumer.auto-commit-interval=1000
# # How to reset the offset when Kafka has no initial offset, or the offset is out of range:
# # earliest: reset to the smallest offset in the partition;
# # latest: reset to the latest offset in the partition (consume only data newly produced to the partition);
# # none: throw an exception if any partition has no committed offset;
# spring.kafka.consumer.auto-offset-reset=latest
# # Consumer session timeout (a rebalance is triggered if the consumer sends no heartbeat within this time)
# spring.kafka.consumer.properties.session.timeout.ms=120000
# # Consumer request timeout
# spring.kafka.consumer.properties.request.timeout.ms=180000
# # Deserializer classes provided by Kafka
# spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
# spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
#
# ###########[ Kafka cluster ]###########
# spring.kafka.bootstrap-servers=ip:port
# ###########[ Producer configuration ]###########
# # Number of retries
# spring.kafka.producer.retries=0
# # Ack level: how many partition replicas must be written before the producer receives an ack (0, 1, or all/-1)
# spring.kafka.producer.acks=1
# # Batch size
# spring.kafka.producer.batch-size=16384
# # Send delay
# spring.kafka.producer.properties.linger.ms=0
# # The producer sends a batch to Kafka once the buffered messages reach batch-size, or linger.ms has elapsed since a message was received.
# # With linger.ms=0, every message is sent as soon as it is received, so batch-size effectively has no effect.
# # Producer buffer size
# spring.kafka.producer.buffer-memory=33554432
# # Serializer classes provided by Kafka
# spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
# spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# # Maximum request size in bytes
# spring.kafka.producer.properties.max.request.size=5242880
# # Custom partitioner
# # spring.kafka.producer.properties.partitioner.class=com.felix.kafka.producer.CustomizePartitioner
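The last commented-out property points at a custom partitioner class, but the tutorial never shows its code. As a minimal sketch, a class plugged in via partitioner.class implements Kafka's Partitioner interface; the hash-the-key routing below is an assumption for illustration, not the original CustomizePartitioner implementation.

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.utils.Utils;

// Hypothetical sketch of the custom partitioner referenced by partitioner.class above.
// It hashes the record key (or the value when the key is null) over the partition count.
public class CustomizePartitioner implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionCountForTopic(topic);
        byte[] bytes = keyBytes != null ? keyBytes : valueBytes;
        if (bytes == null) {
            // no key and no value bytes: fall back to the first partition
            return 0;
        }
        return Utils.toPositive(Utils.murmur2(bytes)) % numPartitions;
    }

    @Override
    public void close() {
        // nothing to release
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no extra configuration needed
    }
}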
3. Startup class
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class DemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}
4. controller
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping
public class DemoController {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @GetMapping("test01")
    public String test01() {
        // send(topic, value): the record is routed to a partition by the default partitioner
        kafkaTemplate.send("mykafkaaa", "kldsdsfjifowefl666666");
        // send(topic, key, value): records with the same key always land in the same partition
        kafkaTemplate.send("mykafkaaa", "lsfd", "klsdflkjdsf");
        return "this is test01 method";
    }
}
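The configuration above also sets consumer properties, but the tutorial only exercises the producing side. A minimal consumer sketch, assuming a listener on the same mykafkaaa topic (the class and method names here are illustrative, not from the original), could look like this:

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class DemoConsumer {

    // Listens on the topic the controller writes to; with no groupId on the annotation,
    // the group falls back to spring.kafka.consumer.properties.group.id (defaultConsumerGroup).
    @KafkaListener(topics = "mykafkaaa")
    public void onMessage(String message) {
        System.out.println("received from mykafkaaa: " + message);
    }
}

With both classes in place, calling http://localhost:8888/test01 sends two records, and the listener should print both payloads.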
