Notes on integrating Spring Boot with Kafka
pom file
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
</dependencies>
YAML configuration
spring:
  kafka:
    # bootstrap-servers: 192.168.10.45:9092
    bootstrap-servers: 192.168.198.128:9092
    producer:
      # Retry count; the default is Integer.MAX_VALUE
      retries: 1
      # Batch size in bytes (default 16K)
      batch-size: 16384
      # Producer buffer memory (32M)
      buffer-memory: 33554432
      # Key/value serializers (these are the defaults and can be omitted)
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # ack setting; the default is 1, i.e. only the leader has to confirm receipt
      acks: 1
      # Spring Boot 1.5.16 auto-configuration does not pick up extra settings
      # under properties (not sure why); 2.x versions do
      #properties:
      #  # use a custom partitioner
      #  {partitioner.class: com.msy.kafka.MyPartition, acks: all}
    consumer:
      group-id: test
      enable-auto-commit: false
      # earliest: consume from the beginning; latest: consume only new messages (default: latest)
      auto-offset-reset: latest
      # Key/value deserializers (defaults, can be omitted)
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Maximum number of records returned by a single poll
      max-poll-records: 100
    listener:
      # Listener concurrency. Bounded by the partition count: it must be <= the
      # number of partitions, or the surplus threads sit idle. The topic here
      # was created with 8 partitions.
      concurrency: 6
      # When offsets are committed manually, ackMode must be set accordingly
      ack-mode: MANUAL
    topic: test5   # custom property, read via @Value in the producer below
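The listener comment above assumes a topic with 8 partitions. One way to guarantee it exists is to declare it as a bean so Spring Boot's auto-configured KafkaAdmin creates it at startup. A minimal sketch, assuming spring-kafka 2.3+ for TopicBuilder; the class name KafkaTopicConfig and the replication factor of 1 are illustrative, suited to a single-broker dev setup:
package com.example.sms.middleware.sms_middleware.kafka;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;

@Configuration
public class KafkaTopicConfig {

    // KafkaAdmin (auto-configured by Spring Boot) creates this topic on
    // startup if it does not already exist.
    @Bean
    public NewTopic test5Topic() {
        return TopicBuilder.name("test5")
                .partitions(8)
                .replicas(1)
                .build();
    }
}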
Producer: it has to be invoked to produce data; here it implements ApplicationRunner so a test message goes out once at startup
package com.example.sms.middleware.sms_middleware.kafka;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

/**
 * @Author kang
 * @Date 2020/6/19 10:03
 **/
@Component
public class Producer implements ApplicationRunner {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${spring.kafka.topic}")
    private String topic;

    public void sendMessage() {
        kafkaTemplate.send(topic, "message");
    }

    @Override
    public void run(ApplicationArguments args) throws Exception {
        // Fire a test message once the application has started. Calling
        // new Producer().sendMessage() would bypass Spring and leave
        // kafkaTemplate null; always go through the managed bean.
        sendMessage();
    }
}
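send() is asynchronous and returns immediately; when delivery confirmation matters, a callback can be attached to the returned future. A minimal sketch, assuming spring-kafka 2.x where send() returns a ListenableFuture (in 3.x it returns a CompletableFuture instead); the class name CallbackProducer is illustrative:
package com.example.sms.middleware.sms_middleware.kafka;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;

@Component
public class CallbackProducer {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendWithCallback(String topic, String message) {
        // send() returns at once; the callback fires when the broker acks the
        // record (success) or the send ultimately fails.
        kafkaTemplate.send(topic, message).addCallback(
                (SendResult<String, String> result) -> System.out.println(
                        "sent to partition " + result.getRecordMetadata().partition()
                                + ", offset " + result.getRecordMetadata().offset()),
                ex -> System.err.println("send failed: " + ex.getMessage()));
    }
}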
Consumer
package com.example.sms.middleware.sms_middleware.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

/**
 * @Author kang
 * @Date 2020/6/19 10:04
 **/
@Component
@Slf4j
public class Consumer {

    @KafkaListener(topics = "test5") // topics takes an array, so one listener can cover several topics
    public void consumerMessage(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
        try {
            String value = consumerRecord.value();
            log.info("Received message: {}", value);
            // business logic ...
        } catch (Exception e) {
            log.error("Failed to process message", e);
        } finally {
            // Manual offset commit (ack-mode: MANUAL). Acknowledging in finally
            // commits even when processing fails; move this into the try block
            // if failed records should be redelivered instead.
            ack.acknowledge();
        }
    }
}
You can simply add spring.kafka.listener.concurrency=3 to application.properties, and @KafkaListener methods will then consume concurrently; the same thing can also be set per listener, as sketched below.
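A minimal sketch of the per-listener variant, assuming spring-kafka 2.2+ where @KafkaListener has a concurrency attribute; the class name ConcurrentConsumer is illustrative, and the manual ack matches the ack-mode: MANUAL config above:
package com.example.sms.middleware.sms_middleware.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class ConcurrentConsumer {

    // The concurrency attribute overrides spring.kafka.listener.concurrency
    // for this listener only: three consumer threads share the partitions.
    @KafkaListener(topics = "test5", concurrency = "3")
    public void listen(ConsumerRecord<String, String> record, Acknowledgment ack) {
        System.out.println(Thread.currentThread().getName() + " -> " + record.value());
        ack.acknowledge(); // manual offset commit, matching ack-mode: MANUAL
    }
}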
For a topic with only one partition, per-partition consumption is pointless. The example below pins listeners to specific partitions (in my setup the topic has 4 partitions and there are 4 listenPartitionX methods; only the first two are shown here).
@KafkaListener(id = "id0", topicPartitions = { @TopicPartition(topic = TOPIC, partitions = { "0" }) })
public void listenPartition0(List<ConsumerRecord<?, ?>> records) {
    // TOPIC is a String constant holding the topic name
    log.info("Id0 Listener, Thread ID: " + Thread.currentThread().getId());
    log.info("Id0 records size " + records.size());
    for (ConsumerRecord<?, ?> record : records) {
        // record.value() is typed Object here because of the wildcard generics
        String value = String.valueOf(record.value());
        // process value ...
    }
}
@KafkaListener(id = "id1", topicPartitions = { @TopicPartition(topic = TOPIC, partitions = { "1" }) })
public void listenPartition1(List<ConsumerRecord<?, ?>> records) {
    log.info("Id1 Listener, Thread ID: " + Thread.currentThread().getId());
    log.info("Id1 records size " + records.size());
    for (ConsumerRecord<?, ?> record : records) {
        String value = String.valueOf(record.value());
        // process value ...
    }
}
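One caveat: a listener that receives a List<ConsumerRecord<?, ?>> only works when the container runs in batch mode, which the YAML above does not enable. A minimal sketch, assuming Spring Boot 2.2+ where this property exists (on older versions, define a ConcurrentKafkaListenerContainerFactory bean and call setBatchListener(true) on it instead):
spring:
  kafka:
    listener:
      type: batch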
Problem: requests to an endpoint fail with a 404
Fix:
By default @SpringBootApplication scans the package containing the application class and its sub-packages; if a controller lives outside that tree it is never registered, so either move it under that root or widen the scan, as in the sketch below.
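A minimal sketch; the package names are illustrative, matching the producer/consumer packages above:
package com.example.sms.middleware.sms_middleware;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// scanBasePackages widens component scanning to the given root; point it at
// the package that actually contains your controllers and components.
@SpringBootApplication(scanBasePackages = "com.example.sms.middleware")
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}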