Import the Kafka dependency
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.2.3.RELEASE</version>
</dependency>
This pairing assumes Spring Boot 1.4.0.RELEASE; using a different Spring Boot version can cause version-mismatch errors.
Four things need to be set up: the producer configuration, the consumer configuration, the KafkaTemplate bean, and the KafkaListenerContainerFactory bean (referenced by containerFactory() in @KafkaListener). If the container factory is not initialized, the Kafka consumer listeners cannot work.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
/**
* kafka config
*/
@Configuration
@EnableKafka
public class KafkaConfiguration {
    // ConcurrentKafkaListenerContainerFactory is the factory class that creates Kafka listener containers; only the consumer side is configured here
    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // three concurrent consumers, one per partition of the topic
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        // offsets are committed manually and immediately via Acknowledgment.acknowledge()
        factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
    // create the consumer factory from the parameters in consumerProperties()
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProperties());
    }

    // create the producer factory from the parameters in senderProperties()
    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProperties());
    }
    // KafkaTemplate provides the send and receive operations against Kafka
    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
    // consumer configuration parameters
    private Map<String, Object> consumerProperties() {
        Map<String, Object> props = new HashMap<>();
        // broker addresses
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.187.130:9092,192.168.187.132:9092,192.168.187.137:9092");
        // consumer group id
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "sun-group");
        // disable auto-commit; offsets are committed manually through Acknowledgment
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // auto-commit interval (6 seconds); only takes effect when auto-commit is enabled
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "6000");
        // start from the latest offset when no committed offset exists
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // session timeout, 12 seconds
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "12000");
        // maximum interval between polls, 6 seconds
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "6000");
        // maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "20");
        // key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        // value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
    // producer configuration parameters
    private Map<String, Object> senderProperties() {
        Map<String, Object> props = new HashMap<>();
        // broker addresses
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.187.130:9092,192.168.187.132:9092,192.168.187.137:9092");
        // wait for acknowledgement from all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // retries; 0 disables the retry mechanism
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // batch size in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // batching delay of 1 ms; batching reduces the number of requests the producer makes and improves throughput
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // total memory, in bytes, the producer may use to buffer records waiting to be sent
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        // value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }
}
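==========================================
The kafkaTemplate bean above is what the application injects to publish messages. Below is a minimal sender sketch, assuming the configuration in KafkaConfiguration; the DemoSender class name is illustrative and not part of the original code.
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
/**
 * kafka sender (illustrative sketch)
 */
@Service
public class DemoSender {
    @Autowired
    private KafkaTemplate<Integer, String> kafkaTemplate;

    // Send a message with an Integer key and a String value, matching the
    // serializers configured in senderProperties(); send() is asynchronous
    // and returns a ListenableFuture that can be blocked on if needed.
    public void send(Integer key, String message) {
        kafkaTemplate.send("hotelproduct", key, message);
    }
}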
==========================================
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;
/**
* Kafka consumer listener
*/
@Service
public class DemoListener {
    private static final Logger log = LoggerFactory.getLogger(DemoListener.class);

    /**
     * listen1
     * @param record the consumed record
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "0" }) },
            containerFactory = "kafkaListenerContainerFactory")
    public void listen1(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen1 demo receive : {}", record);
        /* TODO business logic */
        ack.acknowledge();
    }
    /**
     * listen2
     * @param record the consumed record
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "1" }) },
            containerFactory = "kafkaListenerContainerFactory")
    public void listen2(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen2 demo receive : {}", record);
        ack.acknowledge();
    }
    /**
     * listen3
     * @param record the consumed record
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "2" }) },
            containerFactory = "kafkaListenerContainerFactory")
    public void listen3(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen3 demo receive : {}", record);
        ack.acknowledge();
    }
}
Listening per partition improves throughput. Manual acknowledgment mode is used here: the consumer acknowledges each message by hand after receiving it, instead of relying on auto-commit. With auto-commit, if the consumer thread dies or the server goes down before the commit interval elapses, the offset cannot be committed, and records end up lost or re-consumed after the service restarts.
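To get the full benefit of manual commits, the listener should acknowledge only after the business logic has succeeded, so a crash mid-processing leaves the offset uncommitted and the record is redelivered rather than skipped. Below is a minimal sketch of that pattern, written as a drop-in variant of listen1 above; SafeAckListener and processRecord are illustrative names, and it is meant to replace one of the listeners rather than run alongside it, since two containers assigned the same partition in the same group would both consume it.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;
/**
 * Listener that commits the offset only after the business logic succeeds (illustrative sketch)
 */
@Service
public class SafeAckListener {
    private static final Logger log = LoggerFactory.getLogger(SafeAckListener.class);

    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "0" }) },
            containerFactory = "kafkaListenerContainerFactory")
    public void listen(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        try {
            processRecord(record);
            // With AckMode.MANUAL_IMMEDIATE this commits the offset right away,
            // and only after processing has succeeded.
            ack.acknowledge();
        } catch (Exception e) {
            // The offset stays uncommitted, so the record is redelivered after
            // a rebalance or restart instead of being skipped.
            log.error("processing failed, offset not committed for " + record, e);
        }
    }

    private void processRecord(ConsumerRecord<Integer, String> record) {
        // hypothetical placeholder for real business logic
    }
}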