Import Kafka
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.2.3.RELEASE</version>
</dependency>
Use Spring Boot 1.4.0.RELEASE; other versions produce a version-mismatch error.
The configuration class below sets up the producer configuration, the consumer configuration, the KafkaTemplate, and the KafkaListenerContainerFactory (referenced by containerFactory() on @KafkaListener). If the container factory is not initialized, the Kafka consumer listeners cannot work.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka configuration
 */
@Configuration
@EnableKafka
public class KafkaConfiguration {
    // ConcurrentKafkaListenerContainerFactory is the factory class for creating Kafka listener containers; only the consumer side is configured here
    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    // Creates the consumer factory from the parameters in consumerProperties
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProperties());
    }

    // Creates the producer factory from the parameters in senderProperties
    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProperties());
    }

    // KafkaTemplate implements the Kafka send operations (a usage sketch follows this class)
    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
    // Consumer configuration parameters
    private Map<String, Object> consumerProperties() {
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.187.130:9092,192.168.187.132:9092,192.168.187.137:9092");
        // Consumer group ID
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "sun-group");
        // Disable auto-commit; offsets are acknowledged manually in the listeners
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Auto-commit interval, 6 seconds (only takes effect when auto-commit is enabled)
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "6000");
        // Start reading from the latest offset when no committed offset exists
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // Session timeout, 12 seconds here
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "12000");
        // Maximum allowed interval between polls, 6 seconds here
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "6000");
        // Maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "20");
        // Key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        // Value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
    // Producer configuration
    private Map<String, Object> senderProperties() {
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.187.130:9092,192.168.187.132:9092,192.168.187.137:9092");
        // Wait for acknowledgment from all in-sync replicas
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Retries; 0 disables the retry mechanism
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // Batch size, in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // Linger for 1 ms to batch sends; this reduces the number of requests the producer issues and raises throughput
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // Total memory, in bytes, the producer may use to buffer records waiting to be sent to the server
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // Key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        // Value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }
}
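For reference, here is a minimal sketch of sending through the kafkaTemplate bean configured above. The DemoSender class, its send method, and the log output are illustrative additions, not part of the original configuration; only the hotelproduct topic and the Integer/String types come from the code above.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFutureCallback;
/**
 * Illustrative sender (not part of the original configuration)
 */
@Service
public class DemoSender {
    private static final Logger log = LoggerFactory.getLogger(DemoSender.class);

    @Autowired
    private KafkaTemplate<Integer, String> kafkaTemplate;

    public void send(Integer key, String value) {
        // send() is asynchronous and returns a ListenableFuture; with acks=all the
        // broker replies only after all in-sync replicas have stored the record
        kafkaTemplate.send("hotelproduct", key, value)
                .addCallback(new ListenableFutureCallback<SendResult<Integer, String>>() {
                    @Override
                    public void onSuccess(SendResult<Integer, String> result) {
                        // the result metadata carries the partition and offset assigned by the broker
                        log.info("sent to partition " + result.getRecordMetadata().partition()
                                + " at offset " + result.getRecordMetadata().offset());
                    }
                    @Override
                    public void onFailure(Throwable ex) {
                        log.error("send failed", ex);
                    }
                });
    }
}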
==========================================
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;
/**
 * Kafka consumer listeners
 */
@Service
public class DemoListener {
    private static final Logger log = LoggerFactory.getLogger(DemoListener.class);

    /**
     * listen1
     * @param record the received message
     * @param ack manual acknowledgment handle
     */
    // topics and topicPartitions are mutually exclusive on @KafkaListener,
    // so only topicPartitions is given here
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "0" }) }, containerFactory = "kafkaListenerContainerFactory")
    public void listen1(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen1 demo receive : {}", record);
        /* TODO business logic */
        ack.acknowledge();
    }

    /**
     * listen2
     * @param record the received message
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "1" }) }, containerFactory = "kafkaListenerContainerFactory")
    public void listen2(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen2 demo receive : {}", record);
        ack.acknowledge();
    }

    /**
     * listen3
     * @param record the received message
     * @param ack manual acknowledgment handle
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "hotelproduct", partitions = { "2" }) }, containerFactory = "kafkaListenerContainerFactory")
    public void listen3(ConsumerRecord<Integer, String> record, Acknowledgment ack) {
        log.info("listen3 demo receive : {}", record);
        ack.acknowledge();
    }
}
Listening per partition improves throughput. Manual message acknowledgment is used here: after the consumer receives a message, it confirms it by hand. This prevents the auto-commit scenario where the commit interval has not yet elapsed when a thread dies or the server goes down, leaving the offset uncommitted and losing track of the processed records after the service restarts.
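Because each listener above is pinned to a single partition, a record can be routed to a specific listener by writing to its partition directly. A minimal sketch, assuming the kafkaTemplate bean from KafkaConfiguration is injected (the loop and payloads are illustrative):
// Send one record to each of the three partitions so that listen1/listen2/listen3
// each receive exactly one message; send(topic, partition, key, value) bypasses
// the default partitioner and writes straight to the given partition.
for (int partition = 0; partition < 3; partition++) {
    kafkaTemplate.send("hotelproduct", partition, partition, "hello partition " + partition);
}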
