Spring kafka 卡夫卡消费者在遇到异常后突然停止

Spring kafka 卡夫卡消费者在遇到异常后突然停止,spring-kafka,Spring Kafka,下面是代码片段: 卡夫卡消费配置类 public ConsumerFactory<String, String> consumerFactory() { Map<String, Object> props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9093"); props.put(ConsumerConfig.GROUP_I

下面是代码片段:

卡夫卡消费配置类

/**
 * Builds the {@link ConsumerFactory} backing the listener containers.
 *
 * <p>Auto-commit is disabled because offsets are committed manually
 * (the container factory uses {@code AckMode.MANUAL_IMMEDIATE}).
 *
 * @return a string/string consumer factory wired to localhost:9093
 */
public ConsumerFactory<String, String> consumerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9093");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroupId");
    // Manual offset management: the listener acknowledges each record itself.
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    // NOTE(review): AUTO_COMMIT_INTERVAL_MS was removed here — the broker ignores it
    // when ENABLE_AUTO_COMMIT is false, so it was dead configuration.
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
    // Up to 10 records must be fully processed within 60 s, or the broker evicts
    // this consumer from the group and triggers a rebalance.
    props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 60000);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000);
    // Start from the earliest available offset when the group has no committed offset.
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return new DefaultKafkaConsumerFactory<>(props);
}

/**
 * Configures the concurrent listener container factory.
 *
 * <p>Uses manual-immediate acknowledgment with synchronous commits, so an offset
 * is committed only after {@code Acknowledgment.acknowledge()} is called.
 *
 * @return the listener container factory used by {@code @KafkaListener} methods
 */
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    // Fix: assign the typed factory directly — the previous raw ConsumerFactory local
    // produced an unchecked-assignment warning and lost the <String, String> types.
    factory.setConsumerFactory(consumerFactory());
    factory.getContainerProperties().setCommitLogLevel(LogIfLevelEnabled.Level.INFO);
    factory.setConcurrency(kafka.getConcurrency());
    // Offsets are committed explicitly by the listener via Acknowledgment.acknowledge().
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    factory.getContainerProperties().setSyncCommits(true);
    // NOTE(review): a poll timeout of 0 makes the container poll in a tight loop;
    // consider leaving the Spring default (5000 ms) unless there is a measured reason.
    factory.getContainerProperties().setPollTimeout(0);
    // A record whose listener throws is NOT acknowledged, so it is redelivered.
    factory.getContainerProperties().setAckOnError(false);
    factory.getContainerProperties().setConsumerRebalanceListener(new RebalanceListener());
    return factory;
}
public ConsumerFactory ConsumerFactory(){
Map props=newhashmap();
put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,“localhost:9093”);
put(ConsumerConfig.GROUP_ID_CONFIG,“consumerGroupId”);
put(ConsumerConfig.ENABLE\u AUTO\u COMMIT\u CONFIG,false);
put(ConsumerConfig.AUTO\u COMMIT\u INTERVAL\u MS\u CONFIG,10000);
props.put(ConsumerConfig.MAX\u POLL\u RECORDS\u CONFIG,10);
props.put(ConsumerConfig.MAX\u POLL\u INTERVAL\u MS\u CONFIG,60000);
props.put(ConsumerConfig.HEARTBEAT\u INTERVAL\u MS\u CONFIG,1000);
props.put(ConsumerConfig.SESSION\u TIMEOUT\u MS\u CONFIG,30000);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,“最早”);
put(ConsumerConfig.KEY\u反序列化程序\u类\u配置,StringDeserializer.CLASS);
put(ConsumerConfig.VALUE\u反序列化程序\u类\u配置,StringDeserializer.CLASS);
返回新的默认卡夫卡消费工厂(道具);
}
公共ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory(){
ConcurrentKafkListenerContainerFactory=新ConcurrentKafkListenerContainerFactory();
ConsumerFactory config=ConsumerFactory();
setConsumerFactory(配置);
factory.getContainerProperties().setCommitLogLevel(LogIfLevelEnabled.Level.INFO);
setConcurrency(kafka.getConcurrency());
factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL\u立即);
factory.getContainerProperties().setSyncCommits(true);
factory.getContainerProperties().setPollTimeout(0);
factory.getContainerProperties().setAckoneError(false);
factory.getContainerProperties().SetConsumerBalanceListener(新的重新平衡Listener());
返回工厂;
}
重新平衡侦听器类

public class RebalanceListener implements ConsumerAwareRebalanceListener {

private Map<TopicPartition, Long> partitionToUncommittedOffsetMap;

/**
 * Replaces the per-partition map of offsets that have not yet been committed;
 * {@code onPartitionsRevokedBeforeCommit} commits these when partitions are revoked.
 *
 * @param offsets map from partition to the last processed (uncommitted) offset
 */
public void setPartitionToUncommittedOffsetMap(Map<TopicPartition, Long> offsets) {
    partitionToUncommittedOffsetMap = offsets;
}

/**
 * Synchronously commits {@code offset + 1} for every entry in the given map,
 * then clears the map so the same offsets are not committed twice.
 *
 * @param partitionToOffsetMap partition → last processed offset; may be null or empty (no-op)
 * @param consumer             the Kafka consumer to commit through
 */
private void commitOffsets(Map<TopicPartition, Long> partitionToOffsetMap, Consumer<?, ?> consumer) {
    // Fix: the consumer parameter was a raw Consumer; use the wildcard generic form
    // (key/value types are irrelevant for committing offsets).
    if (partitionToOffsetMap != null && !partitionToOffsetMap.isEmpty()) {
        Map<TopicPartition, OffsetAndMetadata> partitionToMetadataMap = new HashMap<>();
        for (Map.Entry<TopicPartition, Long> e : partitionToOffsetMap.entrySet()) {
            log.info("Adding partition & offset for topic{}", e.getKey());
            // Kafka commits the offset of the NEXT record to be consumed, hence +1.
            partitionToMetadataMap.put(e.getKey(), new OffsetAndMetadata(e.getValue() + 1));
        }
        log.info("Consumer : {}, committing the offsets : {}", consumer, partitionToMetadataMap);
        consumer.commitSync(partitionToMetadataMap);
        // Clear after a successful commit; commitSync throwing leaves the map intact.
        partitionToOffsetMap.clear();
    }
}

@Override
// Called by the container before it commits, when partitions are being revoked
// during a rebalance: commits any offsets the listener recorded but has not acked.
// NOTE(review): with AckMode.MANUAL_IMMEDIATE the container already commits on
// acknowledge(); committing again here may be redundant — confirm the intent
// (if the goal is to rewind, seek() rather than commit is the usual mechanism).
public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
    log.info("Consumer is going to commit the offsets {}",consumer);
    commitOffsets(partitionToUncommittedOffsetMap, consumer);
    log.info("Committed offsets {}",consumer);
}
公共类RebalanceListener实现ConsumerAwareRebalanceListener{
私有地图分区到非委员会DoffSetMap;
public void setPartitionToUncommitteedOffsetMap(映射分区ToUncommitteedOffsetMap){
this.partitionToUncommitteedOffsetMap=partitionToUncommitteedOffsetMap;
}
私有void committeofset(映射分区到offsetmap,消费者){
if(partitionToOffsetMap!=null&!partitionToOffsetMap.isEmpty()){
Map partitionToMetadataMap=新HashMap();
对于(Map.Entry e:partitionToOffsetMap.entrySet()){
info(“为主题{}添加分区和偏移量”,e.getKey());
partitionToMetadataMap.put(e.getKey(),new OffsetAndMetadata(e.getValue()+1));
}
info(“Consumer:{},提交偏移量:{}”,Consumer,partitiontomatamap);
consumer.commitSync(分区到元数据映射);
partitionToOffsetMap.clear();
}
}
@凌驾
重新提交(使用者、集合分区)前的分区上的公共void{
info(“消费者将提交偏移量{}”,消费者);
委托集合(消费者对非委托集合图的划分);
log.info(“提交的偏移量{}”,消费者);
}
Kafka 监听器类

 // Consumes one record per invocation; offsets are committed manually via
 // Acknowledgment (AckMode.MANUAL_IMMEDIATE in the container factory).
 @KafkaListener(topics = "#{'${dimebox.kafka.topicName}'.split('"+ COMMA + "')}", groupId ="${dimebox.kafka.consumerGroupId}")
public void receive(@Header(KafkaHeaders.RECEIVED_TOPIC) String topic,@Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
    @Header(KafkaHeaders.OFFSET) long offset, Acknowledgment acknowledgment, final String payload) {
        TopicPartition tp = new TopicPartition(topic, partition);
    // A fresh single-entry map per record: any previously recorded uncommitted
    // offsets on the listener are discarded by the setter call below.
    Map<TopicPartition, Long> partitionToUncommittedOffsetMap = new ConcurrentHashMap<>();
    partitionToUncommittedOffsetMap.put(tp, offset);
    // NOTE(review): this invokes kafkaListenerContainerFactory(new ApplicationProperties())
    // on every message, building a brand-new container factory (and a new
    // RebalanceListener) just to set the offset map. The listener mutated here is
    // presumably NOT the one attached to the running container — confirm; injecting
    // or sharing the single RebalanceListener instance looks like the intended design.
    ((RebalanceListener)consumerConfig.kafkaListenerContainerFactory
            (new ApplicationProperties()).getContainerProperties().getConsumerRebalanceListener())
            .setPartitionToUncommittedOffsetMap(partitionToUncommittedOffsetMap);
        LOGGER.info("Insert Message Received from offset : {} ", offset);
        // If importer(...) throws, acknowledge() is never reached, so the offset is
        // not committed and the record will be redelivered.
        importerService.importer(payload);
        acknowledgment.acknowledge();
}
@KafkaListener(topics=“#${dimebox.kafka.topicName}”.split(“+逗号+”)}),groupId=“${dimebox.kafka.consumerGroupId}”)
public void receive(@Header(KafkaHeaders.RECEIVED_TOPIC)字符串主题,@Header(KafkaHeaders.RECEIVED_PARTITION_ID)int分区,
@标题(KafkaHeaders.OFFSET)长偏移量、确认、最终字符串有效负载){
TopicPartition tp=新的TopicPartition(主题,分区);
Map partitionToUncommitteedOffsetMap=新的ConcurrentHashMap();
分区到非委员会DOFFSETMAP.put(tp,偏移量);
((重新平衡侦听器)consumerConfig.kafkaListenerContainerFactory
(新的ApplicationProperties()).getContainerProperties().GetConsumerBalanceListener())
.SetPartitionToUncommitteedOffsetMap(PartitionToUncommitteedOffsetMap);
info(“插入从偏移量:{}接收的消息”,偏移量);
进口商服务。进口商(有效载荷);
确认。确认();
}
在这种配置之后,卡夫卡消费者突然停止了。
我们处理来自 Kafka 的消息,下游 API 返回错误并引发异常,处理消息的正常流程因此停止。请注意,异常已在应用程序级别捕获并记录。我们是否需要使用 Spring Kafka 库提供的特定错误处理机制?

卡夫卡消费者突然停止。
您需要解释这意味着什么。此外,还不清楚在撤销分区时为什么要提交旧的偏移量——这段代码想做什么?您的代码在通过确认(acknowledge)处理记录时就会提交偏移量;如果想要回退,应使用 seek 而不是提交。我建议您打开调试日志,看看发生了什么。@Gary,这里在 "importerService.importer(payload)" 中我们连接到另一台服务器。如果从该服务器收到错误请求或任何异常,Kafka 消费者就会停止;但如果我重启服务,它又会开始处理。正如您所建议的,此处是否不应使用 consumer.commitSync(partitionToMetadataMap),而应改用 seek 方法?在成功处理了一些记录之后再把偏移量重置回起始位置,对我来说仍然说不通;您需要更清楚地解释您的用例。
Kafka消费者突然停止。
您需要解释这意味着什么。另外,还不清楚为什么在撤销分区时提交旧偏移量-该代码试图做什么-当记录通过确认进行处理时,您的代码将提交偏移量;如果要倒带,应使用seek而不是提交。我建议您打开调试日志以查看发生了什么。@Gary,这里是“importerService.importer”(有效负载)“我们正在连接到不同的服务器。如果我将从该服务器收到错误请求或任何异常,则Kafka