
Java: exception when testing the Spring Kafka error handler of a @KafkaListener

Tags: java, spring-boot, apache-kafka, spring-kafka, spring-kafka-test

I am trying to test the error handler defined in the ConcurrentKafkaListenerContainerFactory that my @KafkaListener uses.

Different exceptions are retried a different number of times, and I want to verify that the listener retries correctly. But after the first exception is thrown and control reaches the error handler, I get an IllegalStateException, so no retry is attempted in my test. The same code works in the real setup.

This is the exception I get:

Seek to current after exception; nested exception is org.springframework.kafka.listener.ListenerExecutionFailedException

I expect the test to retry 10 times and then print the message in the recoverer. But it does not retry, because the error handler throws an IllegalStateException.

Can anyone suggest what is wrong?

@Configuration
@EnableKafka
public class Config {

    public static boolean seekPerformed;
    
    public static int retries;
    
    private Integer retryCount = 10;

    private Integer retryCount2 = 5;
    
    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;
    
    @Spy
    private ErrorCodes errorCodes;

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Anky> kafkaListenerContainerFactory(EmbeddedKafkaBroker embeddedKafka) {
        ConcurrentKafkaListenerContainerFactory<String, Anky> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory(embeddedKafka));
        factory.getContainerProperties().setDeliveryAttemptHeader(true);
        factory.getContainerProperties().setAckMode(AckMode.MANUAL_IMMEDIATE);
        SeekToCurrentErrorHandler errorHandler = new SeekToCurrentErrorHandler((record, exception) -> {
            System.out.println(
                    "RetryPolicy** limit has been exceeded! You should really handle this better." + record.key());
        });
        errorHandler.setBackOffFunction((record, exception) -> {
            retries++;
            seekPerformed = true;
            int maxRetryCount = retryCount + retryCount2; // note: currently unused
            Anky msg = (Anky) record.value();

            if (msg.getErrorCode().equals(errorCodes.getExceptionA())) {
                return new FixedBackOff(0L,Long.valueOf(retryCount));
            }
            else {
                return new FixedBackOff(0L,Long.valueOf(retryCount2));
            }

        });
        errorHandler.setCommitRecovered(true);
        factory.setErrorHandler(errorHandler);
        factory.setConcurrency(2);
        //errorHandler.setLogLevel(Level.INFO);
        factory.setStatefulRetry(true);
        return factory;
    }

    @Bean
    public DefaultKafkaConsumerFactory<String, Anky> consumerFactory(EmbeddedKafkaBroker embeddedKafka) {
        //return new DefaultKafkaConsumerFactory<>(consumerConfigs(embeddedKafka));
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(embeddedKafka), new StringDeserializer(),
                new JsonDeserializer<>(Anky.class, false));
    }

    @Bean
    public Map<String, Object> consumerConfigs(EmbeddedKafkaBroker embeddedKafka) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokerAddress(0).toString());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "retry-grp");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        //props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, new StringDeserializer());
        //props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, new JsonDeserializer<>(EdealsMessage.class, false));
        return props;
    }


    @Bean
    public ProducerFactory<String, Object> testProducerFactory(EmbeddedKafkaBroker embeddedKafka) {
        Map<String, Object> configs = new HashMap<>();
        configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokerAddress(0).toString());
        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return new DefaultKafkaProducerFactory<>(configs);
    }

    @Bean
    public KafkaTemplate<String, Object> testKafkaTemplate(EmbeddedKafkaBroker embeddedKafka) {
        KafkaTemplate<String, Object> kafkaTemplate = new KafkaTemplate<>(testProducerFactory(embeddedKafka));
        kafkaTemplate.setDefaultTopic("sr1");
        return kafkaTemplate;
    }

    @KafkaListener(topics = "sr1", groupId = "retry-grp", containerFactory = "kafkaListenerContainerFactory")
    public void listen1(ConsumerRecord<String, Anky> record,
            @Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery) throws AppException {

        try {
            throw new AppException(/* this is our custom exception in the application */);
        }
        catch (AppException se) {
            if (record.value().getNewErrorCode().equals(se.getErrorCode())) {
                System.out.println("are you here?");
                throw se;
            }
        }
    }

}
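As an aside (not from the original post): with SeekToCurrentErrorHandler, FixedBackOff(interval, maxAttempts) treats maxAttempts as the number of retries after the initial delivery, so FixedBackOff(0L, 10) allows up to 11 deliveries in total. A minimal standalone sketch of that semantics:

import org.springframework.util.backoff.BackOffExecution;
import org.springframework.util.backoff.FixedBackOff;

public class FixedBackOffDemo {

    public static void main(String[] args) {
        // the execution yields its interval maxAttempts times, then STOP (-1);
        // the error handler maps that to 10 retries, i.e. 11 delivery attempts
        FixedBackOff backOff = new FixedBackOff(0L, 10);
        BackOffExecution execution = backOff.start();
        int retries = 0;
        while (execution.nextBackOff() != BackOffExecution.STOP) {
            retries++;
        }
        System.out.println("retries allowed: " + retries); // prints 10
    }
}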

@EnableKafka
@SpringBootTest
@ExtendWith(SpringExtension.class)
@ContextConfiguration(classes = Config.class)
@EmbeddedKafka(
        partitions = 1, 
        controlledShutdown = true, topics = {"sr1"},
        brokerProperties = {
            "listeners=PLAINTEXT://localhost:3333", 
            "port=3333"
    })
public class KafkaRetryTest {

    @Autowired
    private Config config;

    @Autowired
    private KafkaTemplate<String, Object> template;
    
    @Autowired
    KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
    
    @Autowired
    EmbeddedKafkaBroker kafkaEmbedded;
    
    
    // note: @Spy is only initialized by a Mockito extension (or MockitoAnnotations.openMocks)
    @Spy
    private ErrorCodes errorCodes;
    
    @BeforeEach
    public void setUp() throws Exception {
        for (MessageListenerContainer messageListenerContainer : kafkaListenerEndpointRegistry.getListenerContainers()) {
            ContainerTestUtils.waitForAssignment(messageListenerContainer, kafkaEmbedded.getPartitionsPerTopic());
        }
    }

    @Test
    public void testStatefulRetry() throws Exception {
        Anky msg = new Anky();
        msg.setNewErrorCode(errorCodes.getExceptionA());
        this.template.send("sr1", "3323800", msg);
        assertThat(this.config.seekPerformed).isTrue();
        System.out.println("******" + this.config.retries);
    }
}
AppConfig*** (reads the bootstrapAddress from a properties file):

@Value(value = "${kafka.bootstrapAddress}")
private String bootstrapAddress;

@Bean
public ConsumerFactory<String, Anky> consumerFactory() {

    Map<String, Object> props = kafkaProperties.buildConsumerProperties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);

    return new DefaultKafkaConsumerFactory<>(props, new StringDeserializer(),
            new JsonDeserializer<>(Anky.class, false));
}

Show the full stack trace of the IllegalStateException.

"Seek to current after exception"

That is normal; you can suppress it by setting the log level on the error handler.

This works as expected for me:

@SpringBootApplication
public class So64780994Application {

    public static void main(String[] args) {
        SpringApplication.run(So64780994Application.class, args);
    }

    @KafkaListener(id = "so64780994", topics = "so64780994")
    public void listen(String in) {
        System.out.println(in);
        throw new RuntimeException("test");
    }

    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("so64780994").partitions(1).replicas(1).build();
    }

    @Bean
    public ErrorHandler errorHandler() {
        SeekToCurrentErrorHandler eh = new SeekToCurrentErrorHandler((rec, ex) -> {
            System.out.println("Retries exhausted:" + rec);
        });
        eh.setBackOffFunction((rec, ex) -> {
            return new FixedBackOff(0L, 8);
        });
        eh.setLogLevel(Level.DEBUG);
        return eh;
    }

    @Bean
    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
        return args -> template.send("so64780994", "foo");
    }

}
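Note (added for clarity, not from the original answer): the Level used in setLogLevel above is org.springframework.kafka.KafkaException.Level; lowering it, e.g. to DEBUG, is what suppresses the "Seek to current after exception" messages the container would otherwise log on every retry.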

For me it also works against a real Kafka cluster setup, but it fails when I try to create a test for it: after it reaches the error handler it does not go back to the listener again, it throws an IllegalStateException instead.

Without a stack trace I can't help you.

Strangely, it works correctly now without any change. But one more question: since my test sends a message to the topic and then verifies calls after the consumer has run, is there a way for my test to know when the listener has finished? Currently I am doing a Thread.sleep so that I can verify the invocations after the consumer completes, but that is not a real confirmation that the listener is done (a latch-based alternative is sketched below).

See the linked answer and the links it contains. Thanks!

One more question: I have two different test classes annotated with @EmbeddedKafka(partitions = 1, controlledShutdown = true, topics = {"abc"}, brokerProperties = {"listeners=PLAINTEXT://localhost:3333", "port=3333"}). Individually the classes run fine, but in a Maven build one of the test classes fails, complaining that port 3333 is already in use. What can I do to resolve this?
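One common alternative to Thread.sleep, sketched here under assumed names (LatchConfig, the latch field, and the count of 11 are illustrative, not from the original post, and this config stands in for the listener above rather than coexisting with it): count down a CountDownLatch at the end of the listener and await it in the test.

import java.util.concurrent.CountDownLatch;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.KafkaListener;

@Configuration
class LatchConfig {

    // 11 = 1 initial delivery + 10 retries; size this to the attempts you expect
    final CountDownLatch latch = new CountDownLatch(11);

    @KafkaListener(topics = "sr1", groupId = "retry-grp")
    public void listen(ConsumerRecord<String, Anky> record) {
        try {
            // ... the real listener logic, which may throw ...
        } finally {
            this.latch.countDown(); // counted down whether or not the listener threw
        }
    }
}

The test then replaces Thread.sleep with latchConfig.latch.await(30, TimeUnit.SECONDS) and asserts that it returned true.

As for the port conflict: the test class below drops the fixed port, so each embedded broker picks a free random port and publishes its address through bootstrapServersProperty: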
@EnableKafka
@SpringBootTest(classes = MyConsumer.class)
@ExtendWith(SpringExtension.class)
@DirtiesContext
@ContextConfiguration(classes = AppConfig.class)
@EmbeddedKafka(
        partitions = 1,
        topics = {"test_topic"},
        bootstrapServersProperty = "spring.kafka.bootstrap-servers"
)
public class KafkaConsumerTest {
}
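For that to work, the consumer configuration must read the same property the embedded broker writes. A minimal sketch (the test class name is illustrative, not from the original post): since AppConfig above reads kafka.bootstrapAddress, bootstrapServersProperty can be pointed at that property instead of the Spring Boot default.

import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.ContextConfiguration;

@SpringBootTest
@ContextConfiguration(classes = AppConfig.class)
@EmbeddedKafka(partitions = 1, topics = {"test_topic"},
        bootstrapServersProperty = "kafka.bootstrapAddress")
public class KafkaConsumerPortFreeTest {

    @Test
    public void contextLoads() {
        // with no brokerProperties/port set, each embedded broker binds a free
        // random port, so two test classes can no longer collide on port 3333
    }
}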
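Log output from the answer's example: nine deliveries of "foo" in total (the initial attempt plus eight retries from FixedBackOff(0L, 8)) before the recoverer runs: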
foo
2020-11-11 09:20:04.836  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:05.334  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:05.836  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:06.338  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:06.843  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:07.347  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:07.856  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
2020-11-11 09:20:08.361  INFO 65759 --- [o64780994-0-C-1] o.a.k.clients.consumer.KafkaConsumer     : [Consumer clientId=consumer-so64780994-1, groupId=so64780994] Seeking to offset 2 for partition so64780994-0
foo
Retries exhausted:ConsumerRecord(topic = so64780994, partition = 0, leaderEpoch = 0, offset = 2, CreateTime = 1605104404708, serialized key size = -1, serialized value size = 3, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = foo)