Netty NioEventLoopGroup causes OutOfMemory in Vert.x 3.x


I am using Vert.x 3.6.3 with Kafka in my application. I deploy my verticles to a cluster, but the application crashes frequently. I analyzed the heap dump and got the error shown in the attachment.

Many Netty NioEventLoopGroup objects are created. Is this a bug in Vert.x or in my code? Can anyone explain how Vert.x uses Netty and why this error occurs?
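For context on how Vert.x relates to Netty: Vert.x builds all of its networking on top of Netty, and every Vertx instance allocates its own Netty event-loop groups (a worker NioEventLoopGroup plus an acceptor group) when it is created. Many NioEventLoopGroup objects in a heap dump therefore usually mean that Vertx instances are being created repeatedly and never closed, rather than a leak inside a single instance. A minimal sketch of that failure mode (hypothetical code, not taken from the project below):

import io.vertx.core.Vertx;

public class VertxLeakSketch {

    public static void main(String[] args) {
        for (int i = 0; i < 10_000; i++) {
            // Each call creates a fresh Vertx instance and, with it, fresh Netty
            // event-loop groups and threads; without a matching vertx.close(),
            // every group stays reachable and the heap eventually fills up.
            Vertx leaked = Vertx.vertx();
        }
    }
}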

Update:

I have shared some details of the source code from my project below.

public class Application {

    private JsonObject config;

    public Application() {
    }

    // Getter, setter

}


public class BaseVerticle extends AbstractVerticle {

    private static final Logger LOGGER = LogManager.getLogger(BaseVerticle.class);

    /**
     * Load config from properties, environment variables, and system properties.
     *
     * @return a future that completes with the application once its config is set
     */
    protected Future<Application> loadConfig(Application application) {
        Future<Application> future = Future.future();

        ConfigStoreOptions file = new ConfigStoreOptions()
        .setType("file")
        .setFormat("properties")
        .setConfig(new JsonObject().put("path", "application.properties"));

        ConfigStoreOptions env = new ConfigStoreOptions().setType("env");
        ConfigStoreOptions sys = new ConfigStoreOptions().setType("sys");

        ConfigRetrieverOptions options = new ConfigRetrieverOptions()
            .addStore(file).addStore(env).addStore(sys);

        ConfigRetriever retriever = ConfigRetriever.create(vertx, options);
        retriever.getConfig(json -> {
            if (json.failed()) {
                LOGGER.error("Failed to load configuration. Reason: " + json.cause().getMessage());
                // Failed to retrieve the configuration
                json.cause().printStackTrace();
                future.fail(json.cause());
            } else {
                LOGGER.info("Load configuration success.");
                JsonObject config = json.result();
                future.complete(application.setConfig(config));
            }
        });

        return future;
    }
}

public class MainVerticle extends BaseVerticle {

    private static final Logger LOGGER = LogManager.getLogger(MainVerticle.class);

    @Override
    public void start(Future<Void> startFuture) throws Exception {
        doStart(startFuture);
    }

    private void doStart(Future<Void> startFuture) {
        vertx.exceptionHandler(event -> LOGGER.error("Unhandled exception: {}", event.getMessage(), event));
        LOGGER.info("vertx.isClustered() = {}", vertx.isClustered());
        Application application = new Application();

        loadConfig(application)
            .compose(this::deployProcessingVerticle)
            .setHandler(r -> {
                if (r.succeeded()) {
                    LOGGER.info("Deploy {} success.", getClass().getSimpleName());
                    startFuture.complete();
                } else {
                    LOGGER.error("Deploy {} failed.", getClass().getSimpleName());
                    startFuture.fail(r.cause());
                }
            });
    }

    private Future<Application> deployProcessingVerticle(Application application) {
        Future<Application> future = Future.future();

        JsonObject configuration = application.getConfig();

        int workerPoolSize = configuration.getInteger("http.workerPoolSize");
        DeploymentOptions opts = new DeploymentOptions()
                .setHa(true)
                .setWorker(true)
                .setInstances(1)
                .setWorkerPoolSize(workerPoolSize)
                .setWorkerPoolName("processing")
                .setConfig(configuration);

        vertx.deployVerticle(ProcessingVerticle.class, opts, res -> {
            if (res.failed()) {
                future.fail(res.cause());
                LOGGER.error("Deploy ProcessingVerticle failed. Reason: {}", res.cause().getMessage(), res.cause());
            } else {
                future.complete(application);
                LOGGER.info("Deploy ProcessingVerticle success.");
            }
        });

        return future;
    }

    public static void main(String[] args) {
        // Guard against a failed cluster join: ar.result() is null when clustering fails.
        Vertx.clusteredVertx(new VertxOptions().setHAEnabled(true), ar -> {
            if (ar.succeeded()) {
                ar.result().deployVerticle(MainVerticle.class.getName(), new DeploymentOptions().setHa(true));
            } else {
                LOGGER.error("Failed to start clustered Vert.x.", ar.cause());
            }
        });
    }
}

public class ProcessingVerticle extends AbstractVerticle {

    private static final Logger LOGGER = LogManager.getLogger(ProcessingVerticle.class);

    private KafkaHandler kafkaHandler;

    @Override
    public void start(Future<Void> startFuture) throws Exception {
        // Do not call super.start(startFuture) here: AbstractVerticle completes the
        // future immediately, so completing it again later would fail.
        kafkaHandler = new KafkaHandler(vertx, config(), startFuture);
    }

}

public class KafkaHandler {

    private static final Logger logger = LogManager.getLogger(KafkaHandler.class);

    private KafkaWriteStream<String, JsonObject> producer;
    private KafkaReadStream<String, JsonObject> consumer;
    private Vertx vertx;
    private JsonObject config;
    private Function<JsonObject, Void> processMessage1;
    private Function<JsonObject, Void> processMessage2;

    private String topic1;
    private String topic2;

    public KafkaHandler(Vertx vertx, JsonObject config, Future<Void> startFuture){
        this.vertx = vertx;
        this.config = config;
        initTopics(config);
        startKafka(startFuture);
    }

    private void startKafka(Future<Void> startFuture) {
        createProducer();
        createConsumer();
        // Signal successful startup once both Kafka clients have been created.
        startFuture.complete();
    }

    private void createProducer() {
        Properties config = new Properties();
        String server = this.config.getString("kafka.servers", "localhost:9092");
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonObjectSerializer.class);
        config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, this.config.getString("kafka.request.timeout", "30000"));
        config.put(ProducerConfig.ACKS_CONFIG, "1");
        producer = KafkaWriteStream.create(vertx, config, String.class, JsonObject.class);
    }

    private void initTopics(JsonObject config) {
        topic1 = this.config.getString(...);
        topic2 = this.config.getString(...);
    }

    public void publishMessage(JsonObject message, String topic){
        producer.write(new ProducerRecord<>(topic, message), ar -> {
            if (ar.failed()){
                logger.error(ar.cause());
            }
        });
    }

    private void createConsumer() {
        Properties config = new Properties();
        String server = this.config.getString("kafka.servers", "localhost:9092");
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        // Key/value deserializers are supplied through the Class arguments passed to
        // KafkaReadStream.create below, so explicit deserializer entries here would be
        // ignored (and StringDeserializer would not match the JsonObject value type).
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, this.config.getString("kafka.offset.reset", "latest"));
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        config.put(ConsumerConfig.GROUP_ID_CONFIG, this.config.getString("kafka.group.id"));
        consumer = KafkaReadStream.create(vertx, config, String.class, JsonObject.class);
    }

    private void processRecord(ConsumerRecord<String, JsonObject> record) {
        logger.info("Topic {} - Receive Message: {}", record.topic(), record.value().toString());

        if(record.topic().contains(topic1)){
            processMessage1.apply(record.value());
        }
        if(record.topic().contains(topic2)){
            processMessage2.apply(record.value());
        }
    }

    public void consumerSubscribe(List<Integer> coins) {
        Set<String> topics = new HashSet<>(Arrays.asList(topic1, topic2));
        consumer.subscribe(topics, ar -> {
            if (ar.succeeded()) {
                logger.info("Consumer subscribed");
                // Poll the subscribed topics once a second and dispatch each record.
                vertx.setPeriodic(1000, timerId -> {
                    consumer.poll(100, records -> {
                        if (records.succeeded()) {
                            records.result().forEach(this::processRecord);
                        }
                    });
                });
            } else {
                logger.error(ar.cause());
            }
        });
    }

    public void stopKafka() {
        if (producer != null) {
            producer.close();
        }
        if (consumer != null) {
            consumer.close();
        }
    }


    // Getter, Setter
}