Netty Apache Storm java.nio.channels.ClosedChannelException: null


We are trying to use Apache Storm to process a large number of (fake) messages. Message example:

"{"clientName":"Sergey Bakulin","sum":12925,"group":"property","suspicious":false,"clientId":2,"dt":1404387303764,"coord":{"lat":55.767842588357645,"lon":37.46920361823332}}".
We use Apache Kafka as the message source for the Storm cluster. Our goal is to process at least 50k msg/sec per node. When we use more than one node, we regularly run into the error from the title, java.nio.channels.ClosedChannelException: null (the log fragments come from the worker-*.log files).

Our current Storm configuration:

########### These MUST be filled in for a storm configuration
storm.zookeeper.servers:
  - "172.31.*.*"

storm.local.dir: "/home/*/storm/data"
nimbus.host: "127.0.0.1"
supervisor.slots.ports:
  - 6701
  - 6702

ui.port: 8090

worker.childopts: "-Xmx6g -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=1%ID% -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun$

supervisor.childopts: "-Xmx1024m -Djava.net.preferIPv4Stack=true"
supervisor.worker.start.timeout.secs: 10
supervisor.worker.timeout.secs: 10
supervisor.monitor.frequency.secs: 3
supervisor.heartbeat.frequency.secs: 5
supervisor.enable: true

storm.messaging.netty.server_worker_threads: 2
storm.messaging.netty.client_worker_threads: 2
storm.messaging.netty.buffer_size: 5242880
storm.messaging.netty.max_retries: 25
storm.messaging.netty.max_wait_ms: 1000
Our Storm topology:

Properties conf = Util.readProperties(ClientTopology.class, "storm.properties");

prepareRedisDB(conf);

TopologyBuilder builder = new TopologyBuilder();

builder.setSpout("kafka_trans_spout", getKafkaSpout(conf, conf.getProperty("kafka_trans_topic")), 3);
builder.setSpout("kafka_socevent_spout", getKafkaSpout(conf, conf.getProperty("kafka_socevent_topic")), 3);

builder.setBolt("json_to_tuple_trans_bolt", new JSONToTupleBolt(Transaction.class), 6)
        .shuffleGrouping("kafka_trans_spout");
builder.setBolt("json_to_tuple_socevent_bolt", new JSONToTupleBolt(SocialEvent.class), 3)
        .shuffleGrouping("kafka_socevent_spout");

builder.setBolt("alert_bolt", new AlertBolt(conf), 3)
        .fieldsGrouping("json_to_tuple_trans_bolt", new Fields("cl_id"))
        .fieldsGrouping("json_to_tuple_socevent_bolt", new Fields("cl_id"));
builder.setBolt("offer_bolt", new NearestOfferBolt(conf), 3)
        .shuffleGrouping("json_to_tuple_trans_bolt");

run(builder, args, 6);

private static KafkaSpout getKafkaSpout(Properties conf, String topic) {
    SpoutConfig spoutConfig = new SpoutConfig(
            new ZkHosts(conf.getProperty("zk_host"), "/brokers"),
            topic,
            "/brokers",
            conf.getProperty("kafka_consumer_group_id"));
    List<String> zkServers = new ArrayList<String>();
    zkServers.add(conf.getProperty("zk_host"));
    spoutConfig.zkServers = zkServers;
    spoutConfig.zkPort = Integer.valueOf(conf.getProperty("zk_port"));
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.forceFromStart = true;
    spoutConfig.fetchSizeBytes = 5*1024*1024;
    spoutConfig.bufferSizeBytes = 5*1024*1024;
    storm.kafka.KafkaSpout kafkaSpout = new storm.kafka.KafkaSpout(spoutConfig);
    return kafkaSpout;
}
We use AWS c3.2xlarge machines, Apache Storm 0.9.2-incubating,
Apache Kafka 2.9.2-0.8.1.1.

Test ping and telnet: make sure every machine running Storm can reach all the other machines (all workers, nimbus, and zookeeper) via ping. Try pinging by IP, hostname, and FQDN; if that doesn't work, edit the hosts file (/etc/hosts).

Also, telnet between the machines to verify that the ports opened in storm.yaml (6701, 6702) and the ZooKeeper port (2181) are reachable.
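If telnet is not installed on the boxes, here is a minimal Java sketch of the same reachability check. The host below is the one mentioned later in the comments and the ports are the slot and ZooKeeper ports from the question; substitute your own supervisor IPs and storm.yaml ports.

import java.net.InetSocketAddress;
import java.net.Socket;

public class PortCheck {
    public static void main(String[] args) {
        // Placeholder host and ports: replace with your supervisor IPs,
        // the slot ports from storm.yaml, and the ZooKeeper port.
        String host = "172.31.23.123";
        int[] ports = {6701, 6702, 2181};

        for (int port : ports) {
            Socket socket = new Socket();
            try {
                // Fail fast if nothing is listening or the port is firewalled.
                socket.connect(new InetSocketAddress(host, port), 2000);
                System.out.println(host + ":" + port + " is reachable");
            } catch (Exception e) {
                System.out.println(host + ":" + port + " is NOT reachable: " + e);
            } finally {
                try {
                    socket.close();
                } catch (Exception ignored) {
                }
            }
        }
    }
}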

In the environment I tested, the storm.yaml setup worked with the following Netty settings:

storm.messaging.netty.buffer_size: 5242880
storm.messaging.netty.client_worker_threads: 1
storm.messaging.netty.max_retries: 100
storm.messaging.netty.max_wait_ms: 1000
storm.messaging.netty.min_wait_ms: 100
storm.messaging.netty.server_worker_threads: 1
storm.messaging.transport: backtype.storm.messaging.netty.Context

Also try putting some load on the topic first and only then running the topology; this has happened to me several times when the topic was brand new and there was no load on it yet.
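As a rough sketch of pre-loading a topic before submitting the topology, using the Kafka 0.8 producer API (the broker address, topic name, and message body below are placeholders, not values taken from the question):

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class TopicPreloader {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Hypothetical broker address; use your own Kafka broker list.
        props.put("metadata.broker.list", "172.31.0.1:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");

        Producer<String, String> producer =
                new Producer<String, String>(new ProducerConfig(props));

        // Push a few test messages so the spout has something to fetch
        // as soon as the topology starts.
        for (int i = 0; i < 1000; i++) {
            producer.send(new KeyedMessage<String, String>(
                    "your_topic_name", "{\"clientId\":" + i + "}"));
        }
        producer.close();
    }
}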

You can check whether something is actually listening on 172.31.23.123:6701; try netstat -antp | grep 6701 on that machine.

Did you find a solution? I am running into the same error right now.

The exception looks as if the supervisor's ports cannot be reached from the outside. See this link: have you tried telnetting to that host and port?