Apache Kafka: consumer group id and consumer rebalance issue


I am running Kafka 0.10.0 and ZooKeeper 3.4.6 on my production servers. I have 20 topics, each with roughly 50 partitions, and 100 consumers in total subscribed across these topics and partitions. All consumers share the same groupId. If a consumer is added or removed for one particular topic, will the consumers attached to the other topics also go through a rebalance?

My consumer code is:

public static void main(String[] args) {
        String groupId = "prod"
        String topicRegex = args[0]
        String consumerTimeOut = "10000"
        int n_threads = 1
        if (args && args.size() > 1) {
            ConfigLoader.init(args[1])
        }
        else {
            ConfigLoader.init('development')
        }
        if(args && args.size() > 2 && args[2].isInteger()){
            n_threads = (args[2]).toInteger()
        }

        // one consumer task per thread; all threads share the same groupId "prod"
        ExecutorService executor = Executors.newFixedThreadPool(n_threads)
        addShutdownHook(executor)
        String zooKeeper = ConfigLoader.conf.zookeeper.hostName
        List<Runnable> taskList = []
        for(int i = 0; i < n_threads; i++){
            KafkaConsumer example = new KafkaConsumer(zooKeeper, groupId, topicRegex, consumerTimeOut)
            taskList.add(example)
        }
        taskList.each{ task ->
            executor.submit(task)
        }
        executor.shutdown()
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS)
    }

private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId, String consumerTimeOut) {

        Properties props = new Properties()
        props.put("zookeeper.connect", a_zookeeper)
        props.put("group.id", a_groupId)
        props.put("zookeeper.session.timeout.ms", "10000")
        props.put("rebalance.backoff.ms","10000")
        props.put("zookeeper.sync.time.ms","200")
        props.put("rebalance.max.retries","10")
        props.put("enable.auto.commit", "false")
        props.put("consumer.timeout.ms", consumerTimeOut)
        props.put("auto.offset.reset", "smallest")
        return new ConsumerConfig(props)

    }

public void run(String topicRegex) {
        String threadName = Thread.currentThread().getName()
        logger.info("{} [{}] main Starting", TAG, threadName)
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>()
        List<KafkaStream<byte[], byte[]>> streams = consumer.createMessageStreamsByFilter(new Whitelist(topicRegex),1)
        ConsumerConnector consumerConnector = consumer

        for (final KafkaStream stream : streams) {
            ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator()
            List<Object> batchTypeObjList = []
            String topic
            String topicObjectType
            String method
            String className
            String deserializer
            Integer batchSize = 200
            while (true){
                boolean hasNext = false
                try {
                    hasNext = consumerIterator.hasNext()
                } catch (InterruptedException interruptedException) {
                    logger.error("{} [{}] Interrupted Exception: {}", TAG, threadName, interruptedException.getMessage())
                    throw interruptedException
                } catch (ConsumerTimeoutException timeoutException) {
                    // no message arrived within consumer.timeout.ms: flush any
                    // partially filled batches before polling again
                    logger.error("{} [{}] Timeout Exception: {}", TAG, threadName, timeoutException.getMessage())
                    topicListMap.each{ eachTopic, value ->
                        batchTypeObjList = topicListMap.get(eachTopic)
                        if(batchTypeObjList != null && !batchTypeObjList.isEmpty()) {
                            def dbObject = topicConfigMap.get(eachTopic)
                            logger.debug("{} [{}] Timeout Happened.. Indexing remaining objects in list for topic: {}", TAG, threadName, eachTopic)
                            className = dbObject.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
                            method = dbObject.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
                            int sleepTime = 0
                            if(dbObject.get(KafkaTopicConfigEntity.CONUSMER_SLEEP_IN_MS) != null)
                                sleepTime = dbObject.get(KafkaTopicConfigEntity.CONUSMER_SLEEP_IN_MS)?.toInteger()
                            executeMethod(className, method, batchTypeObjList)
                            batchTypeObjList.clear()
                            topicListMap.put(eachTopic,batchTypeObjList)
                            sleep(sleepTime)
                        }
                    }
                    consumer.commitOffsets()
                    continue
                } catch (Exception exception) {
                    logger.error("{} [{}] Exception: {}", TAG, threadName, exception.getMessage())
                    throw exception
                }
                if(hasNext) {
                    def consumerObj = consumerIterator.next()
                    logger.debug("{} [{}] partition name: {}", TAG, threadName, consumerObj.partition())
                    topic = consumerObj.topic()
                    DBObject dbObject = topicConfigMap.get(topic)
                    logger.debug("{} [{}] topic name: {}", TAG, threadName, topic)
                    // pick the deserializer from the topic's configured object type
                    topicObjectType = dbObject.get(KafkaTopicConfigEntity.TOPIC_OBJECT_TYPE_KEY)
                    deserializer = KafkaConfig.DEFAULT_DESERIALIZER
                    if (KafkaConfig.DESERIALIZER_MAP.containsKey(topicObjectType)) {
                        deserializer = KafkaConfig.DESERIALIZER_MAP.get(topicObjectType)
                    }
                    className = dbObject.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
                    method = dbObject.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
                    boolean isBatchJob = dbObject.get(KafkaTopicConfigEntity.IS_BATCH_JOB_KEY)
                    if(dbObject.get(KafkaTopicConfigEntity.BATCH_SIZE_KEY) != null)
                        batchSize = dbObject.get(KafkaTopicConfigEntity.BATCH_SIZE_KEY)
                    else
                        batchSize = 1
                    Object queueObj = (Class.forName(deserializer)).deserialize(consumerObj.message())
                    int sleepTime = 0
                    if(dbObject.get(KafkaTopicConfigEntity.CONUSMER_SLEEP_IN_MS) != null)
                        sleepTime = dbObject.get(KafkaTopicConfigEntity.CONUSMER_SLEEP_IN_MS)?.toInteger()
                    if (isBatchJob) {
                        // batch mode: buffer per topic and flush once the batch is full
                        batchTypeObjList = topicListMap.get(topic)
                        batchTypeObjList.add(queueObj)
                        if(batchTypeObjList.size() == batchSize) {
                            executeMethod(className, method, batchTypeObjList)
                            batchTypeObjList.clear()
                            sleep(sleepTime)
                        }
                        topicListMap.put(topic,batchTypeObjList)
                    } else {
                        executeMethod(className, method, queueObj)
                        sleep(sleepTime)
                    }
                    consumer.commitOffsets()
                }
            }
            logger.debug("{} [{}] Shutting Down Process ", TAG, threadName)
        }
    }
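Note that every thread above joins the single consumer group "prod", whatever its topicRegex matches. If the goal is to keep a membership change on one topic from disturbing consumers of the other topics, one option is a distinct group.id per topic, so that each topic gets its own independently rebalanced group. A minimal sketch against the same high-level consumer config (the createConsumerConfigForTopic name and the topicName parameter are illustrative, not part of the original code):

private static ConsumerConfig createConsumerConfigForTopic(String a_zookeeper, String baseGroupId, String topicName, String consumerTimeOut) {

        Properties props = new Properties()
        props.put("zookeeper.connect", a_zookeeper)
        // e.g. group "prod-orders" instead of one shared "prod" for all 100 consumers
        props.put("group.id", baseGroupId + "-" + topicName)
        props.put("zookeeper.session.timeout.ms", "10000")
        props.put("enable.auto.commit", "false")
        props.put("consumer.timeout.ms", consumerTimeOut)
        props.put("auto.offset.reset", "smallest")
        return new ConsumerConfig(props)

    }

Each group then rebalances on its own, so adding or removing a consumer for one topic leaves the groups of the other topics untouched.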
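Yes, that is exactly what happens. Kafka tracks group membership per group.id, not per topic: whenever any consumer with that group.id joins or leaves, the whole group goes through a rebalance, so consumers attached to completely different topics also have their partitions revoked and reassigned. The test log below shows two consumers in one group subscribed to different topics (test1 and test2); when consumer2 starts, consumer1's partitions are revoked and reassigned even though consumer2 never touches test1: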
Subscribing consumer1 to topic test1
Starting thread for consumer1
Polling consumer1
consumer1 got 0 partitions revoked!
consumer1 got 2 partitions assigned!
Polling consumer1
Polling consumer1
Polling consumer1
Subscribing consumer2 to topic test2
Starting thread for consumer2
Polling consumer2
Polling consumer1
consumer2 got 0 partitions revoked!
Polling consumer1
Polling consumer1
consumer1 got 2 partitions revoked!
consumer2 got 9 partitions assigned!
consumer1 got 2 partitions assigned!
Polling consumer2
Polling consumer1
Polling consumer2
Polling consumer1
Polling consumer2
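For reference, output in this shape can be produced with a ConsumerRebalanceListener on the new 0.10 consumer API. A minimal sketch, assuming a broker at localhost:9092; the runConsumer helper and its parameters are illustrative:

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition

def runConsumer(String name, String topic) {
    Properties props = new Properties()
    props.put("bootstrap.servers", "localhost:9092")   // assumed local broker
    props.put("group.id", "prod")                      // one shared group, as in the question
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<byte[], byte[]>(props)
    println "Subscribing ${name} to topic ${topic}"
    // the listener fires on every group rebalance, even when this
    // consumer's own subscription has not changed
    consumer.subscribe([topic], new ConsumerRebalanceListener() {
        void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            println "${name} got ${partitions.size()} partitions revoked!"
        }
        void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            println "${name} got ${partitions.size()} partitions assigned!"
        }
    })
    while (true) {
        println "Polling ${name}"
        consumer.poll(1000)
    }
}

Running runConsumer("consumer1", "test1") in one thread and, a few seconds later, runConsumer("consumer2", "test2") in another reproduces the revoke/assign pairs above. With a distinct group.id per topic, consumer1's listener would stay silent when consumer2 joins.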