Apache Kafka: fetching records by timestamp in a consumer loop

Tags: apache-kafka, kafka-consumer-api

I am using a Kafka 0.10.2.1 cluster. I am using Kafka's offsetsForTimes API to seek to particular offsets, and I want to break out of the consumer loop once the end timestamp is reached.

My code is as follows:

//package kafka.ex.test;

import java.util.*;



import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class ConsumerGroup {


   public static OffsetAndTimestamp fetchOffsetByTime( KafkaConsumer<Long, String> consumer , TopicPartition partition , long startTime){

      Map<TopicPartition, Long> query = new HashMap<>();
      query.put(partition, startTime);

      final Map<TopicPartition, OffsetAndTimestamp> offsetResult = consumer.offsetsForTimes(query);
      if (offsetResult == null || offsetResult.isEmpty()) {
         // offsetsForTimes found nothing at or after startTime
         // (don't call offsetResult.size() here: it would NPE when the map is null)
         System.out.println(" No Offset to Fetch ");
         return null;
      }
      final OffsetAndTimestamp offsetTimestamp = offsetResult.get(partition);
      if(offsetTimestamp == null ){
         System.out.println("No Offset Found for partition : "+partition.partition());
      }
      return offsetTimestamp;
   }

   public static KafkaConsumer<Long, String>  assignOffsetToConsumer( KafkaConsumer<Long, String> consumer, String topic , long startTime ){
      final List<PartitionInfo> partitionInfoList = consumer.partitionsFor(topic);
      System.out.println("Number of Partitions : "+partitionInfoList.size());
      final List<TopicPartition> topicPartitions = new ArrayList<>();
      for (PartitionInfo pInfo : partitionInfoList) {
         TopicPartition partition = new TopicPartition(topic, pInfo.partition());
         topicPartitions.add(partition);
      }
      consumer.assign(topicPartitions);
      for(TopicPartition partition : topicPartitions ){
         OffsetAndTimestamp offSetTs = fetchOffsetByTime(consumer, partition, startTime);


         if( offSetTs == null ){
            System.out.println("No Offset Found for partition : " + partition.partition());
            consumer.seekToEnd(Arrays.asList(partition));
         }else {
            System.out.println(" Offset Found for partition : " +offSetTs.offset()+" " +partition.partition());
            System.out.println("FETCH offset success"+
                    " Offset " + offSetTs.offset() +
                    " offSetTs " + offSetTs);
            consumer.seek(partition, offSetTs.offset());
         }
      }
      return consumer;
   }

   public static void main(String[] args) throws Exception {


      // Expected args: <topic> <group> <bootstrapServers> <startTimestamp> <endTimestamp>
      String topic = args[0];
      String group = args[1];
      String bootstrapServers = args[2];
      long start_time_Stamp = Long.parseLong(args[3]);
      long end_time_Stamp = Long.parseLong(args[4]);
      Properties props = new Properties();
      boolean reachedEnd = false;

      props.put("bootstrap.servers", bootstrapServers);
      props.put("group.id", group);
      props.put("enable.auto.commit", "true");
      props.put("auto.commit.interval.ms", "1000");
      props.put("session.timeout.ms", "30000");
      props.put("key.deserializer",
         "org.apache.kafka.common.serialization.StringDeserializer");
      props.put("value.deserializer",
         "org.apache.kafka.common.serialization.StringDeserializer");

      KafkaConsumer<Long, String> consumer = new KafkaConsumer<Long, String>(props);
      assignOffsetToConsumer(consumer, topic, start_time_Stamp);


      System.out.println("Subscribed to topic " + topic);
      int i = 0;

      int arr[] = {0,0,0,0,0};
      while (true) {
         ConsumerRecords<Long, String> records = consumer.poll(6000);
         int count= 0;
         long lasttimestamp = 0;
         long lastOffset = 0;
            for (ConsumerRecord<Long, String> record : records) {

               count++;

               if(arr[record.partition()] == 0){
                  arr[record.partition()] =1;
               }


               // NOTE: this exits as soon as a single record from ANY partition
               // reaches the end timestamp (see the answer below).
               if (record.timestamp() >= end_time_Stamp) {
                  reachedEnd = true;
                  break;
               }


               System.out.println("record=>"+" offset="
                       +record.offset()
                       + " timestamp="+record.timestamp()
                        + " :"+record);
               System.out.println("recordcount = "+count+" bitmap"+Arrays.toString(arr));

            }

         if (reachedEnd) break;
         if (records.isEmpty()) break; // poll never returns null; stop when a poll comes back empty
      }

   }



}
I am facing the following issues:

  • consumer.poll returns nothing even with a 1000 ms timeout. If I use 1000 ms I have to poll several times in a loop, so I currently use a very large value. But since the relevant offsets within the partitions have already been found, how can I set the poll timeout reliably so that data is returned immediately?

  • My observation is that when data is returned, it does not always come from all partitions. And even when data is returned from all partitions, not all records are returned. The topic holds more than 1000 records, but the number of records actually fetched and printed in the loop is lower (~200). Is there anything wrong with my current usage of the Kafka API?

  • How do I reliably break out of the loop once all the data between the start and end timestamps has been consumed, without exiting prematurely?

  • The number of records fetched per poll depends on your consumer configuration (see the config sketch after this list).

  • You are breaking out of the loop as soon as one partition reaches the end timestamp, which is not what you want. You should verify that all partitions have reached the end timestamp before exiting the poll loop (see the drain-loop sketch at the end of this answer).

  • A poll is an asynchronous call: the consumer sends fetch requests to each broker node and gathers the responses, so depending on how the brokers respond, a single poll may or may not return data from every partition.
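On the first point: for reference, a minimal sketch of the standard consumer settings that bound how many records a single poll can return and how long the broker holds a fetch before responding. The property names are standard Kafka consumer configs; the values shown are illustrative, not recommendations:

Properties props = new Properties();
// Upper bound on the number of records a single poll() may return
// (defaults to 500 as of Kafka 0.10.1).
props.put("max.poll.records", "500");
// The broker answers a fetch as soon as this many bytes are available,
// or once fetch.max.wait.ms elapses; with these (default) values a poll
// can return quickly even when little data has accumulated.
props.put("fetch.min.bytes", "1");
props.put("fetch.max.wait.ms", "500");
// Per-partition fetch size: a single fetch never returns more than this
// many bytes from one partition, which also caps what one poll can carry.
props.put("max.partition.fetch.bytes", "1048576");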
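On the second and third points: one way to exit only after every partition has passed the end timestamp is to track finished partitions explicitly and pause each one as it completes. Below is a minimal sketch under the question's setup (the consumer has already been assigned its partitions and seeked via offsetsForTimes); consumeUntil is a hypothetical helper, and it also uses endOffsets to detect partitions that run out of records before reaching the end timestamp:

import java.util.*;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class BoundedConsume {

   // Drains every assigned partition up to (but not including) records whose
   // timestamp is >= endTimestamp. Assumes the consumer has already been
   // assigned and seeked, as in the question's assignOffsetToConsumer.
   public static void consumeUntil(KafkaConsumer<Long, String> consumer, long endTimestamp) {
      final Set<TopicPartition> assigned = consumer.assignment();
      final Set<TopicPartition> finished = new HashSet<>();
      // Log end offsets let us detect partitions that simply run out of
      // records before any record reaches endTimestamp.
      final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(assigned);

      while (finished.size() < assigned.size()) {
         ConsumerRecords<Long, String> records = consumer.poll(1000);
         for (ConsumerRecord<Long, String> record : records) {
            TopicPartition tp = new TopicPartition(record.topic(), record.partition());
            if (finished.contains(tp)) {
               continue; // already past the end timestamp on this partition
            }
            if (record.timestamp() >= endTimestamp) {
               finished.add(tp);
               consumer.pause(Collections.singletonList(tp)); // stop fetching from it
            } else {
               System.out.println("partition=" + record.partition()
                     + " offset=" + record.offset()
                     + " timestamp=" + record.timestamp());
            }
         }
         // Mark partitions whose position has caught up with the log end.
         for (TopicPartition tp : assigned) {
            if (!finished.contains(tp) && consumer.position(tp) >= endOffsets.get(tp)) {
               finished.add(tp);
            }
         }
      }
   }
}

Pausing a finished partition keeps fetching alive for the others while preventing it from returning further records, and the only exit condition is that every assigned partition is finished, so a slow or briefly empty poll no longer ends the loop early.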