How to consume from a topic with the Kafka client in Java


I am trying to consume messages from a topic that I know is being produced to (the Kafka producer already exists; all I have to do is write the code that consumes the messages).

Here is my code:

public class ConsumerManager extends ShutdownableThread {

    //    private final KafkaConsumer<String, String> kafkaConsumer;
//    static String brokers = Configuration.prop.getProperty("suite.pusher.defaultParams.kafkaBrokers");
    static String brokers = "<kafka_server_ip>:9092";
    static private Producer<String, String> m_kafkaProducers;
    static private int m_CurrentProducerIndex;
    static private int maxKafkaMessageSizeInMb = 8;
    static private String kafkaCompressionCodec = "lz4";
    static private String maxKafkaAcks = "1";
    static private int maxKafkaRetries;
    static private int kafkaBatchSize = 16384;
    static private int kafkaBufferMemory = 33554432;
    static private int kafkaConnectionsMaxIdleMs = 540000;
    static private int kafkaLingerMs = 0;
    static private int kafkaMaxBlocks = 100;
    static private int kafkaMaxRequestSize = 1048576;
    static private int kafkaRequestTimeoutMs = 300000;
    static private int kafkaMaxInFlightRequestsPerConnection = 5;
    static private int kafkaBatchNumMessages = 10000;
    static private int kafkeQueueBufferingMaxMs = 10;
    static private int kafkaFetchWaitMaxMs = 10;
    static private int kafkaSocketBlockingMaxMs = 10;
    static private boolean kafkaSocketNagleDisable = false;
    String kafkaPrefix = Configuration.prop.getProperty("suite.pusher.defaultParams.kafkaTopicPrefix");

    private final Boolean isAsync = false;
    static String TOPIC = "proc_422_26952";//"proc_69686_61";//" "qa_69313_2";
    private final KafkaConsumer<String, String> consumer;
    private final String topics;

    public ConsumerManager(String topic) {
        super("KafkaConsumerExample", false);
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "CageConsumerGroup");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<String, String>(props);
        this.topics = topic;

    }
    @Override
    public void doWork() {
        consumer.subscribe(Collections.singletonList(this.topics));
        ConsumerRecords<String, String> records = consumer.poll(10000);
        for (ConsumerRecord<String, String> record : records) {
            System.out.println("Received message: (" + record.key() + ", " + record.value() + ") at offset " + record.offset());
        }
    }

    @Override
    public String name() {
        return null;
    }

    @Override
    public boolean isInterruptible() {
        return false;
    }
}
However, when I reach the following line:

ConsumerRecords<String, String> records = consumer.poll(10000);

nothing happens and the test just finishes. I never see any records, and I cannot tell whether the topic received messages that I could then print to the console.

What am I missing?

OK, I rewrote the whole consumer class from scratch and now it seems to work. Here is my code:

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerManager {

    static String brokers = "<kafka_host>:9092";
    private ExecutorService executor;
    String kafkaPrefix = "proc_422_26952";
    private final Boolean isAsync = false;
    static String TOPIC; //"proc_69686_61";//"qa_69313_2";

    public ConsumerManager(String topicName) {

        TOPIC = topicName;

        // Consumer configuration: string key/value deserializers,
        // auto-commit every second, at most 500 records per poll.
        Properties props = new Properties();
        props.put("bootstrap.servers", brokers);
        props.put("group.id", "CageConsumerGroupIO");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("max.poll.records", "500");
        props.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        consumer.subscribe(Arrays.asList(TOPIC));
        System.out.println("Subscribed to topic " + TOPIC);

        // Poll in a loop and print every record that arrives.
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);

            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s\n",
                        record.offset(), record.key(), record.value());
                System.out.println(record.value());
            }
        }
    }
}
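
This class does all the work in its constructor, which subscribes and then blocks in the poll loop. A minimal sketch of how it could be driven (the class name ConsumerMain and the command-line handling are illustrative assumptions, not part of the original code):

public class ConsumerMain {
    public static void main(String[] args) {
        // Topic name can be passed on the command line; fall back to the
        // topic used above. The ConsumerManager constructor blocks and
        // prints records as they arrive.
        String topic = args.length > 0 ? args[0] : "proc_422_26952";
        new ConsumerManager(topic);
    }
}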

kafka-avro-console-consumer --bootstrap-server host:9092 --topic proc_422_26952 --from-beginning? @giorgosmyriantous I used this command: sudo ./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic proc_422_26952 --from-beginning and I saw all the data streamed to this topic… it looks like a stream of JSON records.

Try using a different group id, and make sure you start from the earliest offset. Also, polling for 10 seconds is a blocking call… the console consumer does a 100 ms poll. Whatever I tried, I could not consume records from the topic with my code, whereas doing it manually works easily.

Please attach the log4j output from the client. To enable log4j, set the following JVM argument:

-Dlog4j.configuration=file:/path/to/log4j.properties
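
Putting the commenters' suggestions together (a fresh group id, earliest offset reset, and a short poll inside a loop), a minimal standalone sketch might look like the following. The "-debug" group id suffix is only an illustration, the broker address and topic name are the placeholders used above, and this is a suggestion rather than a confirmed fix:

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class EarliestOffsetConsumerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "<kafka_host>:9092");
        // A group id that has never committed offsets, so auto.offset.reset applies.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "CageConsumerGroup-debug");
        // Read from the beginning of the topic instead of the default "latest".
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("proc_422_26952"));
            while (true) {
                // Short poll inside a loop, like the console consumer does.
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}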