从kafka读取时发生java.nio.BufferUnderflowException
我在消费Kafka中的事件时遇到BufferUnderflowException错误。我能够成功读取大约10万条消息,之后就遇到了这个错误。我看到过一些Stack Overflow帖子提到,当服务器版本和消费者客户端版本不匹配时会发生这种情况;但是,我的消费者版本和服务器版本是匹配的。如果您能提供一些调试思路,我将非常感谢。以下是异常堆栈:
org.apache.kafka.common.protocol.types.SchemaException: Error reading field 'responses': Error reading field 'topic': java.nio.BufferUnderflowException
at org.apache.kafka.common.protocol.types.Schema.read(Schema.java:71)
at org.apache.kafka.clients.NetworkClient.handleCompletedReceives(NetworkClient.java:464)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:279)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.clientPoll(ConsumerNetworkClient.java:303)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:197)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:187)
at org.apache.kafka.clients.consumer.KafkaConsumer.pollOnce(KafkaConsumer.java:877)
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:829)
at com.howtoprogram.kafka.ConsumerTest.main(ConsumerTest.java:58)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
卡夫卡服务器版本
kafka_2.10-0.9.0.0
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.9.0.0</version>
</dependency>
卡夫卡消费版
kafka_2.10-0.9.0.0
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.9.0.0</version>
</dependency>
代码
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
public class ConsumerTest {
    public static AtomicLong totalMessages = new AtomicLong(0);
    static String bootstrapserver = "10.0.1.1:9092";
    static String consumerGroup = ConsumerTest.class.getSimpleName();
    static String topic = "events.topic";
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapserver);
        props.put("group.id", consumerGroup);
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "earliest");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        System.out.println("initializing kafka consumer kafka-0.9.0.0");
        try {
            KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
            kafkaConsumer.subscribe(Arrays.asList(topic));
            System.out.println("subscribed to kafka topic " + topic);
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(1000);
                totalMessages.addAndGet(records.count());
                System.out.println("totalMessages = " + totalMessages.get());
                for (ConsumerRecord<String, String> record : records) {
                }
            }
        } catch (Exception e) {
            System.out.println("something went wrong");
            e.printStackTrace(new PrintStream(System.out));
        }
    }
}
不确定这是否重要,但制作人是一个spark job,版本为0.8.2。所以,有点老了
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
<version>2.0.1</version>
</dependency>
虽然我没有遇到上面的BufferUnderflowException异常,但消费者有时会卡住,无法读取Kafka上的所有消息,并且也没有看到任何异常。下面是代码:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
/**
 * Consumes messages from a single Kafka topic using the legacy
 * (ZooKeeper-backed, 0.8-style) high-level consumer API and prints a
 * running count of the messages read.
 *
 * NOTE(review): this class uses the old kafka.consumer.* client while the
 * other consumer in this file uses the new 0.9 KafkaConsumer; mixing
 * client and broker wire-protocol versions is a known cause of
 * BufferUnderflowException — confirm the versions actually deployed.
 */
public class KafkaConsumerThread extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;
    // Total number of messages consumed so far (thread-safe counter).
    final AtomicLong counter = new AtomicLong(0);

    public KafkaConsumerThread(String topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /** Builds the ZooKeeper-based consumer configuration from KafkaProperties. */
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId);
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        System.out.println("listening to zookeper " + KafkaProperties.zkConnect);
        return new ConsumerConfig(props);
    }

    /** Blocks indefinitely, reading messages from the topic and counting them. */
    @Override
    public void run() {
        System.out.println("listening to topic " + topic);
        try {
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            // Integer.valueOf instead of the deprecated new Integer(...) constructor.
            topicCountMap.put(topic, Integer.valueOf(1));
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                    consumer.createMessageStreams(topicCountMap);
            KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
            System.out.println("got the stream");
            ConsumerIterator<byte[], byte[]> it = stream.iterator();
            System.out.println("iterating the stream");
            while (it.hasNext()) {
                // Advance the iterator; the payload itself is not used here,
                // so the previous unused String decode was dropped.
                it.next();
                counter.incrementAndGet();
                System.out.println("Consumed = " + counter.get());
            }
        } catch (Exception e) {
            System.out.println("something went wrong ");
            e.printStackTrace(System.out);
        }
    }

    public static void main(String args[]) {
        KafkaConsumerThread consumerThread = null;
        try {
            consumerThread = new KafkaConsumerThread(KafkaProperties.topic);
            consumerThread.start();
        } catch (Exception e) {
            System.out.println("something went wrong main loop ");
            e.printStackTrace(System.out);
        }
        // Wait for the consumer thread instead of spinning in an empty
        // for(;;) loop, which burned a full CPU core doing nothing.
        if (consumerThread != null) {
            try {
                consumerThread.join();
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        }
    }
}
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
public class KafkaConsumerThread extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;
    AtomicLong counter = new AtomicLong(0);
    public KafkaConsumerThread(String topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }
    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId);
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        System.out.println("listening to zookeper " + KafkaProperties.zkConnect);
        return new ConsumerConfig(props);
    }
    public void run() {
        System.out.println("listening to topic " + topic);
        try {
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            topicCountMap.put(topic, new Integer(1));
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
            KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
            System.out.println("got the stream");
            ConsumerIterator<byte[], byte[]> it = stream.iterator();
            System.out.println("iterating the stream");
            while (it.hasNext()) {
                MessageAndMetadata<byte[], byte[]> next = it.next();
                String msg = new String(next.message());
                // System.out.println(msg);
                counter.incrementAndGet();
                System.out.println("Consumed = " + counter.get());
            }
        } catch (Exception e) {
            System.out.println("something went wrong ");
            e.printStackTrace(System.out);
        }
    }
    public static void main(String args[]) {
        try {
            KafkaConsumerThread consumerThread = new KafkaConsumerThread(KafkaProperties.topic);
            consumerThread.start();
        } catch (Exception e) {
            System.out.println("something went wrong main loop ");
            e.printStackTrace(System.out);
        }
        for (;;) {
        }
    }
}
我已在Kafka客户端的Google Group上发布了此问题。同样的生产者/消费者代码在最新的Kafka服务器版本(kafka_2.11-0.11.0.0)上运行良好。
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
/**
 * Thread that attaches to one Kafka topic through the legacy high-level
 * (ZooKeeper-backed) consumer and prints a running count of messages read.
 */
public class KafkaConsumerThread extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;
    AtomicLong counter = new AtomicLong(0);

    public KafkaConsumerThread(String topic) {
        this.consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    /** Assembles the legacy consumer settings from KafkaProperties. */
    private static ConsumerConfig createConsumerConfig() {
        Properties settings = new Properties();
        settings.put("zookeeper.connect", KafkaProperties.zkConnect);
        settings.put("group.id", KafkaProperties.groupId);
        settings.put("zookeeper.session.timeout.ms", "4000");
        settings.put("zookeeper.sync.time.ms", "200");
        settings.put("auto.commit.interval.ms", "1000");
        System.out.println("listening to zookeper " + KafkaProperties.zkConnect);
        return new ConsumerConfig(settings);
    }

    /** Reads from the topic forever, bumping the counter once per message. */
    @Override
    public void run() {
        System.out.println("listening to topic " + topic);
        try {
            Map<String, Integer> streamsPerTopic = new HashMap<String, Integer>();
            streamsPerTopic.put(topic, new Integer(1));
            Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic =
                    consumer.createMessageStreams(streamsPerTopic);
            KafkaStream<byte[], byte[]> messageStream = streamsByTopic.get(topic).get(0);
            System.out.println("got the stream");
            ConsumerIterator<byte[], byte[]> messages = messageStream.iterator();
            System.out.println("iterating the stream");
            while (messages.hasNext()) {
                MessageAndMetadata<byte[], byte[]> record = messages.next();
                String msg = new String(record.message());
                // System.out.println(msg);
                counter.incrementAndGet();
                System.out.println("Consumed = " + counter.get());
            }
        } catch (Exception e) {
            System.out.println("something went wrong ");
            e.printStackTrace(System.out);
        }
    }

    public static void main(String args[]) {
        try {
            new KafkaConsumerThread(KafkaProperties.topic).start();
        } catch (Exception e) {
            System.out.println("something went wrong main loop ");
            e.printStackTrace(System.out);
        }
        // Keep the JVM alive; the consumer runs on its own thread.
        for (;;) {
        }
    }
}