java.lang.InterruptedException in Spark's rdd.collect() method


I am getting an exception from Spark's rdd.collect() method. The stack trace is:

java.lang.Error: java.lang.InterruptedException
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1155)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.InterruptedException
        at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedInterruptibly(AbstractQueuedSynchronizer.java:998)
        at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1304)
        at scala.concurrent.impl.Promise$DefaultPromise.tryAwait(Promise.scala:202)
        at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:218)
        at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:153)
        at org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:222)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:633)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2027)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2048)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2067)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2092)
        at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:939)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
        at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
        at org.apache.spark.rdd.RDD.collect(RDD.scala:938)
        at org.apache.spark.api.java.JavaRDDLike$class.collect(JavaRDDLike.scala:361)
        at org.apache.spark.api.java.AbstractJavaRDDLike.collect(JavaRDDLike.scala:45)
        at com.cybernetix.kaafka.EnrichEventSparkConsumer.lambda$startEnrichEventConsumer$ef798c17$1(EnrichEventSparkConsumer.java:76)
        at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
        at org.apache.spark.streaming.api.java.JavaDStreamLike$$anonfun$foreachRDD$1.apply(JavaDStreamLike.scala:272)
        at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
        at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:628)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:51)
        at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:416)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:50)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
        at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:50)
        at scala.util.Try$.apply(Try.scala:192)
        at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:257)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:257)
        at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
        at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:256)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        ... 2 more
My code is:

mapper = new ObjectMapper();
Collection<String> topics = Arrays.asList(sparkConfiguration.getEnrichEventTopic());
Map<String, Object> kafkaParams = new HashMap<>();
kafkaParams.put("bootstrap.servers", sparkConfiguration.getBootStrapServers());
kafkaParams.put("key.deserializer", StringDeserializer.class);
kafkaParams.put("value.deserializer", StringDeserializer.class);
kafkaParams.put("group.id", "group1");
kafkaParams.put("auto.offset.reset", "latest");
kafkaParams.put("enable.auto.commit", true);

JavaInputDStream<ConsumerRecord<String, String>> enrichEventRDD =
    KafkaUtils.createDirectStream(javaStreamingContext, LocationStrategies.PreferConsistent(),
        ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));

JavaDStream<String> enrichEventDStream = enrichEventRDD.map((x) -> x.value());

JavaDStream<List<Map<String, Object>>> enrichDataModelDStream = enrichEventDStream.map(convertIntoMapList);

// JavaDStream<EnrichEventDataModel> enrichDataModelDStream = enrichEventDStream.map(convertIntoEnrichModel);

enrichDataModelDStream.foreachRDD(enrichDataModelRdd -> {
    if (enrichDataModelRdd.count() > 0) {
        List<List<Map<String, Object>>> enrichEventDataModelList = enrichDataModelRdd.collect();
        saveDataToElasticSearch(enrichEventDataModelList);
    }
});

writeToLogsFile("EnrichEvent consumer SparkStreaming job started.", Constants.INFO);
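
For reference, here is a minimal sketch (not part of the original code, class name, app name and batch interval are placeholders) of how the streaming context is typically started once the DStream pipeline above is wired up. The foreachRDD body, including the rdd.collect() call in the stack trace, runs on the streaming JobScheduler threads that start() launches, so stopping the context (or interrupting the driver) while a batch is still executing can surface as the InterruptedException shown above:

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class EnrichEventConsumerDriver {
    public static void main(String[] args) throws Exception {
        // Hypothetical context setup; the original post builds javaStreamingContext elsewhere.
        SparkConf conf = new SparkConf().setAppName("EnrichEventSparkConsumer");
        JavaStreamingContext javaStreamingContext =
                new JavaStreamingContext(conf, Durations.seconds(10));

        // ... wire up the Kafka direct stream and foreachRDD pipeline from the question here ...

        // start() launches the JobScheduler threads that run each batch's foreachRDD
        // (and therefore rdd.collect()); awaitTermination() blocks the driver thread.
        javaStreamingContext.start();
        javaStreamingContext.awaitTermination();
    }
}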