Scala Spark: Caused by: java.lang.NoSuchMethodError: net.jpountz.lz4.LZ4BlockInputStream.<init>(Ljava/io/InputStream;Z)V

I'm new to Scala and recently started working with Spark. I have a piece of code that simply reads a CSV file and processes its rows, all running locally on my laptop. The code was working fine and then suddenly stopped working. It looks like this:

import java.util.UUID

import scala.util.{Failure, Try}

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

val spark = SparkSession.builder()
  .appName("testApp")
  .config("spark.master", "local")
  // spark.serializer is read when the SparkContext starts, so set it here
  // rather than via spark.conf.set after the session already exists
  .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .getOrCreate()

val sc = spark.sparkContext

// Accumulator counting rows with unparsable UUIDs (its definition was elided in the snippet)
val malformedRows = sc.longAccumulator("malformedRows")

// Placeholder schema and input path; the real values were elided in the question
val inputSchema = StructType(Seq(
  StructField("colval", StringType),
  StructField("colval2", StringType)))
val inputPath = "input.csv"

val ERROR_UUID = new UUID(0,0)

// Read data from input path
val data = spark.read.format("csv")
  .schema(inputSchema)
  .load(inputPath)
  .rdd

data.take(2).foreach(println)

val headerlessRDD = data
  .map {
    case Row(colval: String, colval2: String) => {

      val colval_uuid: UUID =
        Try({
          UUID.fromString(colval)
        }).recoverWith({
          // Just log the exception and keep it as a failure.
          case ex: Throwable => malformedRows.add(1); ex.printStackTrace(); Failure(ex)
        }).getOrElse(ERROR_UUID)

      (colval_uuid, colval2, 45) // the snippet referenced an undefined colval2_uuid; using the raw colval2 here
    }
  }.filter(x => x._1 != ERROR_UUID) // FILTER OUT MALFORMED UUID ROWS

val aggregatedRDD = headerlessRDD.repartition(100)
aggregatedRDD.top(5)
The exception is:

Caused by: java.lang.NoSuchMethodError: net.jpountz.lz4.LZ4BlockInputStream.<init>(Ljava/io/InputStream;Z)V
    at org.apache.spark.io.LZ4CompressionCodec.compressedInputStream(CompressionCodec.scala:122)
    at org.apache.spark.serializer.SerializerManager.wrapForCompression(SerializerManager.scala:163)
    at org.apache.spark.serializer.SerializerManager.wrapStream(SerializerManager.scala:124)
    at org.apache.spark.shuffle.BlockStoreShuffleReader$$anonfun$3.apply(BlockStoreShuffleReader.scala:50)
    at org.apache.spark.shuffle.BlockStoreShuffleReader$$anonfun$3.apply(BlockStoreShuffleReader.scala:50)
    at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:453)
    at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:64)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:31)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:439)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.convert.Wrappers$IteratorWrapper.hasNext(Wrappers.scala:30)
    at org.spark_project.guava.collect.Ordering.leastOf(Ordering.java:628)
    at org.apache.spark.util.collection.Utils$.takeOrdered(Utils.scala:37)
    at org.apache.spark.rdd.RDD$$anonfun$takeOrdered$1$$anonfun$32.apply(RDD.scala:1478)
    at org.apache.spark.rdd.RDD$$anonfun$takeOrdered$1$$anonfun$32.apply(RDD.scala:1475)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:748)
I'm using Spark 2.4.5 and I can see lz4-java-1.4.0.jar in its jars directory. I've read similar questions on Stack Overflow where people reported the same error, but those were about Kafka, and I'm not using Kafka at all. Also, I'm running this code from IntelliJ IDEA. As I noted earlier, it was working fine and then, without any change I'm aware of, it started throwing this exception. It's worth mentioning that if I take out the repartition step, the problem goes away! (I don't intend to do that.)
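
Since the error is a missing LZ4BlockInputStream constructor rather than a missing class, my guess is that some other lz4 artifact on the classpath shadows the lz4-java version Spark expects. A quick diagnostic sketch (not part of my original code; the variable name is mine) to print which jar the class is actually loaded from:

// Prints the jar that net.jpountz.lz4.LZ4BlockInputStream was loaded from.
// getCodeSource can return null for bootstrap classes, hence the Option guard.
val lz4Location = Option(classOf[net.jpountz.lz4.LZ4BlockInputStream]
  .getProtectionDomain.getCodeSource).map(_.getLocation)
println(lz4Location) // expected: a URL ending in lz4-java-1.4.0.jar

If this prints a different jar (for example an old net.jpountz lz4 artifact), then a transitive dependency is pulling it in and overriding the version that Spark 2.4.5 ships with.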