
Java lz4 exception when reading data from Kafka with Spark Streaming


I am trying to read JSON data from Kafka using the Spark Streaming API, and when I do so it throws a java.lang.NoSuchMethodError: net.jpountz.lz4.LZ4BlockInputStream.<init> exception. The stack trace is -

java.lang.NoSuchMethodError: net.jpountz.lz4.LZ4BlockInputStream.<init>(Ljava/io/InputStream;Z)V
at org.apache.spark.io.LZ4CompressionCodec.compressedInputStream(CompressionCodec.scala:122)
at org.apache.spark.serializer.SerializerManager.wrapForCompression(SerializerManager.scala:163)
at org.apache.spark.serializer.SerializerManager.wrapStream(SerializerManager.scala:124)
at org.apache.spark.shuffle.BlockStoreShuffleReader$$anonfun$3.apply(BlockStoreShuffleReader.scala:50)
at org.apache.spark.shuffle.BlockStoreShuffleReader$$anonfun$3.apply(BlockStoreShuffleReader.scala:50)
at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:421)
at org.apache.spark.storage.ShuffleBlockFetcherIterator.next(ShuffleBlockFetcherIterator.scala:61)
at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:30)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.agg_doAggregateWithKeys_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
at org.apache.spark.sql.execution.streaming.StateStoreRestoreExec$$anonfun$doExecute$1.apply(statefulOperators.scala:217)
at org.apache.spark.sql.execution.streaming.StateStoreRestoreExec$$anonfun$doExecute$1.apply(statefulOperators.scala:215)
at org.apache.spark.sql.execution.streaming.state.package$StateStoreOps$$anonfun$1.apply(package.scala:67)
at org.apache.spark.sql.execution.streaming.state.package$StateStoreOps$$anonfun$1.apply(package.scala:62)
at org.apache.spark.sql.execution.streaming.state.StateStoreRDD.compute(StateStoreRDD.scala:78)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.sql.execution.streaming.state.StateStoreRDD.compute(StateStoreRDD.scala:77)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:109)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
My pom.xml file has the following dependencies -

    <!-- https://mvnrepository.com/artifact/net.jpountz.lz4/lz4 -->
    <dependency>
        <groupId>net.jpountz.lz4</groupId>
        <artifactId>lz4</artifactId>
        <version>1.3.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>2.3.1</version>
        <exclusions>
            <exclusion>
                <artifactId>lz4-java</artifactId>
                <groupId>org.lz4</groupId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-sql_2.11</artifactId>
        <version>2.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-sql-kafka-0-10_2.11</artifactId>
        <version>2.3.1</version>
        <scope>provided</scope>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <version>1.1.0</version>
    </dependency>

And here is the Spark Streaming class, to show how I try to read the Kafka value as a string and then parse it into a Person class using a custom parser -

  public static void main( String[] args ) throws Exception
{
    if( args.length < 3 )
    {
        System.err
                .println("Usage: JavaStructuredKafkaWordCount <bootstrap-servers> " + "<subscribe-type> <topics>");
        System.exit(1);
    }

    String bootstrapServers = args[0];
    String subscribeType = args[1];
    String topics = args[2];

    SparkSession spark = SparkSession.builder().appName("JavaStructuredKafkaWordCount")
            .config("spark.master", "local").getOrCreate();


    // Create DataSet representing the stream of input lines from kafka
    Dataset<String> df = spark.readStream().format("kafka").option("kafka.bootstrap.servers", bootstrapServers)
            .option(subscribeType, topics).load().selectExpr("CAST(value AS STRING)").as(Encoders.STRING());

    Dataset<Person> stringMein = df.map(
            (MapFunction<String, Person>) row -> JSONToPerson.parseJsonToPerson(row),
            Encoders.bean(Person.class));

    //stringMein.printSchema();
    // Generate running word count
    Dataset<Row> cardDF = stringMein.groupBy("age").count();
    // Start running the query that prints the running counts to the console
    StreamingQuery query = cardDF.writeStream().outputMode("update").format("console").start();

    query.awaitTermination();
}
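
The Person bean and the JSONToPerson parser referenced above are not shown in the question; a minimal sketch of what they might look like is given below. The age field is inferred from the groupBy("age") call, and the choice of Gson as the JSON library is an assumption.

    // Hypothetical sketch of the Person bean used with Encoders.bean(Person.class).
    // The "age" field is inferred from the groupBy("age") call above; other fields are assumptions.
    public class Person implements java.io.Serializable
    {
        private String name;
        private int age;

        public String getName() { return name; }
        public void setName( String name ) { this.name = name; }
        public int getAge() { return age; }
        public void setAge( int age ) { this.age = age; }
    }

    // Hypothetical sketch of the custom parser; Gson is an assumed JSON library choice.
    public class JSONToPerson
    {
        private static final com.google.gson.Gson GSON = new com.google.gson.Gson();

        public static Person parseJsonToPerson( String json )
        {
            // Map the JSON payload of the Kafka record onto the Person bean.
            return GSON.fromJson(json, Person.class);
        }
    }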

A better option is to add this line to the Spark config when initializing the SparkSession:

.config("spark.io.compression.codec", "snappy")
Another option is to add an exclusion rule for net.jpountz.lz4 in your build.sbt:

lazy val excludeJars = ExclusionRule(organization = "net.jpountz.lz4", name = "lz4")
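Since the question uses Maven rather than sbt, the equivalent idea there would be to keep only one LZ4 implementation on the classpath: drop the explicit net.jpountz.lz4:lz4 1.3.0 dependency and remove the exclusion of org.lz4:lz4-java from spark-core, so that Spark's own lz4-java (which still ships the net.jpountz.lz4 classes with the constructor Spark 2.3.1 expects) is used. A sketch of the relevant pom fragment, assuming no other module needs the old artifact:

    <!-- Let spark-core bring in org.lz4:lz4-java; do not exclude it and do not
         declare the old net.jpountz.lz4:lz4 1.3.0 artifact alongside it. -->
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>2.3.1</version>
    </dependency>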

Adding the following dependency worked for me:

<dependency>
<groupId>net.jpountz.lz4</groupId>
<artifactId>lz4</artifactId>
<version>1.3.0</version>
</dependency>

Possible duplicate. The .config("spark.io.compression.codec", "snappy") approach worked for me. Thanks.