Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/java/335.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Java 动态类加载器的IllegalAccessError_Java_Scala_Hadoop_Classloader_Dynamic Class Loaders - Fatal编程技术网

Java 动态类加载器的IllegalAccessError

Java 动态类加载器的IllegalAccessError,java,scala,hadoop,classloader,dynamic-class-loaders,Java,Scala,Hadoop,Classloader,Dynamic Class Loaders,我创建了一个自定义的ParquetOutputFormat(org.apache.parquet.hadoop中的类)来覆盖getRecordWriter方法。在getRecordWriter方法中,它访问CodecFactory,这会导致IllegalAccessError。为了解决这个问题,我尝试创建自己的类加载器,但这没有帮助。我关注了这篇博文 在创建自定义类加载器之前,我使用了CustomParquetOutputFormat,如下所示: override def createOutputFormat

我创建了一个自定义的
ParquetOutputFormat
(org.apache.parquet.hadoop中的类)来覆盖
getRecordWriter
方法。在
getRecordWriter
方法中,它访问
CodecFactory
,这会导致
IllegalAccessError
。为了解决这个问题,我尝试创建自己的类加载器,但这没有帮助。我关注了这篇博文

在创建自定义类加载器之前,我使用了
CustomParquetOutputFormat
,如下所示:

// Creates the output format by instantiating the custom subclass placed in the
// org.apache.parquet.hadoop package (body elided in the original question).
override def createOutputFormat: OutputFormat[Void, InternalRow] with Ext = new CustomParquetOutputFormat[InternalRow]() with Ext {
 ...
}
调用
getRecordWriter
时,当
CustomParquetOutputFormat
尝试访问第274行上的
CodecFactory
时,会发生此问题:

  CodecFactory codecFactory = new CodecFactory(conf);
(这是CustomParquetOutputFormat访问的ParquetOutputFormat的第274行)

CodecFactory
是包私有的

自定义类加载器:

// Class loader intended to resolve whitelisted class-name prefixes itself
// (child-first) while delegating every other class to the parent loader.
class CustomClassLoader(urls: Array[URL], parent: ClassLoader, whiteList: List[String])
  extends ChildFirstURLClassLoader(urls, parent) {
  // If the requested name starts with a whitelisted prefix, resolve it through
  // this loader's own hierarchy; otherwise delegate straight to the parent.
  // NOTE(review): super.loadClass follows the standard parent-first delegation
  // unless ChildFirstURLClassLoader overrides it — confirm it is truly child-first.
  override def  loadClass(name: String) = {
    if (whiteList.exists(name.startsWith)) {
      super.loadClass(name)
    } else {
      parent.loadClass(name)
    }
  }
}
// Build the custom loader over the application jars registered with Spark.
val sc: SparkContext = SparkContext.getOrCreate()
// Whitelist of org.apache.parquet.hadoop classes that should all be loaded by
// this one loader so they share a runtime package with the package-private
// CodecFactory; every other class delegates to the context class loader.
val cl: CustomClassLoader = new CustomClassLoader(sc.jars.map(new URL(_)).toArray,
  Thread.currentThread.getContextClassLoader, List(
    "org.apache.parquet.hadoop.CustomParquetOutputFormat",
    "org.apache.parquet.hadoop.CodecFactory",
    "org.apache.parquet.hadoop.ParquetFileWriter",
    "org.apache.parquet.hadoop.ParquetRecordWriter",
    "org.apache.parquet.hadoop.InternalParquetRecordWriter",
    "org.apache.parquet.hadoop.ColumnChunkPageWriteStore",
    "org.apache.parquet.hadoop.MemoryManager"
  ))


// Reflectively instantiate the output format through the custom loader and
// cast it back to the interface visible to the calling code.
// NOTE(review): classes loaded by different loaders live in different runtime
// packages regardless of name, so package-private access can still fail here.
cl.loadClass("org.apache.parquet.hadoop.CustomParquetOutputFormat")
  .getConstructor(classOf[String], classOf[TaskAttemptContext])
  .newInstance(fullPathWithoutExt, taskAttemptContext)
  .asInstanceOf[OutputFormat[Void, InternalRow] with ProvidesExtension]
java.lang.IllegalAccessError: tried to access class org.apache.parquet.hadoop.CodecFactory from class org.apache.parquet.hadoop.customParquetOutputFormat
        at org.apache.parquet.hadoop.CustomParquetOutputFormat.getRecordWriter(CustomParquetOutputFormat.scala:40)
        at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:262)
        at org.apache.spark.custom.hadoop.HadoopWriter.<init>(HadoopWriter.scala:35)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriter.<init>(ParquetWriter.scala:16)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriterFactory.createWriter(ParquetWriterFactory.scala:71)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:55)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:54)
        at scala.collection.immutable.Stream.map(Stream.scala:418)
        at com.abden.custom.index.IndexBuilder.generateTiles(IndexBuilder.scala:54)
        at com.abden.custom.index.IndexBuilder.generateLayer(IndexBuilder.scala:155)
        at com.abden.custom.index.IndexBuilder.appendLayer(IndexBuilder.scala:184)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:213)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:210)
        at scala.collection.Iterator$class.foreach(Iterator.scala:742)
        at com.abden.custom.util.SplittingByKeyIterator.foreach(SplittingByKeyIterator.scala:3)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:210)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:209)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
        at org.apache.spark.scheduler.Task.run(Task.scala:89)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
用法:

// Class loader intended to resolve whitelisted class-name prefixes itself
// (child-first) while delegating every other class to the parent loader.
class CustomClassLoader(urls: Array[URL], parent: ClassLoader, whiteList: List[String])
  extends ChildFirstURLClassLoader(urls, parent) {
  // If the requested name starts with a whitelisted prefix, resolve it through
  // this loader's own hierarchy; otherwise delegate straight to the parent.
  // NOTE(review): super.loadClass follows the standard parent-first delegation
  // unless ChildFirstURLClassLoader overrides it — confirm it is truly child-first.
  override def  loadClass(name: String) = {
    if (whiteList.exists(name.startsWith)) {
      super.loadClass(name)
    } else {
      parent.loadClass(name)
    }
  }
}
// Build the custom loader over the application jars registered with Spark.
val sc: SparkContext = SparkContext.getOrCreate()
// Whitelist of org.apache.parquet.hadoop classes that should all be loaded by
// this one loader so they share a runtime package with the package-private
// CodecFactory; every other class delegates to the context class loader.
val cl: CustomClassLoader = new CustomClassLoader(sc.jars.map(new URL(_)).toArray,
  Thread.currentThread.getContextClassLoader, List(
    "org.apache.parquet.hadoop.CustomParquetOutputFormat",
    "org.apache.parquet.hadoop.CodecFactory",
    "org.apache.parquet.hadoop.ParquetFileWriter",
    "org.apache.parquet.hadoop.ParquetRecordWriter",
    "org.apache.parquet.hadoop.InternalParquetRecordWriter",
    "org.apache.parquet.hadoop.ColumnChunkPageWriteStore",
    "org.apache.parquet.hadoop.MemoryManager"
  ))


// Reflectively instantiate the output format through the custom loader and
// cast it back to the interface visible to the calling code.
// NOTE(review): classes loaded by different loaders live in different runtime
// packages regardless of name, so package-private access can still fail here.
cl.loadClass("org.apache.parquet.hadoop.CustomParquetOutputFormat")
  .getConstructor(classOf[String], classOf[TaskAttemptContext])
  .newInstance(fullPathWithoutExt, taskAttemptContext)
  .asInstanceOf[OutputFormat[Void, InternalRow] with ProvidesExtension]
java.lang.IllegalAccessError: tried to access class org.apache.parquet.hadoop.CodecFactory from class org.apache.parquet.hadoop.customParquetOutputFormat
        at org.apache.parquet.hadoop.CustomParquetOutputFormat.getRecordWriter(CustomParquetOutputFormat.scala:40)
        at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:262)
        at org.apache.spark.custom.hadoop.HadoopWriter.<init>(HadoopWriter.scala:35)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriter.<init>(ParquetWriter.scala:16)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriterFactory.createWriter(ParquetWriterFactory.scala:71)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:55)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:54)
        at scala.collection.immutable.Stream.map(Stream.scala:418)
        at com.abden.custom.index.IndexBuilder.generateTiles(IndexBuilder.scala:54)
        at com.abden.custom.index.IndexBuilder.generateLayer(IndexBuilder.scala:155)
        at com.abden.custom.index.IndexBuilder.appendLayer(IndexBuilder.scala:184)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:213)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:210)
        at scala.collection.Iterator$class.foreach(Iterator.scala:742)
        at com.abden.custom.util.SplittingByKeyIterator.foreach(SplittingByKeyIterator.scala:3)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:210)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:209)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
        at org.apache.spark.scheduler.Task.run(Task.scala:89)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
错误:

// Class loader intended to resolve whitelisted class-name prefixes itself
// (child-first) while delegating every other class to the parent loader.
class CustomClassLoader(urls: Array[URL], parent: ClassLoader, whiteList: List[String])
  extends ChildFirstURLClassLoader(urls, parent) {
  // If the requested name starts with a whitelisted prefix, resolve it through
  // this loader's own hierarchy; otherwise delegate straight to the parent.
  // NOTE(review): super.loadClass follows the standard parent-first delegation
  // unless ChildFirstURLClassLoader overrides it — confirm it is truly child-first.
  override def  loadClass(name: String) = {
    if (whiteList.exists(name.startsWith)) {
      super.loadClass(name)
    } else {
      parent.loadClass(name)
    }
  }
}
// Build the custom loader over the application jars registered with Spark.
val sc: SparkContext = SparkContext.getOrCreate()
// Whitelist of org.apache.parquet.hadoop classes that should all be loaded by
// this one loader so they share a runtime package with the package-private
// CodecFactory; every other class delegates to the context class loader.
val cl: CustomClassLoader = new CustomClassLoader(sc.jars.map(new URL(_)).toArray,
  Thread.currentThread.getContextClassLoader, List(
    "org.apache.parquet.hadoop.CustomParquetOutputFormat",
    "org.apache.parquet.hadoop.CodecFactory",
    "org.apache.parquet.hadoop.ParquetFileWriter",
    "org.apache.parquet.hadoop.ParquetRecordWriter",
    "org.apache.parquet.hadoop.InternalParquetRecordWriter",
    "org.apache.parquet.hadoop.ColumnChunkPageWriteStore",
    "org.apache.parquet.hadoop.MemoryManager"
  ))


// Reflectively instantiate the output format through the custom loader and
// cast it back to the interface visible to the calling code.
// NOTE(review): classes loaded by different loaders live in different runtime
// packages regardless of name, so package-private access can still fail here.
cl.loadClass("org.apache.parquet.hadoop.CustomParquetOutputFormat")
  .getConstructor(classOf[String], classOf[TaskAttemptContext])
  .newInstance(fullPathWithoutExt, taskAttemptContext)
  .asInstanceOf[OutputFormat[Void, InternalRow] with ProvidesExtension]
java.lang.IllegalAccessError: tried to access class org.apache.parquet.hadoop.CodecFactory from class org.apache.parquet.hadoop.customParquetOutputFormat
        at org.apache.parquet.hadoop.CustomParquetOutputFormat.getRecordWriter(CustomParquetOutputFormat.scala:40)
        at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:262)
        at org.apache.spark.custom.hadoop.HadoopWriter.<init>(HadoopWriter.scala:35)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriter.<init>(ParquetWriter.scala:16)
        at org.apache.spark.sql.execution.datasources.parquet.ParquetWriterFactory.createWriter(ParquetWriterFactory.scala:71)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:55)
        at com.abden.custom.index.IndexBuilder$$anonfun$4.apply(IndexBuilder.scala:54)
        at scala.collection.immutable.Stream.map(Stream.scala:418)
        at com.abden.custom.index.IndexBuilder.generateTiles(IndexBuilder.scala:54)
        at com.abden.custom.index.IndexBuilder.generateLayer(IndexBuilder.scala:155)
        at com.abden.custom.index.IndexBuilder.appendLayer(IndexBuilder.scala:184)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:213)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1$$anonfun$apply$1.apply(IndexBuilder.scala:210)
        at scala.collection.Iterator$class.foreach(Iterator.scala:742)
        at com.abden.custom.util.SplittingByKeyIterator.foreach(SplittingByKeyIterator.scala:3)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:210)
        at com.abden.custom.index.IndexBuilder$$anonfun$appendLayers$1.apply(IndexBuilder.scala:209)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$33.apply(RDD.scala:920)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
        at org.apache.spark.scheduler.Task.run(Task.scala:89)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

CodecFactory
没有修改器,因此仅限于其包。即使使用动态类加载器从同一个类加载器加载所有类,我仍然会得到
IllegalAccessError

,因此您尝试的是打破Java的工作方式!您希望通过实现自己的类加载器来访问一个包私有的类,该类允许破坏JVM的保护规则(因此您希望破坏Java语言规范!)

我的答案很简单:不要这样做

如果它是包专用的,则无法访问它。句号

我认为最好的方法是根据您需要的功能进行思考,并使用当前的API实现它,而不必强行进入。因此,与其问如何进行技术破解,最好的办法是解释您想做什么(为什么要实现自己的
getRecordWriter
方法)

关于如何用普通java读/写拼花地板文件,我已经回答了这个SOW问题:

问候,


Loïc

奇怪的是,错误消息显示的是
customParquetOutputFormat
(小写c),而其他所有内容都是指
CustomParquetOutputFormat
(大写c)。除此之外,您应该知道
super.loadClass(name)
还将首先检查父加载程序,如果父加载程序未找到该类,则仅尝试在本地解析该类。而且,不同类加载程序加载的类始终被视为处于不同的(运行时)状态包,不管它们的名称。抱歉,修复了错误消息。我更改了此问题的类的名称,并意外地使用了小写。您好,您能在自定义类装入器之前共享您的代码以了解您以前遇到的问题吗?因为在这里实现您自己的类装入器似乎有些过分…@loicmathieu我添加了一些我以前是如何调用它的上下文您能否共享mvn clean;mvn dependency:tree-U>output.txt的输出