Scala: AWS Glue RDD.saveAsTextFile() throws "Class org.apache.hadoop.mapred.DirectOutputCommitter not found"

I am building a simple ETL that reads a billion files and repartitions them (in other words, compacts them into a smaller number of files for further processing).

A simple AWS Glue application:

import org.apache.spark.SparkContext

object Hello {
  def main(sysArgs: Array[String]) {
    val spark: SparkContext = new SparkContext()
    val input_path  = "s3a://my-bucket-name/input/*"
    val output_path = "s3a://my-bucket-name/output/*"
    val num_partitions = 5
    // Read every input file as lines, compact into a handful of partitions,
    // and write the result back to S3.
    val ingestRDD = spark.textFile(input_path)
    ingestRDD.repartition(num_partitions).saveAsTextFile(output_path)
  }
}
It throws the following stack trace:

ERROR [main] glue.ProcessLauncher (Logging.scala:logError(70)): Exception in User Class: java.lang.RuntimeException : java.lang.RuntimeException: java.lang.ClassNotFoundException: Class org.apache.hadoop.mapred.DirectOutputCommitter not found
org.apache.hadoop.conf.Configuration.getClass(Configuration.java:2401)
org.apache.hadoop.mapred.JobConf.getOutputCommitter(JobConf.java:725)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$4.apply$mcV$sp(PairRDDFunctions.scala:1048)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$4.apply(PairRDDFunctions.scala:1032)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$4.apply(PairRDDFunctions.scala:1032)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:1032)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$1.apply$mcV$sp(PairRDDFunctions.scala:958)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$1.apply(PairRDDFunctions.scala:958)
org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsHadoopFile$1.apply(PairRDDFunctions.scala:958)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:957)
org.apache.spark.rdd.RDD$$anonfun$saveAsTextFile$1.apply$mcV$sp(RDD.scala:1499)
org.apache.spark.rdd.RDD$$anonfun$saveAsTextFile$1.apply(RDD.scala:1478)
org.apache.spark.rdd.RDD$$anonfun$saveAsTextFile$1.apply(RDD.scala:1478)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
org.apache.spark.rdd.RDD.saveAsTextFile(RDD.scala:1478)
Hello$.main(hello_world_parallel_rdd_scala:18)
Hello.main(hello_world_parallel_rdd_scala)
sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
java.lang.reflect.Method.invoke(Method.java:498)
com.amazonaws.services.glue.SparkProcessLauncherPlugin$class.invoke(ProcessLauncher.scala:38)
com.amazonaws.services.glue.ProcessLauncher$$anon$1.invoke(ProcessLauncher.scala:67)
com.amazonaws.services.glue.ProcessLauncher.launch(ProcessLauncher.scala:108)
com.amazonaws.services.glue.ProcessLauncher$.main(ProcessLauncher.scala:21)
com.amazonaws.services.glue.ProcessLauncher.main(ProcessLauncher.scala)

Meanwhile, the same code works in a local environment, on a cluster, and on an EMR cluster.
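
The trace fails inside JobConf.getOutputCommitter, which suggests that Glue's default job configuration names org.apache.hadoop.mapred.DirectOutputCommitter as the output committer while that class is not on the driver's classpath. A quick way to check what the job is configured with is to print the property before writing. This is a minimal diagnostic sketch (the object name is arbitrary, and it assumes the same mapred.output.committer.class key that the fix below sets):

import org.apache.spark.SparkContext

object InspectCommitter {
  def main(sysArgs: Array[String]) {
    val sc = new SparkContext()
    // Print the committer class that saveAsTextFile() will try to instantiate.
    // On Glue this is expected to name DirectOutputCommitter (hence the
    // ClassNotFoundException); locally and on EMR it resolves to a class that exists.
    println(sc.hadoopConfiguration.get("mapred.output.committer.class"))
  }
}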

Setting the Hadoop configuration fixed it. In the Scala job:

import org.apache.spark.SparkContext

object Hello {
  def main(sysArgs: Array[String]) {
    val spark: SparkContext = new SparkContext()
    // Override the output committer with one that is actually on the Glue classpath.
    spark.hadoopConfiguration.set("mapred.output.committer.class", "org.apache.hadoop.mapred.DirectFileOutputCommitter")
    val input_path  = "s3a://my-bucket-name/input/*"
    val output_path = "s3a://my-bucket-name/output/*"
    val num_partitions = 5
    val ingestRDD = spark.textFile(input_path)
    ingestRDD.repartition(num_partitions).saveAsTextFile(output_path)
  }
}
And in pyspark, the equivalent call is:

sc._jsc.hadoopConfiguration().set("mapred.output.committer.class", "org.apache.hadoop.mapred.DirectFileOutputCommitter")
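
As an alternative (a sketch, not taken from the original post), the same override can be supplied through the Spark configuration: Spark copies properties prefixed with spark.hadoop. into the Hadoop Configuration, so the committer can be set before the context is created:

import org.apache.spark.{SparkConf, SparkContext}

object HelloWithConf {
  def main(sysArgs: Array[String]) {
    // spark.hadoop.* properties are forwarded into the Hadoop Configuration,
    // so this is equivalent to calling hadoopConfiguration.set(...) after startup.
    val conf = new SparkConf()
      .set("spark.hadoop.mapred.output.committer.class",
           "org.apache.hadoop.mapred.DirectFileOutputCommitter")
    val spark = new SparkContext(conf)
    val ingestRDD = spark.textFile("s3a://my-bucket-name/input/*")
    ingestRDD.repartition(5).saveAsTextFile("s3a://my-bucket-name/output/*")
  }
}

Either way, the key point is that the class named in mapred.output.committer.class must actually be present on the Glue job's classpath.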