Scala: How do I make Spark slaves use HDFS input files 'locally' with a Hadoop + Spark cluster?

Tags: scala, hadoop, apache-spark, hdfs, cluster-computing

I have a cluster of 9 computers with Apache Hadoop 2.7.2 and Spark 2.0.0 installed. Each computer runs an HDFS datanode and a Spark slave. One of these computers also runs the HDFS namenode and the Spark master.

I uploaded several TB of gz archives into HDFS with replication = 2. It turned out that some of the archives are corrupt, and I'd like to find them. It looks like 'gunzip -t' can help. So I tried to find a way to run a Spark application on the cluster such that each Spark executor tests, whenever possible, the archives that are 'local' to it (i.e., one of the replicas is located on the same computer the executor runs on). The following script runs, but sometimes Spark executors process files that are 'remote' in HDFS:

// Usage (after packaging a jar with mainClass set to 'com.qbeats.cortex.CommoncrawlArchivesTester' in spark.pom
// and placing this jar file into Spark's home directory):
//    ./bin/spark-submit --master spark://LV-WS10.lviv:7077 spark-cortex-fat.jar spark://LV-WS10.lviv:7077 hdfs://LV-WS10.lviv:9000/commoncrawl 9
// means testing for corruption the gz-archives in the directory hdfs://LV-WS10.lviv:9000/commoncrawl
// using a Spark cluster with the Spark master URL spark://LV-WS10.lviv:7077 and 9 Spark slaves

package com.qbeats.cortex

import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.FileSplit
import org.apache.spark.rdd.HadoopRDD
import org.apache.spark.{SparkContext, SparkConf, AccumulatorParam}
import sys.process._

object CommoncrawlArchivesTester extends App {
  object LogAccumulator extends AccumulatorParam[String] {
    def zero(initialValue: String): String = ""
    def addInPlace(log1: String, log2: String) = if (log1.isEmpty) log2 else log1 + "\n" + log2
  }

  override def main(args: Array[String]): Unit = {
    if (args.length >= 3) {
      val appName = "CommoncrawlArchivesTester"
      val conf = new SparkConf().setAppName(appName).setMaster(args(0))
      conf.set("spark.executor.memory", "6g")
      conf.set("spark.shuffle.service.enabled", "true")
      conf.set("spark.dynamicAllocation.enabled", "true")
      conf.set("spark.dynamicAllocation.initialExecutors", args(2))
      val sc = new SparkContext(conf)

      val log = sc.accumulator(LogAccumulator.zero(""))(LogAccumulator)

      val text = sc.hadoopFile(args(1), classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
      val hadoopRdd = text.asInstanceOf[HadoopRDD[LongWritable, Text]]

      // mapPartitionsWithInputSplit exposes the FileSplit, so each task can see
      // which HDFS file its partition came from.
      val fileAndLine = hadoopRdd.mapPartitionsWithInputSplit { (inputSplit, iterator) =>
        val fileName = inputSplit.asInstanceOf[FileSplit].getPath.toString
        class FilePath extends Iterable[String] {
          def iterator = List(fileName).iterator
        }
        // Stream the archive out of HDFS and pipe it through "gunzip -t";
        // a non-zero exit code means the archive is corrupt.
        val result = (sys.env("HADOOP_PREFIX") + "/bin/hadoop fs -cat " + fileName) #| "gunzip -t" !

        println("Processed %s.".format(fileName))
        if (result != 0) {
          log.add(fileName)
          println("Corrupt: %s.".format(fileName))
        }
        (new FilePath).iterator
      }

      val result = fileAndLine.collect()

      println("Corrupted files:")
      println(log.value)
    }
  }
}
What would you suggest?
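For reference, here is a minimal driver-side sketch (my assumption, not part of the original post: it reuses the namenode URL hdfs://LV-WS10.lviv:9000 and the /commoncrawl directory from above, and the object name BlockLocationLister is hypothetical) that prints which datanodes hold the replicas of each archive, using the standard Hadoop FileSystem API:

import java.net.URI
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object BlockLocationLister extends App {
  // Connect to the namenode used in the scripts above (hypothetical reuse).
  val fs = FileSystem.get(new URI("hdfs://LV-WS10.lviv:9000"), new Configuration())
  fs.listStatus(new Path("/commoncrawl")).filter(_.isFile).foreach { status =>
    // getFileBlockLocations returns one BlockLocation per HDFS block;
    // getHosts lists the datanodes holding a replica of that block.
    val hosts = fs.getFileBlockLocations(status, 0, status.getLen).flatMap(_.getHosts).distinct
    println(s"${status.getPath}: ${hosts.mkString(", ")}")
  }
  fs.close()
}

Comparing this per-file host list with the executors that actually processed each file shows how often locality was missed.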


ADDED LATER:

I tried another script (shown at the bottom of this post) that reads the files from HDFS via textFile(). It looks like the Spark executors don't favor the input files that are 'local' to them. Doesn't that contradict 'Spark brings code to data, not data to code'?


Saif C, could you explain in more detail?

I've solved the problem by switching from Spark's standalone mode to YARN mode.
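For completeness, a hedged sketch of how the same jar might be submitted under YARN instead of the standalone master (this assumes HADOOP_CONF_DIR points at the cluster's Hadoop configuration; the jar name and HDFS path are the ones from the commands above):

  ./bin/spark-submit --master yarn --deploy-mode client \
    spark-cortex-fat.jar yarn hdfs://LV-WS10.lviv:9000/commoncrawl 9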



Spark on Hadoop may not see the local version of the file. You may need to add the files with the --files option of spark-submit. When you use this option, the local files become accessible to Spark in each executor's root (working) directory.
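To illustrate the pattern this answer describes, here is a minimal sketch (the file name blacklist.txt and the object name FilesOptionSketch are hypothetical, not from the original post): a small file shipped with spark-submit --files blacklist.txt is resolved on each executor through SparkFiles.get, which points into that executor's working directory.

import scala.io.Source
import org.apache.spark.{SparkConf, SparkContext, SparkFiles}

object FilesOptionSketch extends App {
  val sc = new SparkContext(new SparkConf().setAppName("FilesOptionSketch"))
  // Run one task per default partition; each task reads its executor-local copy of the file.
  val lineCounts = sc.parallelize(1 to sc.defaultParallelism).map { _ =>
    val localPath = SparkFiles.get("blacklist.txt") // hypothetical file passed via --files
    Source.fromFile(localPath).getLines().length
  }.collect()
  println(lineCounts.mkString(", "))
  sc.stop()
}

Note that --files copies small side files to every executor; the multi-terabyte archives themselves stay in HDFS.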
// The second script, mentioned in the ADDED LATER section above: it reads the archives via textFile() and counts lines.
// Usage (after packaging a jar with mainClass set to 'com.qbeats.cortex.CommoncrawlArchiveLinesCounter' in spark.pom):
//   ./bin/spark-submit --master spark://LV-WS10.lviv:7077 spark-cortex-fat.jar spark://LV-WS10.lviv:7077 hdfs://LV-WS10.lviv:9000/commoncrawl 9

package com.qbeats.cortex

import org.apache.spark.{SparkContext, SparkConf}

object CommoncrawlArchiveLinesCounter extends App {
  override def main(args: Array[String]): Unit = {
    if (args.length >= 3) {
      val appName = "CommoncrawlArchiveLinesCounter"
      val conf = new SparkConf().setAppName(appName).setMaster(args(0))
      conf.set("spark.executor.memory", "6g")
      conf.set("spark.shuffle.service.enabled", "true")
      conf.set("spark.dynamicAllocation.enabled", "true")
      conf.set("spark.dynamicAllocation.initialExecutors", args(2))
      val sc = new SparkContext(conf)

      val nLines = sc.
        textFile(args(1) + "/*").
        mapPartitionsWithIndex( (index, it) => {
          println("Processing partition %s".format(index))
          it
        }).
        count
      println(nLines)
    }
  }
}
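One way to check the locality question from the ADDED LATER section is to ask Spark for each partition's preferred locations; for an HDFS-backed RDD these are the datanodes holding the corresponding block replicas. A minimal sketch (the object name PreferredLocationsSketch is hypothetical; the input path is the one used above):

import org.apache.spark.{SparkConf, SparkContext}

object PreferredLocationsSketch extends App {
  val sc = new SparkContext(new SparkConf().setAppName("PreferredLocationsSketch"))
  val rdd = sc.textFile("hdfs://LV-WS10.lviv:9000/commoncrawl/*")
  // preferredLocations reports the hosts Spark would like to schedule each split on.
  rdd.partitions.foreach { p =>
    println(s"partition ${p.index}: ${rdd.preferredLocations(p).mkString(", ")}")
  }
  sc.stop()
}

Whether a task actually runs on one of those hosts then depends on which executors are registered at scheduling time and on spark.locality.wait.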