Scala: How to convert Spark log files into one CSV file

Tags: scala, apache-spark, logfile-analysis

I have a collection of Spark application log files, and I want each file's application name, submission time, completion time, and accumulable metrics added as one row to a single CSV file, using Spark/Scala. EDIT: Sorry, a single Spark application log file is too large to post here, and also quite complicated: each job repeatedly updates some of the metrics, and I need the totals for those metrics (the final values, not the intermediate updates). Here is what I have tried so far:

import org.apache.log4j._
import org.apache.spark.sql._


object LogToCSV {
  val Logs= "SparkAppName, SubmissionTime, CompletionTime,ExecutorDeserializeCpuTime,ResultSize,ShuffleReadRemoteBytesRead, ShuffleReadFetchWaitTime,MemoryBytesSpilled,ShuffleReadLocalBytesRead,ExecutorDeserializeTime,PeakExecutionMemory,ExecutorCpuTime, ShuffleReadLocalBlocksFetched,JVMGCTime,ShuffleReadRemoteBytesReadToDisk,ShuffleReadRecordsRead,DiskBytesSpilled,ExecutorRunTime,ShuffleReadRemoteBlocksFetched,Result"
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.ERROR)
    Logger.getLogger("akka").setLevel(Level.ERROR)
    val ss = SparkSession
      .builder
      .appName("SparkSQLDFjoin")
      .master("local[*]")
      .getOrCreate()
    import ss.implicits._
    // write the header row first
    ScalaWriter.Writer.Write(Logs, "Results.csv")
    val Dir = ss.sparkContext.wholeTextFiles("/home/rudaini/Desktop/Thesis/Results/Results/Tesx/*")
    println(Dir.count())
    Dir.foreach(F =>{
      var SparkAppName = ""
      var SubmissionTime: Double = 0
      var CompletionTime: Double = 0
      var ExecutorDeserializeCpuTime: Double = 0
      var ResultSize = ""
      var ShuffleReadRemoteBytesRead = ""
      var ShuffleReadFetchWaitTime = ""
      var MemoryBytesSpilled = ""
      var ShuffleReadLocalBytesRead = ""
      var ExecutorDeserializeTime = ""
      var PeakExecutionMemory = ""
      var ExecutorCpuTime = ""
      var ShuffleReadLocalBlocksFetched = ""
      var JVMGCTime = ""
      var ShuffleReadRemoteBytesReadToDisk = ""
      var ShuffleReadRecordsRead = ""
      var DiskBytesSpilled = ""
      var ExecutorRunTime = ""
      var ShuffleReadRemoteBlocksFetched = ""
      var Result = ""

      // F is a (path, contents) pair from wholeTextFiles; parse the file contents line by line
      F._2.split("\n").foreach(L =>{
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}

        if(L.contains("ApplicationStart")){
          SubmissionTime = L.substring(L.indexOf("Timestamp")+11,
            L.indexOf(",\"User\":\"")).toDouble}

        if(L.contains("ApplicationEnd")){
          CompletionTime = L.substring(L.indexOf("Timestamp")+11,L.indexOf("Timestamp")+24).toDouble}

        if(L.contains("SparkSubmit.scala")){
          ExecutorDeserializeCpuTime = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")).toDouble}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}
        if(L.contains("spark.app.name")){
          SparkAppName = L.substring(L.indexOf("app.name")+11,
            L.indexOf("spark.scheduler")-3)}


      })
      val LineX  = SparkAppName +","+ SubmissionTime +","+ CompletionTime +","+ ExecutorDeserializeCpuTime +","+ ResultSize +","+ ShuffleReadRemoteBytesRead +","+ ShuffleReadFetchWaitTime +","+ MemoryBytesSpilled +","+
        ShuffleReadLocalBytesRead +","+ ExecutorDeserializeTime +","+ PeakExecutionMemory +","+ ExecutorCpuTime +","+
        ShuffleReadLocalBlocksFetched +","+ JVMGCTime +","+ ShuffleReadRemoteBytesReadToDisk +","+
        ShuffleReadRecordsRead +","+ DiskBytesSpilled +","+ ExecutorRunTime +","+ ShuffleReadRemoteBlocksFetched +","+
        Result

      ScalaWriter.Writer.Write(LineX, "Results.csv")
    })    
    ss.stop()
  }
}

I haven't finished yet, but after more tweaking I am getting better results.
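Since Spark writes its event logs as one JSON object per line, an alternative to the substring arithmetic above is to let Spark parse the JSON itself. Below is a minimal sketch of that approach (not the code from the question): the file name app-1 under the question's directory is a placeholder, and the field names ("Event", "App Name", "Timestamp", "Task Metrics", "Executor CPU Time") are the ones Spark's event-log JSON uses.

// Minimal sketch, assuming standard uncompressed Spark event logs (one JSON object per line).
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

object EventLogToRow {
  def main(args: Array[String]): Unit = {
    val ss = SparkSession.builder
      .appName("EventLogToRow")
      .master("local[*]")
      .getOrCreate()

    // one application log; loop over the directory to build the full CSV
    // ("app-1" is a placeholder file name)
    val events = ss.read.json("/home/rudaini/Desktop/Thesis/Results/Results/Tesx/app-1")

    val appName = events
      .filter(col("Event") === "SparkListenerApplicationStart")
      .select("App Name").first().getString(0)

    val submissionTime = events
      .filter(col("Event") === "SparkListenerApplicationStart")
      .select("Timestamp").first().getLong(0)

    val completionTime = events
      .filter(col("Event") === "SparkListenerApplicationEnd")
      .select("Timestamp").first().getLong(0)

    // sum one task metric over all task-end events; the other metrics work the same way
    val executorCpuTime = events
      .filter(col("Event") === "SparkListenerTaskEnd")
      .agg(sum(col("Task Metrics").getField("Executor CPU Time")))
      .first().getLong(0)

    println(s"$appName,$submissionTime,$completionTime,$executorCpuTime")
    ss.stop()
  }
}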

I only partly understand your question, so I am answering based on my understanding. If you can organize your question further, I may be able to answer it in more detail.

//imports needed by the snippets below
import java.io.File
import org.apache.spark.sql.DataFrame

//define all dataframes globally (as fields of the enclosing object/class; the `_` default is not valid for local vars)
var df1: DataFrame = _
var df2: DataFrame = _
var df3: DataFrame = _
// define main function
//initialize spark session
//creates a list of all files in a directory
def getListOfFiles(dir: String): List[File] =
{
  val path = new File(dir)
  if (path.exists && path.isDirectory)
  {
    path.listFiles.filter(_.isFile).toList
  }
  else
  {
    List[File]()
  }
}

val files = getListOfFiles("/path/to/directory/")
for (input <- files)
{
  // code to extract log file data (I can help you further if you will explain your problem further)
  // load your log file data into a dataframe
  import spark.implicits._

  // appName, submissionTime, completionTime and accumulableMetrics are placeholders
  // for whatever values you extract from the current log file
  if (input == files(0))
  {
    df1 = Seq(
      (appName, submissionTime, completionTime, accumulableMetrics)
    ).toDF("App Name", "Submission Time", "Completion Time", "Accumulables metrics")
  }
  else
  {
    df2 = Seq(
      (appName, submissionTime, completionTime, accumulableMetrics)
    ).toDF("App Name", "Submission Time", "Completion Time", "Accumulables metrics")
    df3 = df1.union(df2)
    df1 = df3
  }
}

  //  import dataframe to .csv file
  df1.coalesce(1).write
  .option("header", "true")
  .csv("path/to/directory/file_name.csv")
Welcome to Stack Overflow! Your question is not well defined. Please expand the problem statement and show a short but complete example of what you have tried so far. Check this example for reference.

Hi Pardeep, your file_name.csv is not… I ran into a similar problem, and the solution was to use Log4j or a PrintWriter. Maybe Log4j could be a solution here as well.
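
On the PrintWriter suggestion: a minimal sketch of what a ScalaWriter.Writer.Write helper like the one used in the question could look like, assuming a plain java.io FileWriter in append mode (the implementation is an assumption, not the asker's actual helper).

// Minimal sketch of an append-to-file helper; names mirror the question's ScalaWriter.Writer.Write.
import java.io.{FileWriter, PrintWriter}

object Writer {
  def Write(line: String, file: String): Unit = {
    // open in append mode so each call adds one row to the CSV
    val out = new PrintWriter(new FileWriter(file, true))
    try out.println(line)
    finally out.close()
  }
}

Keep in mind that calling such a helper inside RDD .foreach runs on the executors; with master("local[*]") everything stays in one JVM, but on a cluster each executor would append to its own local file.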