
Scala: How to read a DataFrame row by row without changing the order? (Apache Spark)

Tags: scala, apache-spark, dataframe, apache-spark-sql

I have a DataFrame that contains a sequence of rows. I want to iterate over it row by row, without changing the order.

I have tried with the code below:

 scala> val df = Seq(
 |     (0,"Load","employeeview", "employee.empdetails", null ),
 |     (1,"Query","employeecountview",null,"select count(*) from employeeview"),
 |     (2,"store", "employeecountview",null,null)
 |   ).toDF("id", "Operation","ViewName","DiectoryName","Query")
df: org.apache.spark.sql.DataFrame = [id: int, Operation: string ... 3 more fields]

scala> df.show()
+---+---------+-----------------+-------------------+--------------------+
| id|Operation|         ViewName|       DiectoryName|               Query|
+---+---------+-----------------+-------------------+--------------------+
|  0|     Load|     employeeview|employee.empdetails|                null|
|  1|    Query|employeecountview|               null|select count(*) f...|
|  2|    store|employeecountview|               null|                null|
+---+---------+-----------------+-------------------+--------------------+

scala> val dfcount = df.count().toInt
dfcount: Int = 3

scala> for( a <- 0 to dfcount-1){
           // first iteration I want:  id=0  Operation="Load"   ViewName="employeeview"       DiectoryName="employee.empdetails"  Query=null
           // second iteration I want: id=1  Operation="Query"  ViewName="employeecountview"  DiectoryName=null                   Query="select count(*) from employeeview"
           // third iteration I want:  id=2  Operation="store"  ViewName="employeecountview"  DiectoryName=null                   Query=null

           // ignore below sample code
           // val Operation = get(Operation(i))
           // if (Operation == "Load") {
           //   // based on the operation type I am calling the appropriate function and passing the entire row as a parameter
           // } else if (Operation == "Query") {
           //
           // } else if (Operation == "store") {
           //
           // }
       }
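In other words, the goal is to dispatch on the Operation column one row at a time, in id order. For reference, that intent could be sketched roughly as follows, assuming the DataFrame is small enough to collect to the driver; handleLoad, handleQuery, and handleStore are hypothetical placeholders for the real functions:

    import org.apache.spark.sql.Row

    // Hypothetical handlers; each receives the entire row, as the question asks.
    def handleLoad(row: Row): Unit  = println(s"Load:  ${row.getAs[String]("ViewName")}")
    def handleQuery(row: Row): Unit = println(s"Query: ${row.getAs[String]("Query")}")
    def handleStore(row: Row): Unit = println(s"store: ${row.getAs[String]("ViewName")}")

    // orderBy pins the iteration order; collect() is only safe for small control tables,
    // since it brings every row to the driver.
    df.orderBy("id").collect().foreach { row =>
      row.getAs[String]("Operation") match {
        case "Load"  => handleLoad(row)
        case "Query" => handleQuery(row)
        case "store" => handleStore(row)
        case other   => println(s"unknown operation: $other")
      }
    }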
Please check the following:

scala> val df = Seq(
     |     (0,"Load","employeeview", "employee.empdetails", null ),
     |     (1,"Query","employeecountview",null,"select count(*) from employeeview"),
     |     (2,"store", "employeecountview",null,null)
     |   ).toDF("id", "Operation","ViewName","DiectoryName","Query")
df: org.apache.spark.sql.DataFrame = [id: int, Operation: string ... 3 more fields]

scala> df.show()
+---+---------+-----------------+-------------------+--------------------+
| id|Operation|         ViewName|       DiectoryName|               Query|
+---+---------+-----------------+-------------------+--------------------+
|  0|     Load|     employeeview|employee.empdetails|                null|
|  1|    Query|employeecountview|               null|select count(*) f...|
|  2|    store|employeecountview|               null|                null|
+---+---------+-----------------+-------------------+--------------------+


scala> val dfcount = df.count().toInt
dfcount: Int = 3

scala> :paste
// Entering paste mode (ctrl-D to finish)

for( a <- 0 to dfcount-1){
  // look up the row with this id and read its Operation column
  val operation = df.filter(s"id=${a}").select("Operation").as[String].first

  operation match {
    case "Query" => println("matching Query") // or call a function here for Query()
    case "Load"  => println("matching Load")  // or call a function here for Load()
    case "store" => println("matching store") //
    case x       => println("matched " + x)
  }
}

// Exiting paste mode, now interpreting.

matching Load
matching Query
matching store

scala>
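A note on this approach: each loop iteration launches a separate Spark job (a filter plus a first per row), which is perfectly fine for a three-row control table like this one but would become slow as the table grows. For larger tables, collecting once and iterating on the driver, as in the sketch under the question, avoids the repeated jobs.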

This is my solution using a Dataset. It gives type safety and cleaner code, though the performance would have to be benchmarked; it should not differ much.

// Assuming a SparkSession named `spark` is in scope (as in spark-shell).
import org.apache.spark.sql.{Dataset, Encoders}
import spark.implicits._ // provides the implicit Encoder[String] needed by map below

case class EmployeeOperations(id: Int, operation: String, viewName: String, DiectoryName: String, query: String)

// Dispatches on the operation field; returns the matched name so that show() has output.
def printOperation(ds: Dataset[EmployeeOperations]): Dataset[String] = {
  ds.map(x => x.operation match {
    case "Query" => println("matching Query"); "Query"
    case "Load"  => println("matching Load");  "Load"
    case "store" => println("matching store"); "store"
    case _       => println("Found something else"); "Nothing"
  })
}

val data = Seq(
  EmployeeOperations(0, "Load", "employeeview", "employee.empdetails", ""),
  EmployeeOperations(1, "Query", "employeecountview", "", "select count(*) from employeeview"),
  EmployeeOperations(2, "store", "employeecountview", "", "")
)
val ds: Dataset[EmployeeOperations] = spark.createDataset(data)(Encoders.product[EmployeeOperations])
printOperation(ds).show
For testing purposes I only returned a string here; you can return any primitive type. This returns:

scala> printOperation(ds).show
matching Load
matching Query
matching store
+-----+
|value|
+-----+
| Load|
|Query|
|store|
+-----+
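One caveat: the println calls inside map are side effects that run on the executors. In local mode, as in the transcript above, they happen to print in order, but on a cluster the partitions are processed in parallel, so neither the console output nor its order is guaranteed. If strict ordering matters, sorting and collecting to the driver first is the safer route; a minimal sketch, reusing the same ds:

    // Sorting and collecting keeps the iteration on the driver, so the order is deterministic.
    ds.orderBy("id").collect().foreach(e => println(s"${e.id}: ${e.operation}"))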