Scala: Rewrite a LogicalPlan to push a UDF down out of an Aggregate

Tags: scala, apache-spark, catalyst-optimizer

I have defined a UDF named "inc" that increments its input value by 1. This is the code for my UDF:

spark.udf.register("inc", (x: Long) => x + 1)

This is my test SQL:

val df = spark.sql("select sum(inc(vals)) from data")
df.explain(true)
df.show()

This is the optimized plan for that SQL:

== Optimized Logical Plan ==
Aggregate [sum(inc(vals#4L)) AS sum(inc(vals))#7L]
+- LocalRelation [vals#4L]
I want to rewrite the plan and pull the "inc" call out of "sum", the same way Python UDFs are extracted. This is the optimized plan I want:

Aggregate [sum(inc_val#6L) AS sum(inc(vals))#7L]
+- Project [inc(vals#4L) AS inc_val#6L]
   +- LocalRelation [vals#4L]
I found that the source file "ExtractPythonUDFs.scala" already does something similar for Python UDFs, but it inserts a new node named "ArrowEvalPython". This is the logical plan for a Python UDF:

== Optimized Logical Plan ==
Aggregate [sum(pythonUDF0#7L) AS sum(inc(vals))#4L]
+- Project [pythonUDF0#7L]
   +- ArrowEvalPython [inc(vals#0L)], [pythonUDF0#7L], 200
      +- Repartition 10, true
         +- RelationV2[vals#0L] parquet file:/tmp/vals.parquet
What I want to insert is just a plain "Project" node; I don't want to define a new node type.


Here is the test code for my project:

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression, ScalaUDF}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule

object RewritePlanTest {

  case class UdfRule(spark: SparkSession) extends Rule[LogicalPlan] {

    def collectUDFs(e: Expression): Seq[Expression] = e match {
      case udf: ScalaUDF => Seq(udf)
      case _ => e.children.flatMap(collectUDFs)
    }

    override def apply(plan: LogicalPlan): LogicalPlan = plan match {
      case agg @ Aggregate(g, a, _) if g.isEmpty && a.length == 1 =>
        val udfs = agg.expressions.flatMap(collectUDFs)
        println("===============================")
        udfs.foreach(println)
        val test = udfs(0).isInstanceOf[NamedExpression]
        println(s"cast ScalaUDF to NamedExpression = ${test}")
        println("===============================")
        agg
      case _ => plan
    }
  }

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("Rewrite plan test")
      .withExtensions(e => e.injectOptimizerRule(UdfRule))
      .getOrCreate()

    val input = Seq(100L, 200L, 300L)
    import spark.implicits._
    input.toDF("vals").createOrReplaceTempView("data")

    spark.udf.register("inc", (x: Long) => x + 1)

    val df = spark.sql("select sum(inc(vals)) from data")
    df.explain(true)
    df.show()

    spark.stop()
  }
}
I have extracted the ScalaUDF from the Aggregate node. The argument that the Project node requires is a Seq[NamedExpression]:

case class Project(projectList: Seq[NamedExpression], child: LogicalPlan)

but a ScalaUDF cannot be cast to a NamedExpression, so I don't know how to construct the Project node.

Can anyone give me some advice?

Thanks.

OK, I finally found a way to answer this question.

Although a ScalaUDF cannot be cast to a NamedExpression, an Alias can. So I create an Alias from the ScalaUDF and then build the Project from it.
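In isolation, the key step looks roughly like this (a minimal sketch with a hypothetical helper name, not the exact code of the rule below):

import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, NamedExpression, ScalaUDF}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}

// Sketch only: wrap each collected ScalaUDF in an Alias (an Alias is a NamedExpression),
// place the aliases in a Project above the Aggregate's child, and remember which
// output attribute should replace which UDF inside the Aggregate's expressions.
def projectUdfs(udfs: Seq[ScalaUDF], child: LogicalPlan): (Project, Map[ScalaUDF, Attribute]) = {
  val aliased: Seq[NamedExpression] =
    udfs.zipWithIndex.map { case (udf, i) => Alias(udf, s"udf$i")() }
  val proj = Project(aliased, child)
  // proj.output contains one AttributeReference per alias, sharing the alias's exprId
  (proj, udfs.zip(proj.output).toMap)
}

The complete rule and the test program: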
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Expression, ScalaUDF}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule

import scala.collection.mutable

object RewritePlanTest {

  case class UdfRule(spark: SparkSession) extends Rule[LogicalPlan] {

    // Collect every ScalaUDF that appears anywhere inside an expression tree.
    def collectUDFs(e: Expression): Seq[Expression] = e match {
      case udf: ScalaUDF => Seq(udf)
      case _ => e.children.flatMap(collectUDFs)
    }

    override def apply(plan: LogicalPlan): LogicalPlan = plan match {
      case agg @ Aggregate(g, a, c) if g.isEmpty && a.length == 1 =>
        val udfs = agg.expressions.flatMap(collectUDFs)
        if (udfs.isEmpty) {
          agg
        } else {
          // Wrap each ScalaUDF in an Alias: an Alias is a NamedExpression,
          // so it can be placed in a Project's projectList.
          val alias_udf = for (i <- 0 until udfs.size) yield Alias(udfs(i), s"udf${i}")()
          val alias_set = mutable.HashMap[Expression, Attribute]()
          // Evaluate the UDFs in a Project directly below the Aggregate.
          val proj = Project(alias_udf, c)
          // Remember which Project output attribute replaces which UDF.
          alias_set ++= udfs.zip(proj.output)
          // Re-parent the Aggregate on the Project and swap every ScalaUDF in its
          // expressions for the corresponding attribute produced by the Project.
          val new_agg = agg.withNewChildren(Seq(proj)).transformExpressionsUp {
            case udf: ScalaUDF if alias_set.contains(udf) => alias_set(udf)
          }
          println("====== new agg ======")
          println(new_agg)
          new_agg
        }
      case _ => plan
    }
  }


  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("Rewrite plan test")
      .withExtensions(e => e.injectOptimizerRule(UdfRule))
      .getOrCreate()

    val input = Seq(100L, 200L, 300L)
    import spark.implicits._
    input.toDF("vals").createOrReplaceTempView("data")

    spark.udf.register("inc", (x: Long) => x + 1)

    val df = spark.sql("select sum(inc(vals)) from data where vals > 100")
    //    val plan = df.queryExecution.analyzed
    //    println(plan)
    df.explain(true)
    df.show()

    spark.stop()

  }
}
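For the record, this is the debug output printed by the rule, showing the rewritten aggregate with the UDF evaluated in a Project below it:

====== new agg ======
Aggregate [sum(udf0#9L) AS sum(inc(vals))#7L]
+- Project [inc(vals#4L) AS udf0#9L]
   +- LocalRelation [vals#4L]

The Alias is what makes this work: its exprId is carried by the attribute in the Project's output, so after transformExpressionsUp the sum simply references udf0 instead of containing the ScalaUDF itself.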