Repository: spark
Updated Branches:
  refs/heads/master 91fed8e9c -> 020ff8cd3


[SPARK-13751] [SQL] generate better code for Filter

## What changes were proposed in this pull request?

This PR improves the codegen of Filter by:

1. Filtering out rows early if they contain a null value that would cause the 
condition to evaluate to null or false. After this, the condition can be 
simplified, because the inputs are no longer nullable.

2. Splitting the condition into conjunctive predicates, then checking them one 
by one (see the sketch after this list).

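For illustration, a minimal sketch of the splitting step in plain Scala (the 
helper names `splitConjuncts` and `separateNullChecks` are hypothetical; the 
actual patch reuses Catalyst's `PredicateHelper.splitConjunctivePredicates` 
plus a `partition`, as shown in the diff below):
```scala
import org.apache.spark.sql.catalyst.expressions.{And, Attribute, Expression, IsNotNull}

// Flatten an And-tree such as `a AND b AND c` into Seq(a, b, c), so each
// conjunct can be checked (and short-circuited) independently.
def splitConjuncts(condition: Expression): Seq[Expression] = condition match {
  case And(left, right) => splitConjuncts(left) ++ splitConjuncts(right)
  case other => Seq(other)
}

// Separate the IsNotNull checks on input attributes from the remaining
// predicates; the former become cheap early `continue`s in the generated code.
def separateNullChecks(
    condition: Expression,
    childOutput: Seq[Attribute]): (Seq[Expression], Seq[Expression]) =
  splitConjuncts(condition).partition {
    case IsNotNull(a: Attribute) if childOutput.contains(a) => true
    case _ => false
  }
```
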
Here is a piece of the code generated for the Filter in TPCDS Q55:
```java
/* 109 */       /*** CONSUME: Filter ((((isnotnull(d_moy#149) && isnotnull(d_year#147)) && (d_moy#149 = 11)) && (d_year#147 = 1999)) && isnotnull(d_date_sk#141)) */
/* 110 */       /* input[0, int] */
/* 111 */       boolean project_isNull2 = rdd_row.isNullAt(0);
/* 112 */       int project_value2 = project_isNull2 ? -1 : (rdd_row.getInt(0));
/* 113 */       /* input[1, int] */
/* 114 */       boolean project_isNull3 = rdd_row.isNullAt(1);
/* 115 */       int project_value3 = project_isNull3 ? -1 : (rdd_row.getInt(1));
/* 116 */       /* input[2, int] */
/* 117 */       boolean project_isNull4 = rdd_row.isNullAt(2);
/* 118 */       int project_value4 = project_isNull4 ? -1 : (rdd_row.getInt(2));
/* 119 */
/* 120 */       if (project_isNull3) continue;
/* 121 */       if (project_isNull4) continue;
/* 122 */       if (project_isNull2) continue;
/* 123 */
/* 124 */       /* (input[1, int] = 11) */
/* 125 */       boolean filter_value6 = false;
/* 126 */       filter_value6 = project_value3 == 11;
/* 127 */       if (!filter_value6) continue;
/* 128 */
/* 129 */       /* (input[2, int] = 1999) */
/* 130 */       boolean filter_value9 = false;
/* 131 */       filter_value9 = project_value4 == 1999;
/* 132 */       if (!filter_value9) continue;
/* 133 */
/* 134 */       filter_metricValue1.add(1);
/* 135 */
/* 136 */       /*** CONSUME: Project [d_date_sk#141] */
/* 137 */
/* 138 */       project_rowWriter1.write(0, project_value2);
/* 139 */       append(project_result1.copy());
```
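
Because the null checks run first, Filter can also report the guarded columns 
as non-nullable, which lets downstream operators drop dead null branches. A 
minimal sketch of that narrowing (the hypothetical `narrowOutput` mirrors the 
`output` override in the diff below):
```scala
import org.apache.spark.sql.catalyst.expressions.Attribute

// Attributes guarded by an IsNotNull conjunct can be exposed as
// non-nullable in Filter's output schema.
def narrowOutput(childOutput: Seq[Attribute], notNull: Seq[Attribute]): Seq[Attribute] =
  childOutput.map { a =>
    if (a.nullable && notNull.contains(a)) a.withNullability(false) else a
  }
```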

## How was this patch tested?

Existing tests.

Author: Davies Liu <dav...@databricks.com>

Closes #11585 from davies/gen_filter.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/020ff8cd
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/020ff8cd
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/020ff8cd

Branch: refs/heads/master
Commit: 020ff8cd34b74de31e878082b8e18005f61f1f77
Parents: 91fed8e
Author: Davies Liu <dav...@databricks.com>
Authored: Thu Mar 10 16:40:16 2016 -0800
Committer: Davies Liu <davies....@gmail.com>
Committed: Thu Mar 10 16:40:16 2016 -0800

----------------------------------------------------------------------
 .../spark/sql/execution/WholeStageCodegen.scala |  2 +-
 .../spark/sql/execution/basicOperators.scala    | 65 ++++++++++++++++----
 .../joins/BroadcastNestedLoopJoin.scala         |  4 +-
 3 files changed, 57 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/020ff8cd/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
index e8e42d7..52c2971 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegen.scala
@@ -334,7 +334,7 @@ case class WholeStageCodegen(child: SparkPlan) extends UnaryNode with CodegenSup
 
     // try to compile, helpful for debug
     val cleanedSource = CodeFormatter.stripExtraNewLines(source)
-    // println(s"${CodeFormatter.format(cleanedSource)}")
+    logDebug(s"${CodeFormatter.format(cleanedSource)}")
     CodeGenerator.compile(cleanedSource)
 
     val rdds = child.asInstanceOf[CodegenSupport].upstreams()

http://git-wip-us.apache.org/repos/asf/spark/blob/020ff8cd/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
index 6ebbc8b..6e2a5aa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
@@ -74,8 +74,27 @@ case class Project(projectList: Seq[NamedExpression], child: SparkPlan)
 }
 
 
-case class Filter(condition: Expression, child: SparkPlan) extends UnaryNode with CodegenSupport {
-  override def output: Seq[Attribute] = child.output
+case class Filter(condition: Expression, child: SparkPlan)
+  extends UnaryNode with CodegenSupport with PredicateHelper {
+
+  // Split out all the IsNotNulls from condition.
+  private val (notNullPreds, otherPreds) = splitConjunctivePredicates(condition).partition {
+    case IsNotNull(a) if child.output.contains(a) => true
+    case _ => false
+  }
+
+  // The columns that will be filtered out by `IsNotNull` could be considered as not nullable.
+  private val notNullAttributes = notNullPreds.flatMap(_.references)
+
+  override def output: Seq[Attribute] = {
+    child.output.map { a =>
+      if (a.nullable && notNullAttributes.contains(a)) {
+        a.withNullability(false)
+      } else {
+        a
+      }
+    }
+  }
 
  private[sql] override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
@@ -90,20 +109,42 @@ case class Filter(condition: Expression, child: SparkPlan) extends UnaryNode wit
 
   override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: String): String = {
     val numOutput = metricTerm(ctx, "numOutputRows")
-    val expr = ExpressionCanonicalizer.execute(
-      BindReferences.bindReference(condition, child.output))
+
+    // filter out the nulls
+    val filterOutNull = notNullAttributes.map { a =>
+      val idx = child.output.indexOf(a)
+      s"if (${input(idx).isNull}) continue;"
+    }.mkString("\n")
+
     ctx.currentVars = input
-    val eval = expr.gen(ctx)
-    val nullCheck = if (expr.nullable) {
-      s"!${eval.isNull} &&"
-    } else {
-      s""
+    val predicates = otherPreds.map { e =>
+      val bound = ExpressionCanonicalizer.execute(
+        BindReferences.bindReference(e, output))
+      val ev = bound.gen(ctx)
+      val nullCheck = if (bound.nullable) {
+        s"${ev.isNull} || "
+      } else {
+        s""
+      }
+      s"""
+         |${ev.code}
+         |if (${nullCheck}!${ev.value}) continue;
+       """.stripMargin
+    }.mkString("\n")
+
+    // Reset the isNull to false for the not-null columns, then the following operators could
+    // generate better code (remove dead branches).
+    val resultVars = input.zipWithIndex.map { case (ev, i) =>
+      if (notNullAttributes.contains(child.output(i))) {
+        ev.isNull = "false"
+      }
+      ev
     }
     s"""
-       |${eval.code}
-       |if (!($nullCheck ${eval.value})) continue;
+       |$filterOutNull
+       |$predicates
        |$numOutput.add(1);
-       |${consume(ctx, ctx.currentVars)}
+       |${consume(ctx, resultVars)}
      """.stripMargin
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/020ff8cd/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoin.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoin.scala
index d83486d..4143e94 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoin.scala
@@ -55,7 +55,9 @@ case class BroadcastNestedLoopJoin(
       UnsafeProjection.create(output, output)
     } else {
       // Always put the stream side on left to simplify implementation
-      UnsafeProjection.create(output, streamed.output ++ broadcast.output)
+      // both the left and right sides could be null
+      UnsafeProjection.create(
+        output, (streamed.output ++ broadcast.output).map(_.withNullability(true)))
     }
   }
 

