This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 5a3f41a  [SPARK-34976][SQL] Rename GroupingSet to BaseGroupingSets
5a3f41a is described below

commit 5a3f41a017bf0fb07cf242e5c6ba25fc231863c6
Author: Angerszhuuuu <angers....@gmail.com>
AuthorDate: Wed Apr 7 13:27:21 2021 +0000

    [SPARK-34976][SQL] Rename GroupingSet to BaseGroupingSets
    
    ### What changes were proposed in this pull request?
    Current trait `GroupingSet` is ambiguous, since `grouping set` at the 
parser level means one set of a group.
    Rename this to `BaseGroupingSets` since cube/rollup is syntax sugar for 
`grouping sets`.
    
    ### Why are the changes needed?
    Refactor class name
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Not needed
    
    Closes #32073 from AngersZhuuuu/SPARK-34976.
    
    Authored-by: Angerszhuuuu <angers....@gmail.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../spark/sql/catalyst/analysis/Analyzer.scala     | 10 ++++-----
 .../spark/sql/catalyst/expressions/grouping.scala  | 25 ++++++++++++----------
 .../spark/sql/catalyst/parser/AstBuilder.scala     |  3 ++-
 .../analysis/ResolveGroupingAnalyticsSuite.scala   |  4 ++--
 4 files changed, 23 insertions(+), 19 deletions(-)

diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index bbb7662..f50c28a 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -599,7 +599,7 @@ class Analyzer(override val catalogManager: CatalogManager)
       val aggForResolving = h.child match {
         // For CUBE/ROLLUP expressions, to avoid resolving repeatedly, here we 
delete them from
         // groupingExpressions for condition resolving.
-        case a @ Aggregate(Seq(gs: GroupingSet), _, _) =>
+        case a @ Aggregate(Seq(gs: BaseGroupingSets), _, _) =>
           a.copy(groupingExpressions = gs.groupByExprs)
       }
       // Try resolving the condition of the filter as though it is in the 
aggregate clause
@@ -610,7 +610,7 @@ class Analyzer(override val catalogManager: CatalogManager)
       if (resolvedInfo.nonEmpty) {
         val (extraAggExprs, resolvedHavingCond) = resolvedInfo.get
         val newChild = h.child match {
-          case Aggregate(Seq(gs: GroupingSet), aggregateExpressions, child) =>
+          case Aggregate(Seq(gs: BaseGroupingSets), aggregateExpressions, 
child) =>
             constructAggregate(
               gs.selectedGroupByExprs, gs.groupByExprs,
               aggregateExpressions ++ extraAggExprs, child)
@@ -636,14 +636,14 @@ class Analyzer(override val catalogManager: 
CatalogManager)
     // CUBE/ROLLUP/GROUPING SETS. This also replace grouping()/grouping_id() 
in resolved
     // Filter/Sort.
     def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {
-      case h @ UnresolvedHaving(_, agg @ Aggregate(Seq(gs: GroupingSet), 
aggregateExpressions, _))
-        if agg.childrenResolved && (gs.children ++ 
aggregateExpressions).forall(_.resolved) =>
+      case h @ UnresolvedHaving(_, agg @ Aggregate(Seq(gs: BaseGroupingSets), 
aggExprs, _))
+        if agg.childrenResolved && (gs.children ++ 
aggExprs).forall(_.resolved) =>
         tryResolveHavingCondition(h)
 
       case a if !a.childrenResolved => a // be sure all of the children are 
resolved.
 
       // Ensure group by expressions and aggregate expressions have been 
resolved.
-      case Aggregate(Seq(gs: GroupingSet), aggregateExpressions, child)
+      case Aggregate(Seq(gs: BaseGroupingSets), aggregateExpressions, child)
         if (gs.children ++ aggregateExpressions).forall(_.resolved) =>
         constructAggregate(gs.selectedGroupByExprs, gs.groupByExprs, 
aggregateExpressions, child)
 
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
index 66cd140..bf28efa 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/grouping.scala
@@ -26,14 +26,14 @@ import org.apache.spark.sql.types._
 /**
  * A placeholder expression for cube/rollup, which will be replaced by analyzer
  */
-trait GroupingSet extends Expression with CodegenFallback {
+trait BaseGroupingSets extends Expression with CodegenFallback {
 
   def groupingSets: Seq[Seq[Expression]]
   def selectedGroupByExprs: Seq[Seq[Expression]]
 
   def groupByExprs: Seq[Expression] = {
     assert(children.forall(_.resolved),
-      "Cannot call GroupingSet.groupByExprs before the children expressions 
are all resolved.")
+      "Cannot call BaseGroupingSets.groupByExprs before the children 
expressions are all resolved.")
     children.foldLeft(Seq.empty[Expression]) { (result, currentExpr) =>
       // Only unique expressions are included in the group by expressions and 
is determined
       // based on their semantic equality. Example. grouping sets ((a * b), (b 
* a)) results
@@ -55,7 +55,7 @@ trait GroupingSet extends Expression with CodegenFallback {
   override def eval(input: InternalRow): Any = throw new 
UnsupportedOperationException
 }
 
-object GroupingSet {
+object BaseGroupingSets {
   /**
    * 'GROUP BY a, b, c WITH ROLLUP'
    * is equivalent to
@@ -106,34 +106,37 @@ object GroupingSet {
   }
 }
 
-case class Cube(groupingSetIndexes: Seq[Seq[Int]], children: Seq[Expression]) 
extends GroupingSet {
+case class Cube(
+    groupingSetIndexes: Seq[Seq[Int]],
+    children: Seq[Expression]) extends BaseGroupingSets {
   override def groupingSets: Seq[Seq[Expression]] = 
groupingSetIndexes.map(_.map(children))
-  override def selectedGroupByExprs: Seq[Seq[Expression]] = 
GroupingSet.cubeExprs(groupingSets)
+  override def selectedGroupByExprs: Seq[Seq[Expression]] = 
BaseGroupingSets.cubeExprs(groupingSets)
 }
 
 object Cube {
   def apply(groupingSets: Seq[Seq[Expression]]): Cube = {
-    Cube(GroupingSet.computeGroupingSetIndexes(groupingSets), 
groupingSets.flatten)
+    Cube(BaseGroupingSets.computeGroupingSetIndexes(groupingSets), 
groupingSets.flatten)
   }
 }
 
 case class Rollup(
     groupingSetIndexes: Seq[Seq[Int]],
-    children: Seq[Expression]) extends GroupingSet {
+    children: Seq[Expression]) extends BaseGroupingSets {
   override def groupingSets: Seq[Seq[Expression]] = 
groupingSetIndexes.map(_.map(children))
-  override def selectedGroupByExprs: Seq[Seq[Expression]] = 
GroupingSet.rollupExprs(groupingSets)
+  override def selectedGroupByExprs: Seq[Seq[Expression]] =
+    BaseGroupingSets.rollupExprs(groupingSets)
 }
 
 object Rollup {
   def apply(groupingSets: Seq[Seq[Expression]]): Rollup = {
-    Rollup(GroupingSet.computeGroupingSetIndexes(groupingSets), 
groupingSets.flatten)
+    Rollup(BaseGroupingSets.computeGroupingSetIndexes(groupingSets), 
groupingSets.flatten)
   }
 }
 
 case class GroupingSets(
     groupingSetIndexes: Seq[Seq[Int]],
     flatGroupingSets: Seq[Expression],
-    userGivenGroupByExprs: Seq[Expression]) extends GroupingSet {
+    userGivenGroupByExprs: Seq[Expression]) extends BaseGroupingSets {
   override def groupingSets: Seq[Seq[Expression]] = 
groupingSetIndexes.map(_.map(flatGroupingSets))
   override def selectedGroupByExprs: Seq[Seq[Expression]] = groupingSets
   // Includes the `userGivenGroupByExprs` in the children, which will be 
included in the final
@@ -145,7 +148,7 @@ object GroupingSets {
   def apply(
       groupingSets: Seq[Seq[Expression]],
       userGivenGroupByExprs: Seq[Expression]): GroupingSets = {
-    val groupingSetIndexes = 
GroupingSet.computeGroupingSetIndexes(groupingSets)
+    val groupingSetIndexes = 
BaseGroupingSets.computeGroupingSetIndexes(groupingSets)
     GroupingSets(groupingSetIndexes, groupingSets.flatten, 
userGivenGroupByExprs)
   }
 
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index fad5966..e9052fd 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -966,7 +966,8 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with 
SQLConfHelper with Logg
               expression(groupByExpr.expression)
             }
           })
-      val (groupingSet, expressions) = 
groupByExpressions.partition(_.isInstanceOf[GroupingSet])
+      val (groupingSet, expressions) =
+        groupByExpressions.partition(_.isInstanceOf[BaseGroupingSets])
       if (expressions.nonEmpty && groupingSet.nonEmpty) {
         throw new ParseException("Partial CUBE/ROLLUP/GROUPING SETS like " +
           "`GROUP BY a, b, CUBE(a, b)` is not supported.",
diff --git 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
index 1622928..ae36ab9 100644
--- 
a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
+++ 
b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveGroupingAnalyticsSuite.scala
@@ -42,7 +42,7 @@ class ResolveGroupingAnalyticsSuite extends AnalysisTest {
 
   test("rollupExprs") {
     val testRollup = (exprs: Seq[Expression], rollup: Seq[Seq[Expression]]) => 
{
-      val result = GroupingSet.rollupExprs(exprs.map(Seq(_)))
+      val result = BaseGroupingSets.rollupExprs(exprs.map(Seq(_)))
       assert(result.sortBy(_.hashCode) == rollup.sortBy(_.hashCode))
     }
 
@@ -54,7 +54,7 @@ class ResolveGroupingAnalyticsSuite extends AnalysisTest {
 
   test("cubeExprs") {
     val testCube = (exprs: Seq[Expression], cube: Seq[Seq[Expression]]) => {
-      val result = GroupingSet.cubeExprs(exprs.map(Seq(_)))
+      val result = BaseGroupingSets.cubeExprs(exprs.map(Seq(_)))
       assert(result.sortBy(_.hashCode) == cube.sortBy(_.hashCode))
     }
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to