Repository: spark
Updated Branches:
  refs/heads/master 6cb8f836d -> 6d0bfb960


Small documentation and style fix.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6d0bfb96
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6d0bfb96
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6d0bfb96

Branch: refs/heads/master
Commit: 6d0bfb9601b02749d673bbceea5c36e5bc1c3825
Parents: 6cb8f83
Author: Reynold Xin <r...@databricks.com>
Authored: Sat May 21 23:12:56 2016 -0700
Committer: Reynold Xin <r...@databricks.com>
Committed: Sat May 21 23:12:56 2016 -0700

----------------------------------------------------------------------
 .../sql/catalyst/plans/logical/Statistics.scala      |  3 ++-
 .../sql/execution/joins/BroadcastJoinSuite.scala     | 15 ++++++---------
 2 files changed, 8 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/6d0bfb96/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala
index 63f86ad..6e6cc69 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala
@@ -31,5 +31,6 @@ package org.apache.spark.sql.catalyst.plans.logical
  *
  * @param sizeInBytes Physical size in bytes. For leaf operators this defaults to 1, otherwise it
  *                    defaults to the product of children's `sizeInBytes`.
+ * @param isBroadcastable If true, output is small enough to be used in a broadcast join.
  */
-private[sql] case class Statistics(sizeInBytes: BigInt, isBroadcastable: Boolean = false)
+case class Statistics(sizeInBytes: BigInt, isBroadcastable: Boolean = false)

http://git-wip-us.apache.org/repos/asf/spark/blob/6d0bfb96/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala
index e681b88..be4bf5b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala
@@ -19,8 +19,6 @@ package org.apache.spark.sql.execution.joins
 
 import scala.reflect.ClassTag
 
-import org.scalatest.BeforeAndAfterAll
-
 import org.apache.spark.{AccumulatorSuite, SparkConf, SparkContext}
 import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession}
 import org.apache.spark.sql.execution.exchange.EnsureRequirements
@@ -68,10 +66,11 @@ class BroadcastJoinSuite extends QueryTest with SQLTestUtils {
     }
   }
 
-  private def testBroadcastJoin[T: ClassTag](joinType: String,
-                                             forceBroadcast: Boolean = false): SparkPlan = {
+  private def testBroadcastJoin[T: ClassTag](
+      joinType: String,
+      forceBroadcast: Boolean = false): SparkPlan = {
     val df1 = spark.createDataFrame(Seq((1, "4"), (2, "2"))).toDF("key", "value")
-    var df2 = spark.createDataFrame(Seq((1, "1"), (2, "2"))).toDF("key", "value")
+    val df2 = spark.createDataFrame(Seq((1, "1"), (2, "2"))).toDF("key", "value")
 
     // Comparison at the end is for broadcast left semi join
     val joinExpression = df1("key") === df2("key") && df1("value") > df2("value")
@@ -80,11 +79,9 @@ class BroadcastJoinSuite extends QueryTest with SQLTestUtils {
     } else {
       df1.join(df2, joinExpression, joinType)
     }
-    val plan =
-      EnsureRequirements(spark.sessionState.conf).apply(df3.queryExecution.sparkPlan)
+    val plan = EnsureRequirements(spark.sessionState.conf).apply(df3.queryExecution.sparkPlan)
     assert(plan.collect { case p: T => p }.size === 1)
-
-    return plan
+    plan
   }
 
   test("unsafe broadcast hash join updates peak execution memory") {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to