Repository: spark
Updated Branches:
  refs/heads/master 5c78be7a5 -> 9baac56cc


Minor fixes for pull request https://github.com/apache/spark/pull/4592.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9baac56c
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9baac56c
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9baac56c

Branch: refs/heads/master
Commit: 9baac56ccd57d3890a9b6439d4e13bbe9381822b
Parents: 5c78be7
Author: Reynold Xin <r...@databricks.com>
Authored: Mon Feb 16 10:09:55 2015 -0800
Committer: Reynold Xin <r...@databricks.com>
Committed: Mon Feb 16 10:09:55 2015 -0800

----------------------------------------------------------------------
 .../src/main/scala/org/apache/spark/sql/DataFrameImpl.scala | 6 +++---
 .../src/main/scala/org/apache/spark/sql/GroupedData.scala   | 9 ++++-----
 2 files changed, 7 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/9baac56c/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
index 9eb0c13..500e3c9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameImpl.scala
@@ -83,17 +83,17 @@ private[sql] class DataFrameImpl protected[sql](
 
   protected[sql] def resolve(colName: String): NamedExpression = {
     queryExecution.analyzed.resolve(colName, sqlContext.analyzer.resolver).getOrElse {
-      throw new RuntimeException(
+      throw new AnalysisException(
         s"""Cannot resolve column name "$colName" among 
(${schema.fieldNames.mkString(", ")})""")
     }
   }
 
-  protected[sql] def numericColumns(): Seq[Expression] = {
+  protected[sql] def numericColumns: Seq[Expression] = {
     schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
       queryExecution.analyzed.resolve(n.name, sqlContext.analyzer.resolver).get
     }
   }
- 
+
   override def toDF(colNames: String*): DataFrame = {
     require(schema.size == colNames.size,
       "The number of columns doesn't match.\n" +

http://git-wip-us.apache.org/repos/asf/spark/blob/9baac56c/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index a5a677b..2ecf086 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -17,8 +17,8 @@
 
 package org.apache.spark.sql
 
-import scala.language.implicitConversions
 import scala.collection.JavaConversions._
+import scala.language.implicitConversions
 
 import org.apache.spark.sql.catalyst.analysis.Star
 import org.apache.spark.sql.catalyst.expressions._
@@ -26,7 +26,6 @@ import org.apache.spark.sql.catalyst.plans.logical.Aggregate
 import org.apache.spark.sql.types.NumericType
 
 
-
 /**
  * A set of methods for aggregations on a [[DataFrame]], created by [[DataFrame.groupBy]].
  */
@@ -48,13 +47,13 @@ class GroupedData protected[sql](df: DataFrameImpl, groupingExprs: Seq[Expressio
       // No columns specified. Use all numeric columns.
       df.numericColumns
     } else {
-      // Make sure all specified columns are numeric
+      // Make sure all specified columns are numeric.
       colNames.map { colName =>
         val namedExpr = df.resolve(colName)
         if (!namedExpr.dataType.isInstanceOf[NumericType]) {
           throw new AnalysisException(
             s""""$colName" is not a numeric column. """ +
-            "Aggregation function can only be performed on a numeric column.")
+            "Aggregation function can only be applied on a numeric column.")
         }
         namedExpr
       }
@@ -64,7 +63,7 @@ class GroupedData protected[sql](df: DataFrameImpl, groupingExprs: Seq[Expressio
       Alias(a, a.toString)()
     }
   }
- 
+
   private[this] def strToExpr(expr: String): (Expression => Expression) = {
     expr.toLowerCase match {
       case "avg" | "average" | "mean" => Average

