Repository: spark
Updated Branches:
  refs/heads/master 1f5ddf17e -> 3ae0cda83
[SPARK-4695][SQL] Get result using executeCollect

Using ```executeCollect``` to collect the result, because executeCollect is a
custom implementation of collect in Spark SQL that performs better than the
RDD's collect.

Author: wangfei <wangf...@huawei.com>

Closes #3547 from scwf/executeCollect and squashes the following commits:

a5ab68e [wangfei] Revert "adding debug info"
a60d680 [wangfei] fix test failure
0db7ce8 [wangfei] adding debug info
184c594 [wangfei] using executeCollect instead collect


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/3ae0cda8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/3ae0cda8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/3ae0cda8

Branch: refs/heads/master
Commit: 3ae0cda83c5106136e90d59c20e61db345a5085f
Parents: 1f5ddf1
Author: wangfei <wangf...@huawei.com>
Authored: Tue Dec 2 14:30:44 2014 -0800
Committer: Michael Armbrust <mich...@databricks.com>
Committed: Tue Dec 2 14:30:44 2014 -0800

----------------------------------------------------------------------
 .../src/main/scala/org/apache/spark/sql/hive/HiveContext.scala | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/3ae0cda8/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 304b9a7..34fc21e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -377,7 +377,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
         command.executeCollect().map(_.head.toString)
 
       case other =>
-        val result: Seq[Seq[Any]] = toRdd.map(_.copy()).collect().toSeq
+        val result: Seq[Seq[Any]] = other.executeCollect().toSeq
         // We need the types so we can output struct field names
         val types = analyzed.output.map(_.dataType)
         // Reformat to match hive tab delimited output.
@@ -416,6 +416,8 @@ object HiveContext {
     case (bin: Array[Byte], BinaryType) => new String(bin, "UTF-8")
     case (decimal: Decimal, DecimalType()) =>  // Hive strips trailing zeros so use its toString
       HiveShim.createDecimal(decimal.toBigDecimal.underlying()).toString
+    case (decimal: BigDecimal, DecimalType()) =>
+      HiveShim.createDecimal(decimal.underlying()).toString
     case (other, tpe) if primitiveTypes contains tpe => other.toString
   }
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
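
For context on the first hunk: executeCollect can beat toRdd.map(_.copy()).collect()
because individual physical operators may override it with a cheaper collect path.
The sketch below is a minimal, self-contained model of that idea, not the actual
SparkPlan API; PhysicalPlan, LocalScan, and Limit are stand-in names, and a plain
Iterator replaces the RDD so the example runs on its own.

  object ExecuteCollectSketch {
    type Row = Seq[Any]

    trait PhysicalPlan {
      // Stands in for execute(): RDD[Row]; a plain Iterator keeps the sketch self-contained.
      def execute(): Iterator[Row]
      // Default collect path, analogous to toRdd.map(_.copy()).collect().
      def executeCollect(): Array[Row] = execute().toArray
    }

    // Hypothetical leaf operator backed by an in-memory sequence.
    case class LocalScan(rows: Seq[Row]) extends PhysicalPlan {
      def execute(): Iterator[Row] = rows.iterator
    }

    // A Limit-like operator never needs more than `limit` rows, so it overrides
    // executeCollect instead of materializing the child's entire output first.
    case class Limit(limit: Int, child: PhysicalPlan) extends PhysicalPlan {
      def execute(): Iterator[Row] = child.execute().take(limit)
      override def executeCollect(): Array[Row] = child.execute().take(limit).toArray
    }
  }

In this model, Limit(10, LocalScan(manyRows)).executeCollect() touches only the
first ten rows, which mirrors why the patch prefers other.executeCollect() over
collecting the whole RDD.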
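
On the second hunk: the new branch handles values that arrive as scala.math.BigDecimal
rather than Spark SQL's Decimal, and routes them through HiveShim.createDecimal because,
as the existing inline comment says, Hive strips trailing zeros before printing. The
snippet below only illustrates that formatting difference with standard-library calls;
it is not the commit's HiveShim code path, and DecimalFormatSketch is an invented name.

  import java.math.{BigDecimal => JBigDecimal}

  object DecimalFormatSketch extends App {
    // scala.math.BigDecimal.underlying() exposes the wrapped java.math.BigDecimal,
    // which is what the new case hands to HiveShim.createDecimal.
    val d: JBigDecimal = BigDecimal("3.1400").underlying()

    // Plain toPlainString keeps the trailing zeros...
    println(d.toPlainString)                      // 3.1400
    // ...while stripping them first shows the Hive-style output the comment describes.
    println(d.stripTrailingZeros().toPlainString) // 3.14
  }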