Repository: spark
Updated Branches:
  refs/heads/branch-2.0 e69639f43 -> f52a95248


[SPARK-15597][SQL] Add SparkSession.emptyDataset

## What changes were proposed in this pull request?
This patch adds a new function emptyDataset to SparkSession, for creating an 
empty dataset.

## How was this patch tested?
Added a test case.

Author: Reynold Xin <r...@databricks.com>

Closes #13344 from rxin/SPARK-15597.

(cherry picked from commit a52e6813392ba4bdb1b818694b7ced8f6caa6a2b)
Signed-off-by: Andrew Or <and...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f52a9524
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f52a9524
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f52a9524

Branch: refs/heads/branch-2.0
Commit: f52a9524865b8c56058a65b29a1aaacffb709f69
Parents: e69639f
Author: Reynold Xin <r...@databricks.com>
Authored: Fri May 27 11:13:09 2016 -0700
Committer: Andrew Or <and...@databricks.com>
Committed: Fri May 27 11:13:17 2016 -0700

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/sql/SparkSession.scala  | 12 ++++++++++++
 .../test/scala/org/apache/spark/sql/DatasetSuite.scala  |  6 ++++++
 2 files changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/f52a9524/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala 
b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
index aa60048..c9276cf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -224,6 +224,18 @@ class SparkSession private(
 
   /**
    * :: Experimental ::
+   * Creates a new [[Dataset]] of type T containing zero elements.
+   *
+   * @since 2.0.0
+   */
+  @Experimental
+  def emptyDataset[T: Encoder]: Dataset[T] = {
+    val encoder = implicitly[Encoder[T]]
+    new Dataset(self, LocalRelation(encoder.schema.toAttributes), encoder)
+  }
+
+  /**
+   * :: Experimental ::
    * Creates a [[DataFrame]] from an RDD of Product (e.g. case classes, 
tuples).
    *
    * @group dataframes

http://git-wip-us.apache.org/repos/asf/spark/blob/f52a9524/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 2a65916..e395007 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -46,6 +46,12 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
       1, 1, 1)
   }
 
+  test("emptyDataset") {
+    val ds = spark.emptyDataset[Int]
+    assert(ds.count() == 0L)
+    assert(ds.collect() sameElements Array.empty[Int])
+  }
+
   test("range") {
     assert(spark.range(10).map(_ + 1).reduce(_ + _) == 55)
     assert(spark.range(10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + 
_) == 55)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to