This is an automated email from the ASF dual-hosted git repository.

srowen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
     new ace2364  [MINOR][TEST] Correct some unit test mistakes
ace2364 is described below

commit ace23642967eacbb2252d9f9fa447769de6f700c
Author:     liuxian <liu.xi...@zte.com.cn>
AuthorDate: Sat Jan 19 08:54:55 2019 -0600

    [MINOR][TEST] Correct some unit test mistakes

    ## What changes were proposed in this pull request?

    Correct some unit test mistakes.

    ## How was this patch tested?

    N/A

    Closes #23583 from 10110346/unused_symbol.

    Authored-by: liuxian <liu.xi...@zte.com.cn>
    Signed-off-by: Sean Owen <sean.o...@databricks.com>
---
 sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala        | 2 +-
 sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index fb8239e..ab82948 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -476,7 +476,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
     val keyValue1 = ds.groupByKey(t => (t._1, "key")).mapValues(t => (t._2, "value"))
     val agged1 = keyValue1.mapGroups { case (g, iter) => (g._1, iter.map(_._1).sum) }
-    checkDataset(agged, ("a", 30), ("b", 3), ("c", 1))
+    checkDataset(agged1, ("a", 30), ("b", 3), ("c", 1))
   }
 
   test("groupBy function, reduce") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
index 0b6d939..4f1ae06 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala
@@ -170,7 +170,7 @@ class InsertSuite extends DataSourceTest with SharedSQLContext {
     // Writing the table to more part files.
     val rdd2 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 10)
-    spark.read.json(rdd1.toDS()).createOrReplaceTempView("jt2")
+    spark.read.json(rdd2.toDS()).createOrReplaceTempView("jt2")
     sql(
       s"""
         |INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt2
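Why these small fixes matter: in DatasetSuite, the assertion previously re-checked `agged` (a Dataset built and verified earlier in the same test), so the `mapValues` pipeline behind `keyValue1` and `agged1` was never actually exercised; the corrected call inspects `agged1`. A minimal sketch of the path the fixed assertion now covers; the local `SparkSession` and the sample rows are illustrative assumptions, not taken from the patch (the rows are chosen to reproduce the expected sums):

```scala
import org.apache.spark.sql.SparkSession

// Illustrative local session; the suite itself gets one from SharedSQLContext.
val spark = SparkSession.builder().master("local[2]").appName("agged1-sketch").getOrCreate()
import spark.implicits._

// Rows chosen to reproduce the sums the test expects.
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()

// Key each row by its first column, then rewrite the value side with mapValues.
val keyValue1 = ds.groupByKey(t => (t._1, "key")).mapValues(t => (t._2, "value"))

// Sum the transformed values per group; this is the Dataset the fixed
// checkDataset(agged1, ...) call now actually inspects.
val agged1 = keyValue1.mapGroups { case (g, iter) => (g._1, iter.map(_._1).sum) }

agged1.collect().sortBy(_._1).foreach(println)  // (a,30), (b,3), (c,1)
```

Similarly, in InsertSuite the comment announces writing the table "to more part files", but the `jt2` view was accidentally built from the earlier `rdd1`, leaving the ten-partition `rdd2` unused; the fix wires `rdd2` into the view. A sketch of the corrected registration, under the same illustrative-session assumption:

```scala
// Assumes the `spark` session and `spark.implicits._` import from the sketch above.
// Build the JSON strings across ten partitions, as the test intends, and
// register them as the temp view the INSERT OVERWRITE statement reads from.
val rdd2 = spark.sparkContext.parallelize(
  (1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 10)
spark.read.json(rdd2.toDS()).createOrReplaceTempView("jt2")
spark.sql("SELECT a, b FROM jt2").show()  // ten rows, a = 1..10
```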