Github user setjet commented on a diff in the pull request: https://github.com/apache/spark/pull/18113#discussion_r155374041 --- Diff: sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala --- @@ -263,6 +262,25 @@ class DatasetAggregatorSuite extends QueryTest with SharedSQLContext { ("a", 4), ("b", 3)) } + test("typed aggregate: min, max") { + val ds = Seq("a" -> 1, "a" -> 3, "b" -> 4, "b" -> -4, "b" -> 0).toDS() + checkDataset( + ds.groupByKey(_._1).agg( + typed.min(_._2), typed.minLong(_._2), typed.max(_._2), typed.maxLong(_._2)), + ("a", Some(1.0), Some(1L), Some(3.0), Some(3L)), + ("b", Some(-4.0), Some(-4L), Some(4.0), Some(4L))) + } + + test("typed aggregate: empty") { + val empty = Seq.empty[(Double, Double)].toDS --- End diff -- That won't change anything, unfortunately. The difference between the empty and the non-empty test cases is that the latter performs a groupByKey. When a groupByKey is applied to an empty dataset, no Row is returned at all, which prevents us from verifying that a None is returned.
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org