Github user gatorsmile commented on a diff in the pull request:

    https://github.com/apache/spark/pull/19083#discussion_r142020457
  
    --- Diff: 
sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala 
---
    @@ -95,50 +96,62 @@ class DataFrameTimeWindowingSuite extends QueryTest 
with SharedSQLContext with B
       }
     
       test("sliding window grouping") {
    -    val df = Seq(
    -      ("2016-03-27 19:39:34", 1, "a"),
    -      ("2016-03-27 19:39:56", 2, "a"),
    -      ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
    -
    -    checkAnswer(
    -      df.groupBy(window($"time", "10 seconds", "3 seconds", "0 second"))
    -        .agg(count("*").as("counts"))
    -        .orderBy($"window.start".asc)
    -        .select($"window.start".cast("string"), 
$"window.end".cast("string"), $"counts"),
    -      // 2016-03-27 19:39:27 UTC -> 4 bins
    -      // 2016-03-27 19:39:34 UTC -> 3 bins
    -      // 2016-03-27 19:39:56 UTC -> 3 bins
    -      Seq(
    -        Row("2016-03-27 19:39:18", "2016-03-27 19:39:28", 1),
    -        Row("2016-03-27 19:39:21", "2016-03-27 19:39:31", 1),
    -        Row("2016-03-27 19:39:24", "2016-03-27 19:39:34", 1),
    -        Row("2016-03-27 19:39:27", "2016-03-27 19:39:37", 2),
    -        Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
    -        Row("2016-03-27 19:39:33", "2016-03-27 19:39:43", 1),
    -        Row("2016-03-27 19:39:48", "2016-03-27 19:39:58", 1),
    -        Row("2016-03-27 19:39:51", "2016-03-27 19:40:01", 1),
    -        Row("2016-03-27 19:39:54", "2016-03-27 19:40:04", 1))
    -    )
    -  }
    -
    -  test("sliding window projection") {
    -    val df = Seq(
    +    // In SPARK-21871, we added code to check the actual bytecode size of 
gen'd methods. If the size
    +    // goes over `hugeMethodLimit`, Spark fails to compile the methods and 
the execution also fails
    +    // in a test mode. So, we explicitly turn off whole-stage codegen here.
     +    // This guard can be removed if this issue is fixed.
    +    withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
    +      val df = Seq(
             ("2016-03-27 19:39:34", 1, "a"),
             ("2016-03-27 19:39:56", 2, "a"),
             ("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
    -      .select(window($"time", "10 seconds", "3 seconds", "0 second"), 
$"value")
    -      .orderBy($"window.start".asc, $"value".desc).select("value")
     
    -    val expands = 
df.queryExecution.optimizedPlan.find(_.isInstanceOf[Expand])
    -    assert(expands.nonEmpty, "Sliding windows require expand")
    +      checkAnswer(
    +        df.groupBy(window($"time", "10 seconds", "3 seconds", "0 second"))
    +          .agg(count("*").as("counts"))
    +          .orderBy($"window.start".asc)
    +          .select($"window.start".cast("string"), 
$"window.end".cast("string"), $"counts"),
    +        // 2016-03-27 19:39:27 UTC -> 4 bins
    +        // 2016-03-27 19:39:34 UTC -> 3 bins
    +        // 2016-03-27 19:39:56 UTC -> 3 bins
    +        Seq(
    +          Row("2016-03-27 19:39:18", "2016-03-27 19:39:28", 1),
    +          Row("2016-03-27 19:39:21", "2016-03-27 19:39:31", 1),
    +          Row("2016-03-27 19:39:24", "2016-03-27 19:39:34", 1),
    +          Row("2016-03-27 19:39:27", "2016-03-27 19:39:37", 2),
    +          Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
    +          Row("2016-03-27 19:39:33", "2016-03-27 19:39:43", 1),
    +          Row("2016-03-27 19:39:48", "2016-03-27 19:39:58", 1),
    +          Row("2016-03-27 19:39:51", "2016-03-27 19:40:01", 1),
    +          Row("2016-03-27 19:39:54", "2016-03-27 19:40:04", 1))
    +      )
    +    }
    +  }
     
    -    checkAnswer(
    -      df,
    -      // 2016-03-27 19:39:27 UTC -> 4 bins
    -      // 2016-03-27 19:39:34 UTC -> 3 bins
    -      // 2016-03-27 19:39:56 UTC -> 3 bins
    -      Seq(Row(4), Row(4), Row(4), Row(4), Row(1), Row(1), Row(1), Row(2), 
Row(2), Row(2))
    -    )
    +  test("sliding window projection") {
    +    // In SPARK-21871, we added code to check the actual bytecode size of 
gen'd methods. If the size
    +    // goes over `hugeMethodLimit`, Spark fails to compile the methods and 
the execution also fails
    +    // in a test mode. So, we explicitly turn off whole-stage codegen here.
     +    // This guard can be removed if this issue is fixed.
    +    withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
    --- End diff --
    
    The same comment applies here.


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to