This is an automated email from the ASF dual-hosted git repository.

pwason pushed a commit to branch release-0.14.0
in repository https://gitbox.apache.org/repos/asf/hudi.git

commit 2c9024e4fad3254424874889aaffb9523d310423
Author: Jon Vexler <jbvex...@gmail.com>
AuthorDate: Tue Aug 15 12:15:07 2023 -0700

    [HUDI-6673] Fix Incremental Query Syntax - Spark SQL Core Flow Test (#9410)
    
    Co-authored-by: Jonathan Vexler <=>
---
 .../test/scala/org/apache/hudi/functional/TestSparkSqlCoreFlow.scala  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSparkSqlCoreFlow.scala b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSparkSqlCoreFlow.scala
index fa883cd3eb2..daf10956b69 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSparkSqlCoreFlow.scala
+++ b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestSparkSqlCoreFlow.scala
@@ -125,7 +125,7 @@ class TestSparkSqlCoreFlow extends HoodieSparkSqlTestBase {
    // we have 2 commits, try pulling the first commit (which is not the latest)
     //HUDI-5266
     val firstCommit = listCommitsSince(fs, tableBasePath, "000").get(0)
-    val hoodieIncViewDf1 = spark.sql(s"select * from hudi_table_changes('$tableName', 'earliest', '$firstCommit')")
+    val hoodieIncViewDf1 = spark.sql(s"select * from hudi_table_changes('$tableName', 'latest_state', 'earliest', '$firstCommit')")
 
    assertEquals(100, hoodieIncViewDf1.count()) // 100 initial inserts must be pulled
    var countsPerCommit = hoodieIncViewDf1.groupBy("_hoodie_commit_time").count().collect()
@@ -137,7 +137,7 @@ class TestSparkSqlCoreFlow extends HoodieSparkSqlTestBase {
 
     //another incremental query with commit2 and commit3
     //HUDI-5266
-    val hoodieIncViewDf2 = spark.sql(s"select * from hudi_table_changes('$tableName', '$commitInstantTime2', '$commitInstantTime3')")
+    val hoodieIncViewDf2 = spark.sql(s"select * from hudi_table_changes('$tableName', 'latest_state', '$commitInstantTime2', '$commitInstantTime3')")
 
    assertEquals(uniqueKeyCnt2, hoodieIncViewDf2.count()) // 60 records must be pulled
    countsPerCommit = hoodieIncViewDf2.groupBy("_hoodie_commit_time").count().collect()

Reply via email to