bhat-vinay commented on code in PR #10414: URL: https://github.com/apache/hudi/pull/10414#discussion_r1437361618
########## hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/TestHoodieTableValuedFunction.scala: ########## @@ -192,6 +192,69 @@ class TestHoodieTableValuedFunction extends HoodieSparkSqlTestBase { } } + test(s"Test hudi_filesystem_view") { + if (HoodieSparkUtils.gteqSpark3_2) { + withTempDir { tmp => + Seq( + ("cow", true), + ("mor", true), + ("cow", false), + ("mor", false) + ).foreach { parameters => + val tableType = parameters._1 + val isTableId = parameters._2 + + val tableName = generateTableName + val tablePath = s"${tmp.getCanonicalPath}/$tableName" + val identifier = if (isTableId) tableName else tablePath + spark.sql("set hoodie.sql.insert.mode = non-strict") + + spark.sql( + s""" + |create table $tableName ( + | id int, + | name string, + | price double + |) using hudi + |partitioned by (price) + |tblproperties ( + | type = '$tableType', + | primaryKey = 'id' + |) + |location '$tablePath' + |""".stripMargin + ) + + spark.sql( + s""" + | insert into $tableName + | values (1, 'a1', 10.0), (2, 'a2', 20.0), (3, 'a3', 30.0) + | """.stripMargin + ) + spark.sql( + s""" + | insert into $tableName + | values (4, 'a4', 10.0), (5, 'a5', 20.0), (6, 'a6', 30.0) + | """.stripMargin + ) + val result1DF = spark.sql(s"select * from hudi_filesystem_view('$identifier', 'price*')") + result1DF.show(false) + val result1Array = result1DF.select( + col("Partition_Path") + ).orderBy("Partition_Path").take(10) + checkAnswer(result1Array)( + Seq("price=10.0"), Review Comment: The FileSystemView also shows the partition path. The test table is partitioned on 'price' column and hence the partition directories (under base path) are named as `price=10.0` etc. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: commits-unsubscribe@hudi.apache.org. For queries about this service, please contact Infrastructure at: users@infra.apache.org.