mbutrovich commented on code in PR #4128:
URL: https://github.com/apache/datafusion-comet/pull/4128#discussion_r3155655379


##########
spark/src/test/scala/org/apache/comet/CometIcebergNativeSuite.scala:
##########
@@ -2764,4 +2769,82 @@ class CometIcebergNativeSuite extends CometTestBase with RESTCatalogHelper {
       }
     }
   }
+
+  test("task-level inputMetrics.bytesRead is populated for Iceberg native scan") {
+    assume(icebergAvailable, "Iceberg not available in classpath")
+
+    withTempIcebergDir { warehouseDir =>
+      withSQLConf(
+        "spark.sql.catalog.test_cat" -> "org.apache.iceberg.spark.SparkCatalog",
+        "spark.sql.catalog.test_cat.type" -> "hadoop",
+        "spark.sql.catalog.test_cat.warehouse" -> warehouseDir.getAbsolutePath,
+        CometConf.COMET_ENABLED.key -> "true",
+        CometConf.COMET_EXEC_ENABLED.key -> "true",
+        CometConf.COMET_ICEBERG_NATIVE_ENABLED.key -> "true") {
+
+        spark.sql("""
+          CREATE TABLE test_cat.db.task_metrics_test (
+            id INT,
+            value DOUBLE
+          ) USING iceberg
+        """)
+
+        spark
+          .range(10000)
+          .selectExpr("CAST(id AS INT)", "CAST(id * 1.5 AS DOUBLE) as value")
+          .coalesce(1)
+          .write
+          .format("iceberg")
+          .mode("append")
+          .saveAsTable("test_cat.db.task_metrics_test")
+
+        val bytesReadValues = mutable.ArrayBuffer.empty[Long]
+        val recordsReadValues = mutable.ArrayBuffer.empty[Long]
+
+        val listener = new SparkListener {
+          override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
+            val im = taskEnd.taskMetrics.inputMetrics
+            if (im.bytesRead > 0) {
+              bytesReadValues.synchronized {
+                bytesReadValues += im.bytesRead
+                recordsReadValues += im.recordsRead
+              }
+            }
+          }
+        }
+        spark.sparkContext.addSparkListener(listener)
+
+        try {
+          val df = spark.sql("SELECT * FROM test_cat.db.task_metrics_test")
+
+          val scanNodes = df.queryExecution.executedPlan
+            .collectLeaves()
+            .collect { case s: CometIcebergNativeScanExec => s }
+          assert(scanNodes.nonEmpty, "Expected CometIcebergNativeScanExec in plan")
+
+          df.collect()
+
+          // listenerBus.waitUntilEmpty() is package-private to org.apache.spark
+          Thread.sleep(1000)

Review Comment:
   > the same as `CometTaskMetricSuite`
   
   That's under `org.apache.spark.sql.comet` so it doesn't have this issue. 
I'll re-export it like @andygrove suggested.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to