This is an automated email from the ASF dual-hosted git repository.

changchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new 103a668b61 [MINOR] Refactor Spark 40 Test suite (#11344)
103a668b61 is described below

commit 103a668b61e096fd1f9b78329f21e0156eb979d0
Author: Chang chen <[email protected]>
AuthorDate: Wed Dec 31 14:02:25 2025 +0800

    [MINOR] Refactor Spark 40 Test suite (#11344)
    
    * [Refactor] Rename GlutenCastSuite to GlutenCastWithAnsiOffSuite and update test settings to use the new suite

    * [Refactor] Add GlutenDataSourceV2SQLSuite classes for V1 and V2 filter testing

    Remove GlutenDataSourceV2SQLSuiteV1Filter.scala and GlutenDataSourceV2SQLSuiteV2Filter.scala

    * [Refactor] Rename FallbackStrategiesSuite to GlutenFallbackStrategiesSuite and move to gluten package

    * [Refactor] Consolidate GlutenDeleteFromTableSuite into GlutenGroupBasedDeleteFromTableSuite for cleaner structure

    * [Refactor] Remove ParquetReadBenchmark as it is no longer necessary

    * [Refactor] Adjust import structure and package declaration for GlutenValidateRequirementsSuite
---
 .../utils/clickhouse/ClickHouseTestSettings.scala  |   9 +-
 .../gluten/utils/velox/VeloxTestSettings.scala     |   9 +-
 ...uite.scala => GlutenCastWithAnsiOffSuite.scala} |   2 +-
 ...lter.scala => GlutenDataSourceV2SQLSuite.scala} |   4 +
 .../GlutenDataSourceV2SQLSuiteV1Filter.scala       |  23 ---
 .../GlutenGroupBasedDeleteFromTableSuite.scala     |   4 +
 .../benchmarks/ParquetReadBenchmark.scala          | 227 ---------------------
 .../exchange/GlutenValidateRequirementsSuite.scala |  24 ---
 .../GlutenValidateRequirementsSuite.scala}         |   6 +-
 .../GlutenFallbackStrategiesSuite.scala}           |   9 +-
 10 files changed, 25 insertions(+), 292 deletions(-)

diff --git a/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala b/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
index 38b23262fb..abccf9fe91 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
@@ -27,16 +27,15 @@ import org.apache.spark.sql.execution.adaptive.clickhouse.ClickHouseAdaptiveQuer
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.datasources.binaryfile.GlutenBinaryFileFormatSuite
 import org.apache.spark.sql.execution.datasources.csv.{GlutenCSVLegacyTimeParserSuite, GlutenCSVv1Suite, GlutenCSVv2Suite}
-import org.apache.spark.sql.execution.datasources.exchange.GlutenValidateRequirementsSuite
 import org.apache.spark.sql.execution.datasources.json.{GlutenJsonLegacyTimeParserSuite, GlutenJsonV1Suite, GlutenJsonV2Suite}
 import org.apache.spark.sql.execution.datasources.orc._
 import org.apache.spark.sql.execution.datasources.parquet._
 import org.apache.spark.sql.execution.datasources.text.{GlutenTextV1Suite, GlutenTextV2Suite}
 import org.apache.spark.sql.execution.datasources.v2.{GlutenDataSourceV2StrategySuite, GlutenFileTableSuite, GlutenV2PredicateSuite}
-import org.apache.spark.sql.execution.exchange.GlutenEnsureRequirementsSuite
+import org.apache.spark.sql.execution.exchange.{GlutenEnsureRequirementsSuite, GlutenValidateRequirementsSuite}
 import org.apache.spark.sql.execution.joins._
 import org.apache.spark.sql.extension.{GlutenCollapseProjectExecTransformerSuite, GlutenCustomerExtensionSuite, GlutenSessionExtensionSuite}
-import org.apache.spark.sql.gluten.GlutenFallbackSuite
+import org.apache.spark.sql.gluten.{GlutenFallbackStrategiesSuite, GlutenFallbackSuite}
 import org.apache.spark.sql.hive.execution.GlutenHiveSQLQueryCHSuite
 import org.apache.spark.sql.sources._
 
@@ -75,7 +74,7 @@ class ClickHouseTestSettings extends BackendTestSettings {
       "No deadlock in UI update",
       "SPARK-35455: Unify empty relation optimization between normal and AQE 
optimizer - multi join"
     )
-  enableSuite[FallbackStrategiesSuite]
+  enableSuite[GlutenFallbackStrategiesSuite]
   enableSuite[GlutenApproxCountDistinctForIntervalsQuerySuite]
     .excludeCH("test ApproxCountDistinctForIntervals with large number of 
endpoints")
   enableSuite[GlutenApproximatePercentileQuerySuite]
@@ -352,7 +351,7 @@ class ClickHouseTestSettings extends BackendTestSettings {
    // Extra ColumnarToRow is needed to transform vanilla columnar data to gluten columnar data.
    .includeCH("SPARK-37369: Avoid redundant ColumnarToRow transition on InMemoryTableScan")
     .excludeCH("Gluten - InMemoryRelation statistics")
-  enableSuite[GlutenCastSuite]
+  enableSuite[GlutenCastWithAnsiOffSuite]
     .exclude(
       "Process Infinity, -Infinity, NaN in case insensitive manner" // +inf 
not supported in folly.
     )
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/velox/VeloxTestSettings.scala b/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/velox/VeloxTestSettings.scala
index 84ee599738..c33a4027e0 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/velox/VeloxTestSettings.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/gluten/utils/velox/VeloxTestSettings.scala
@@ -27,17 +27,16 @@ import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.adaptive.velox.VeloxAdaptiveQueryExecSuite
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.datasources.binaryfile.GlutenBinaryFileFormatSuite
-import org.apache.spark.sql.execution.datasources.exchange.GlutenValidateRequirementsSuite
 import org.apache.spark.sql.execution.datasources.json.{GlutenJsonLegacyTimeParserSuite, GlutenJsonV1Suite, GlutenJsonV2Suite}
 import org.apache.spark.sql.execution.datasources.orc._
 import org.apache.spark.sql.execution.datasources.parquet._
 import org.apache.spark.sql.execution.datasources.text.{GlutenTextV1Suite, GlutenTextV2Suite}
 import org.apache.spark.sql.execution.datasources.v2.{GlutenDataSourceV2StrategySuite, GlutenFileTableSuite, GlutenV2PredicateSuite}
-import org.apache.spark.sql.execution.exchange.GlutenEnsureRequirementsSuite
+import org.apache.spark.sql.execution.exchange.{GlutenEnsureRequirementsSuite, GlutenValidateRequirementsSuite}
 import org.apache.spark.sql.execution.joins._
 import org.apache.spark.sql.execution.python._
 import org.apache.spark.sql.extension.{GlutenCollapseProjectExecTransformerSuite, GlutenSessionExtensionSuite, TestFileSourceScanExecTransformer}
-import org.apache.spark.sql.gluten.GlutenFallbackSuite
+import org.apache.spark.sql.gluten.{GlutenFallbackStrategiesSuite, GlutenFallbackSuite}
 import org.apache.spark.sql.hive.execution.GlutenHiveSQLQuerySuite
 import org.apache.spark.sql.sources._
 
@@ -93,7 +92,7 @@ class VeloxTestSettings extends BackendTestSettings {
   enableSuite[GlutenArithmeticExpressionSuite]
     .exclude("SPARK-45786: Decimal multiply, divide, remainder, quot")
   enableSuite[GlutenBitwiseExpressionsSuite]
-  enableSuite[GlutenCastSuite]
+  enableSuite[GlutenCastWithAnsiOffSuite]
     .exclude(
       "Process Infinity, -Infinity, NaN in case insensitive manner" // +inf 
not supported in folly.
     )
@@ -591,7 +590,7 @@ class VeloxTestSettings extends BackendTestSettings {
   enableSuite[GlutenInnerJoinSuiteForceShjOff]
   enableSuite[GlutenOuterJoinSuiteForceShjOn]
   enableSuite[GlutenOuterJoinSuiteForceShjOff]
-  enableSuite[FallbackStrategiesSuite]
+  enableSuite[GlutenFallbackStrategiesSuite]
   enableSuite[GlutenBroadcastExchangeSuite]
   enableSuite[GlutenLocalBroadcastExchangeSuite]
   enableSuite[GlutenCoalesceShufflePartitionsSuite]
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastWithAnsiOffSuite.scala
similarity index 99%
rename from gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastSuite.scala
rename to gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastWithAnsiOffSuite.scala
index c8b8aaf99a..74c1b25ca2 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastSuite.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/catalyst/expressions/GlutenCastWithAnsiOffSuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.util.DebuggableThreadUtils
 import java.sql.{Date, Timestamp}
 import java.util.{Calendar, TimeZone}
 
-class GlutenCastSuite extends CastWithAnsiOffSuite with GlutenTestsTrait {
+class GlutenCastWithAnsiOffSuite extends CastWithAnsiOffSuite with GlutenTestsTrait {
   override def beforeAll(): Unit = {
     super.beforeAll()
    // Need to explicitly set spark.sql.preserveCharVarcharTypeInfo=true for gluten's test
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV2Filter.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuite.scala
similarity index 89%
rename from gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV2Filter.scala
rename to gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuite.scala
index 7e02fc07ce..28c9f9ff7d 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV2Filter.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuite.scala
@@ -18,6 +18,10 @@ package org.apache.spark.sql.connector
 
 import org.apache.spark.sql._
 
+class GlutenDataSourceV2SQLSuiteV1Filter
+  extends DataSourceV2SQLSuiteV1Filter
+  with GlutenSQLTestsBaseTrait {}
+
 class GlutenDataSourceV2SQLSuiteV2Filter
   extends DataSourceV2SQLSuiteV2Filter
   with GlutenSQLTestsBaseTrait {}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV1Filter.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV1Filter.scala
deleted file mode 100644
index ff76180086..0000000000
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDataSourceV2SQLSuiteV1Filter.scala
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql.connector
-
-import org.apache.spark.sql._
-
-class GlutenDataSourceV2SQLSuiteV1Filter
-  extends DataSourceV2SQLSuiteV1Filter
-  with GlutenSQLTestsBaseTrait {}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenGroupBasedDeleteFromTableSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenGroupBasedDeleteFromTableSuite.scala
index 25f377505c..5e9abeb077 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenGroupBasedDeleteFromTableSuite.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenGroupBasedDeleteFromTableSuite.scala
@@ -21,3 +21,7 @@ import org.apache.spark.sql.GlutenSQLTestsBaseTrait
 class GlutenGroupBasedDeleteFromTableSuite
   extends GroupBasedDeleteFromTableSuite
   with GlutenSQLTestsBaseTrait {}
+
+class GlutenDeleteFromTableSuite
+  extends GroupBasedDeleteFromTableSuite
+  with GlutenSQLTestsBaseTrait {}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
deleted file mode 100644
index 12cc721154..0000000000
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql.execution.benchmarks
-
-import org.apache.gluten.config.GlutenConfig
-import org.apache.gluten.execution.{FileSourceScanExecTransformer, WholeStageTransformer}
-import org.apache.gluten.extension.columnar.transition.Transitions
-import org.apache.gluten.utils.BackendTestUtils
-
-import org.apache.spark.SparkConf
-import org.apache.spark.benchmark.Benchmark
-import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.UnsafeProjection
-import org.apache.spark.sql.execution.FileSourceScanExec
-import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark
-import org.apache.spark.sql.execution.datasources.{FilePartition, FileScanRDD, PartitionedFile}
-import org.apache.spark.sql.vectorized.ColumnarBatch
-
-import scala.collection.JavaConverters._
-
-/**
- * Benchmark to measure native parquet read performance. To run this benchmark:
- * {{{
- *   1. Run in IDEA: run this class directly;
- *   2. Run without IDEA: bin/spark-submit --class <this class>
- *        --jars <spark core test jar>,<spark catalyst test jar>,<spark sql test jar>
- *        --conf xxxx=xxx
- *        gluten-ut-XXX-tests.jar
- *        parameters
- *
- *   Parameters:
- *     1. parquet files dir;
- *     2. the fields to read;
- *     3. the execution count;
- *     4. whether to run vanilla spark benchmarks;
- * }}}
- */
-object ParquetReadBenchmark extends SqlBasedBenchmark {
-
-  protected lazy val thrdNum = "1"
-  protected lazy val memorySize = "4G"
-  protected lazy val offheapSize = "4G"
-
-  def beforeAll(): Unit = {}
-
-  override def getSparkSession: SparkSession = {
-    beforeAll();
-    val conf = new SparkConf()
-      .setAppName("ParquetReadBenchmark")
-      .setIfMissing("spark.master", s"local[$thrdNum]")
-      .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .set("spark.shuffle.manager", 
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
-      .set("spark.memory.offHeap.enabled", "true")
-      .setIfMissing("spark.memory.offHeap.size", offheapSize)
-      .setIfMissing("spark.sql.columnVector.offheap.enabled", "true")
-      .set("spark.sql.adaptive.enabled", "false")
-      .setIfMissing("spark.driver.memory", memorySize)
-      .setIfMissing("spark.executor.memory", memorySize)
-      .setIfMissing("spark.sql.files.maxPartitionBytes", "1G")
-      .setIfMissing("spark.sql.files.openCostInBytes", "1073741824")
-
-    if (BackendTestUtils.isCHBackendLoaded()) {
-      conf
-        .set("spark.io.compression.codec", "LZ4")
-        .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
-        .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
-        .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
-        .set(
-          "spark.sql.catalog.spark_catalog",
-          "org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseSparkCatalog")
-        .set("spark.databricks.delta.maxSnapshotLineageLength", "20")
-        .set("spark.databricks.delta.snapshotPartitions", "1")
-        .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
-        .set("spark.databricks.delta.stalenessLimit", "3600000")
-    }
-
-    SparkSession.builder.config(conf).getOrCreate()
-  }
-
-  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
-    val (parquetDir, scanSchema, executedCnt, executedVanilla) =
-      if (mainArgs.isEmpty) {
-        ("/data/tpch-data-sf10/lineitem", "l_orderkey,l_receiptdate", 5, true)
-      } else {
-        (mainArgs(0), mainArgs(1), mainArgs(2).toInt, mainArgs(3).toBoolean)
-      }
-
-    val parquetReadDf = spark.sql(s"""
-                                     |select $scanSchema from parquet.`$parquetDir`
-                                     |
-                                     |""".stripMargin)
-    // Get the `FileSourceScanExecTransformer`
-    val fileScan = parquetReadDf.queryExecution.executedPlan.collect {
-      case scan: FileSourceScanExecTransformer => scan
-    }.head
-
-    val filePartitions = fileScan.getPartitions
-      .map(_.asInstanceOf[FilePartition])
-
-    val wholeStageTransform = parquetReadDf.queryExecution.executedPlan.collect {
-      case wholeStage: WholeStageTransformer => wholeStage
-    }.head
-
-    // remove ProjectExecTransformer
-    val newWholeStage = wholeStageTransform.withNewChildren(Seq(fileScan))
-
-    // generate ColumnarToRow
-    val columnarToRowPlan = Transitions.toRowPlan(newWholeStage)
-
-    val newWholeStageRDD = newWholeStage.executeColumnar()
-    val newColumnarToRowRDD = columnarToRowPlan.execute()
-
-    // Get the total row count
-    val totalRowCnt = newWholeStageRDD
-      .mapPartitionsInternal(
-        batches => {
-          batches.map(batch => batch.numRows().toLong)
-        })
-      .collect()
-      .sum
-
-    val parquetReadBenchmark =
-      new Benchmark(
-        s"Parquet Read files, fields: $scanSchema, total $totalRowCnt records",
-        totalRowCnt,
-        output = output)
-
-    parquetReadBenchmark.addCase(s"Native Parquet Read", executedCnt) {
-      _ =>
-        val resultRDD: RDD[Long] = newWholeStageRDD.mapPartitionsInternal {
-          batches =>
-            batches.foreach(batch => batch.numRows().toLong)
-            Iterator.empty
-        }
-        resultRDD.collect()
-    }
-
-    parquetReadBenchmark.addCase(s"Native Parquet Read to Rows", executedCnt) {
-      _ =>
-        val resultRDD: RDD[Int] = newColumnarToRowRDD.mapPartitionsInternal {
-          rows =>
-            rows.foreach(_.numFields)
-            Iterator.empty
-        }
-        resultRDD.collect()
-    }
-
-    if (executedVanilla) {
-      spark.conf.set(GlutenConfig.GLUTEN_ENABLED.key, "false")
-
-      val vanillaParquet = spark.sql(s"""
-                                        |select $scanSchema from parquet.`$parquetDir`
-                                        |
-                                        |""".stripMargin)
-
-      val vanillaScanPlan = vanillaParquet.queryExecution.executedPlan.collect {
-        case scan: FileSourceScanExec => scan
-      }
-
-      val fileScan = vanillaScanPlan.head
-      val fileScanOutput = fileScan.output
-      val relation = fileScan.relation
-      val readFile: (PartitionedFile) => Iterator[InternalRow] =
-        relation.fileFormat.buildReaderWithPartitionValues(
-          sparkSession = relation.sparkSession,
-          dataSchema = relation.dataSchema,
-          partitionSchema = relation.partitionSchema,
-          requiredSchema = fileScan.requiredSchema,
-          filters = Seq.empty,
-          options = relation.options,
-          hadoopConf = relation.sparkSession.sessionState.newHadoopConfWithOptions(relation.options)
-        )
-
-      val newFileScanRDD = new FileScanRDD(spark, readFile, filePartitions, fileScan.requiredSchema)
-        .asInstanceOf[RDD[ColumnarBatch]]
-
-      val rowCnt = newFileScanRDD
-        .mapPartitionsInternal(batches => batches.map(batch => batch.numRows().toLong))
-        .collect()
-        .sum
-      assert(totalRowCnt == rowCnt, "The row count of the benchmark is not equal.")
-
-      parquetReadBenchmark.addCase(s"Vanilla Spark Parquet Read", executedCnt) 
{
-        _ =>
-          val resultRDD: RDD[Long] = newFileScanRDD.mapPartitionsInternal {
-            batches =>
-              batches.foreach(_.numRows().toLong)
-              Iterator.empty
-          }
-          resultRDD.collect()
-      }
-
-      parquetReadBenchmark.addCase(s"Vanilla Spark Parquet Read to Rows", 
executedCnt) {
-        _ =>
-          val resultRDD: RDD[Long] = newFileScanRDD.mapPartitionsInternal {
-            batches =>
-            val toUnsafe = UnsafeProjection.create(fileScanOutput, fileScanOutput)
-              batches.foreach(_.rowIterator().asScala.map(toUnsafe).foreach(_.numFields))
-              Iterator.empty
-          }
-          resultRDD.collect()
-      }
-    }
-
-    parquetReadBenchmark.run()
-  }
-
-  override def afterAll(): Unit = {
-    super.afterAll()
-  }
-}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/datasources/exchange/GlutenValidateRequirementsSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/datasources/exchange/GlutenValidateRequirementsSuite.scala
deleted file mode 100644
index 132e80696c..0000000000
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/datasources/exchange/GlutenValidateRequirementsSuite.scala
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql.execution.datasources.exchange
-
-import org.apache.spark.sql.GlutenSQLTestsBaseTrait
-import org.apache.spark.sql.execution.exchange.ValidateRequirementsSuite
-
-class GlutenValidateRequirementsSuite
-  extends ValidateRequirementsSuite
-  with GlutenSQLTestsBaseTrait {}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDeleteFromTableSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/exchange/GlutenValidateRequirementsSuite.scala
similarity index 87%
rename from gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDeleteFromTableSuite.scala
rename to gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/exchange/GlutenValidateRequirementsSuite.scala
index ea2fc4e943..a1467af2c5 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/connector/GlutenDeleteFromTableSuite.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/exchange/GlutenValidateRequirementsSuite.scala
@@ -14,10 +14,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql.connector
+package org.apache.spark.sql.execution.exchange
 
 import org.apache.spark.sql.GlutenSQLTestsBaseTrait
 
-class GlutenDeleteFromTableSuite
-  extends GroupBasedDeleteFromTableSuite
+class GlutenValidateRequirementsSuite
+  extends ValidateRequirementsSuite
   with GlutenSQLTestsBaseTrait {}
diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackStrategiesSuite.scala
similarity index 97%
rename from gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
rename to gluten-ut/spark40/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackStrategiesSuite.scala
index de2436a39e..50beded8de 100644
--- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
+++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackStrategiesSuite.scala
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql.execution
+package org.apache.spark.sql.gluten
 
 import org.apache.gluten.backendsapi.BackendsApiManager
 import org.apache.gluten.config.GlutenConfig
@@ -32,9 +32,10 @@ import org.apache.spark.sql.{GlutenSQLTestsTrait, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.Attribute
 import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.execution._
 
-class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
-  import FallbackStrategiesSuite._
+class GlutenFallbackStrategiesSuite extends GlutenSQLTestsTrait {
+  import GlutenFallbackStrategiesSuite._
 
   testGluten("Fall back the whole query if one unsupported") {
     withSQLConf((GlutenConfig.COLUMNAR_QUERY_FALLBACK_THRESHOLD.key, "1")) {
@@ -178,7 +179,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
   }
 }
 
-private object FallbackStrategiesSuite {
+private object GlutenFallbackStrategiesSuite {
   def newRuleApplier(
       spark: SparkSession,
      transformBuilders: Seq[ColumnarRuleCall => Rule[SparkPlan]]): HeuristicApplier = {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
