Github user cloud-fan commented on a diff in the pull request: https://github.com/apache/spark/pull/10942#discussion_r51342432 --- Diff: sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala --- @@ -59,6 +61,141 @@ class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSinglet } } + // To verify if pruning works, we compare the results before filtering + private def checkPrunedAnswers( + sourceDataFrame: DataFrame, + filterCondition: Column, + expectedAnswer: DataFrame): Unit = { + val filter = sourceDataFrame.filter(filterCondition).queryExecution.executedPlan + assert( + filter.isInstanceOf[execution.Filter] || + (filter.isInstanceOf[WholeStageCodegen] && + filter.asInstanceOf[WholeStageCodegen].plan.isInstanceOf[execution.Filter])) + checkAnswer( + expectedAnswer.orderBy(expectedAnswer.logicalPlan.output.map(attr => Column(attr)) : _*), + filter.children.head.executeCollectPublic().sortBy(_.toString())) + } + + test("read partitioning bucketed tables with bucket pruning filters") { + val df = (10 until 50).map(i => (i % 5, i % 13 + 10, i.toString)).toDF("i", "j", "k") + + withTable("bucketed_table") { + // The number of buckets should be large enough to make sure each bucket contains + // at most one bucketing key value. + // json does not support predicate push-down, and thus json is used here --- End diff -- Does it mean bucket pruning is not very useful for parquet?
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastructure@apache.org or file a JIRA ticket with INFRA. --- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org