Github user tejasapatil commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14864#discussion_r77438866
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala ---
    @@ -156,24 +156,57 @@ case class FileSourceScanExec(
         false
       }
     
    -  override val outputPartitioning: Partitioning = {
    +  @transient private lazy val selectedPartitions = relation.location.listFiles(partitionFilters)
    +
    +  override val (outputPartitioning, outputOrdering): (Partitioning, Seq[SortOrder]) = {
         val bucketSpec = if (relation.sparkSession.sessionState.conf.bucketingEnabled) {
           relation.bucketSpec
         } else {
           None
         }
    -    bucketSpec.map { spec =>
    -      val numBuckets = spec.numBuckets
    -      val bucketColumns = spec.bucketColumnNames.flatMap { n =>
    -        output.find(_.name == n)
    -      }
    -      if (bucketColumns.size == spec.bucketColumnNames.size) {
    -        HashPartitioning(bucketColumns, numBuckets)
    -      } else {
    -        UnknownPartitioning(0)
    -      }
    -    }.getOrElse {
    -      UnknownPartitioning(0)
    +    bucketSpec match {
    +      case Some(spec) =>
    +        val numBuckets = spec.numBuckets
    +        val bucketColumns = spec.bucketColumnNames.flatMap { n =>
    +          output.find(_.name == n)
    +        }
    +        if (bucketColumns.size == spec.bucketColumnNames.size) {
    +          val partitioning = HashPartitioning(bucketColumns, numBuckets)
    +
    +          val sortOrder = if (spec.sortColumnNames.nonEmpty) {
    +            // In case of bucketing, it's possible to have multiple files belonging to the
    +            // same bucket in a given relation. Each of these files is locally sorted,
    +            // but those files combined together are not globally sorted. Given that,
    +            // the RDD partition will not be sorted even if the relation has sort columns set.
    +            // The current solution is to check if all the buckets have a single file in them.
    +
    +            val files = selectedPartitions.flatMap(partition => partition.files)
    +            val bucketToFilesGrouping =
    +              files.map(_.getPath.getName).groupBy(file => BucketingUtils.getBucketId(file))
    +            val singleFilePartitions = bucketToFilesGrouping.forall(p => p._2.length <= 1)
    +
    +            if (singleFilePartitions) {
    +              def toAttribute(colName: String): Attribute =
    +                output.find(_.name == colName).getOrElse {
    --- End diff --
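
    A self-contained sketch of the single-file-per-bucket check described in the diff comment above, assuming a simplified stand-in for Spark's internal BucketingUtils.getBucketId and hypothetical file names:

        // Simplified stand-in for BucketingUtils.getBucketId: pull the "_NNNNN"
        // bucket-id suffix out of a bucketed file name (parsing rule assumed).
        val bucketIdPattern = """_(\d+)""".r
        def getBucketId(fileName: String): Option[Int] =
          bucketIdPattern.findFirstMatchIn(fileName).map(_.group(1).toInt)

        // Hypothetical listing: bucket 0 has two files, so even though each file
        // is locally sorted, the bucket as a whole is not globally sorted.
        val fileNames = Seq(
          "part-00000-aaa_00000.parquet",
          "part-00001-bbb_00000.parquet",
          "part-00002-ccc_00001.parquet")

        val singleFilePerBucket =
          fileNames.groupBy(getBucketId).forall { case (_, files) => files.length <= 1 }
        // singleFilePerBucket == false here, so no outputOrdering would be reported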
    
    @cloud-fan : Sure. Did this change.
    
    I am throwing an exception because the end user should know that there is
    something wrong with the table metadata and they need to look into it.
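
    To make that failure mode concrete, here is a minimal sketch of the kind of helper being discussed, with the exception type and message assumed rather than taken from the PR:

        import org.apache.spark.sql.catalyst.expressions.Attribute

        // Resolve a sort column name against the scan's output; fail loudly
        // (exception type assumed) so the user learns the table metadata is
        // inconsistent instead of getting a silently wrong ordering.
        def toAttribute(output: Seq[Attribute], colName: String): Attribute =
          output.find(_.name == colName).getOrElse {
            throw new IllegalStateException(
              s"Sort column '$colName' not found among output columns " +
                s"${output.map(_.name).mkString("[", ", ", "]")}; table metadata may be corrupted.")
          }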

