Github user lianhuiwang commented on a diff in the pull request:

    https://github.com/apache/spark/pull/13494#discussion_r68204608
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala ---
    @@ -109,108 +111,45 @@ private[sql] object FileSourceStrategy extends Strategy with Logging {
           val pushedDownFilters = dataFilters.flatMap(DataSourceStrategy.translateFilter)
           logInfo(s"Pushed Filters: ${pushedDownFilters.mkString(",")}")
     
    -      val readFile = files.fileFormat.buildReaderWithPartitionValues(
    -        sparkSession = files.sparkSession,
    -        dataSchema = files.dataSchema,
    -        partitionSchema = files.partitionSchema,
    -        requiredSchema = prunedDataSchema,
    -        filters = pushedDownFilters,
    -        options = files.options,
    -        hadoopConf = files.sparkSession.sessionState.newHadoopConfWithOptions(files.options))
    -
    -      val plannedPartitions = files.bucketSpec match {
    -        case Some(bucketing) if files.sparkSession.sessionState.conf.bucketingEnabled =>
    -          logInfo(s"Planning with ${bucketing.numBuckets} buckets")
    -          val bucketed =
    -            selectedPartitions.flatMap { p =>
    -              p.files.map { f =>
    -                val hosts = getBlockHosts(getBlockLocations(f), 0, f.getLen)
    -                PartitionedFile(p.values, f.getPath.toUri.toString, 0, f.getLen, hosts)
    -              }
    -            }.groupBy { f =>
    -              BucketingUtils
    -                .getBucketId(new Path(f.filePath).getName)
    -                .getOrElse(sys.error(s"Invalid bucket file ${f.filePath}"))
    -            }
    -
    -          (0 until bucketing.numBuckets).map { bucketId =>
    -            FilePartition(bucketId, bucketed.getOrElse(bucketId, Nil))
    -          }
    -
    -        case _ =>
    -          val defaultMaxSplitBytes = files.sparkSession.sessionState.conf.filesMaxPartitionBytes
    -          val openCostInBytes = files.sparkSession.sessionState.conf.filesOpenCostInBytes
    -          val defaultParallelism = files.sparkSession.sparkContext.defaultParallelism
    -          val totalBytes = selectedPartitions.flatMap(_.files.map(_.getLen + openCostInBytes)).sum
    -          val bytesPerCore = totalBytes / defaultParallelism
    -          val maxSplitBytes = Math.min(defaultMaxSplitBytes,
    -            Math.max(openCostInBytes, bytesPerCore))
    -          logInfo(s"Planning scan with bin packing, max size: $maxSplitBytes bytes, " +
    -            s"open cost is considered as scanning $openCostInBytes bytes.")
    -
    -          val splitFiles = selectedPartitions.flatMap { partition =>
    -            partition.files.flatMap { file =>
    -              val blockLocations = getBlockLocations(file)
    -              (0L until file.getLen by maxSplitBytes).map { offset =>
    -                val remaining = file.getLen - offset
    -                val size = if (remaining > maxSplitBytes) maxSplitBytes else remaining
    -                val hosts = getBlockHosts(blockLocations, offset, size)
    -                PartitionedFile(partition.values, file.getPath.toUri.toString, offset, size, hosts)
    -              }
    -            }
    -          }.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)
    -
    -          val partitions = new ArrayBuffer[FilePartition]
    -          val currentFiles = new ArrayBuffer[PartitionedFile]
    -          var currentSize = 0L
    -
    -          /** Add the given file to the current partition. */
    -          def addFile(file: PartitionedFile): Unit = {
    -            currentSize += file.length + openCostInBytes
    -            currentFiles.append(file)
    -          }
    -
    -          /** Close the current partition and move to the next. */
    -          def closePartition(): Unit = {
    -            if (currentFiles.nonEmpty) {
    -              val newPartition =
    -                FilePartition(
    -                  partitions.size,
    -                  currentFiles.toArray.toSeq) // Copy to a new Array.
    -              partitions.append(newPartition)
    -            }
    -            currentFiles.clear()
    -            currentSize = 0
    -          }
    -
    -          // Assign files to partitions using "First Fit Decreasing" (FFD)
    -          // TODO: consider adding a slop factor here?
    -          splitFiles.foreach { file =>
    -            if (currentSize + file.length > maxSplitBytes) {
    -              closePartition()
    -            }
    -            addFile(file)
    -          }
    -          closePartition()
    -          partitions
    +      val optimizerMetadataOnly =
    +        readDataColumns.isEmpty && files.sparkSession.sessionState.conf.optimizerMetadataOnly
    +      val scanRdd: RDD[InternalRow] = if (optimizerMetadataOnly) {
    +        val partitionSchema = files.partitionSchema.toAttributes
    +        lazy val converter = GenerateUnsafeProjection.generate(partitionSchema, partitionSchema)
    +        val partitionValues = selectedPartitions.map(_.values)
    +        files.sqlContext.sparkContext.parallelize(partitionValues, 1).map(converter(_))
    --- End diff --
    
    Yes, after rethinking this, I will add a metadataOnly optimizer rule to the optimizer list. Thanks.

