Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21004#discussion_r181269313
  
    --- Diff: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
 ---
    @@ -384,24 +356,23 @@ case class DataSource(
     
           // This is a non-streaming file based datasource.
           case (format: FileFormat, _) =>
    -        val allPaths = caseInsensitiveOptions.get("path") ++ paths
    -        val hadoopConf = sparkSession.sessionState.newHadoopConf()
    -        val globbedPaths = allPaths.flatMap(
    -          DataSource.checkAndGlobPathIfNecessary(hadoopConf, _, 
checkFilesExist)).toArray
    -
    -        val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
    -        val (dataSchema, partitionSchema) = 
getOrInferFileFormatSchema(format, fileStatusCache)
    -
    -        val fileCatalog = if 
(sparkSession.sqlContext.conf.manageFilesourcePartitions &&
    -            catalogTable.isDefined && 
catalogTable.get.tracksPartitionsInCatalog) {
    +        val globbedPaths =
    +          checkAndGlobPathIfNecessary(checkEmptyGlobPath = true, 
checkFilesExist = checkFilesExist)
    +        val useCatalogFileIndex = 
sparkSession.sqlContext.conf.manageFilesourcePartitions &&
    +          catalogTable.isDefined && 
catalogTable.get.tracksPartitionsInCatalog &&
    +          catalogTable.get.partitionSchema.nonEmpty
    --- End diff --
    
    use `partitionColumnNames` instead of `partitionSchema`, since 
`partitionColumnNames` is a `val` while `partitionSchema` is a `def` (recomputed on each call)


---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to