Github user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21004#discussion_r180995890
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala ---
    @@ -384,13 +356,9 @@ case class DataSource(
     
           // This is a non-streaming file based datasource.
           case (format: FileFormat, _) =>
    -        val allPaths = caseInsensitiveOptions.get("path") ++ paths
    -        val hadoopConf = sparkSession.sessionState.newHadoopConf()
    -        val globbedPaths = allPaths.flatMap(
    -          DataSource.checkAndGlobPathIfNecessary(hadoopConf, _, checkFilesExist)).toArray
    -
    -        val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
    -        val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, fileStatusCache)
    +        checkAndGlobPathIfNecessary(checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
    --- End diff ---
    
    Now we may glob the paths twice?
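    
    To make the concern concrete, here is a minimal standalone sketch (hypothetical names and bodies modeled on the diff, not the actual DataSource internals): if the new private helper runs once for the up-front validation shown here and again later while the schema or file index is built, every glob pattern gets expanded against the filesystem twice.
    
        // Standalone Scala sketch of the "glob twice" concern; hypothetical, not Spark code.
        object GlobTwiceSketch {
          private var globCalls = 0
    
          // Stand-in for checkAndGlobPathIfNecessary: expands glob patterns against
          // the filesystem, which is expensive for remote stores such as S3.
          def checkAndGlobPathIfNecessary(paths: Seq[String]): Seq[String] = {
            globCalls += 1
            paths // pretend each pattern expands to itself
          }
    
          // Stand-in for getOrInferFileFormatSchema: it needs the globbed paths to
          // build a file index, so it globs again when they are not passed in.
          def getOrInferFileFormatSchema(paths: Seq[String]): Unit = {
            val globbed = checkAndGlobPathIfNecessary(paths)
            println(s"inferring schema over ${globbed.size} path(s)")
          }
    
          def resolveRelation(paths: Seq[String]): Unit = {
            checkAndGlobPathIfNecessary(paths) // validation pass
            getOrInferFileFormatSchema(paths)  // globs a second time
            println(s"glob calls: $globCalls") // prints 2, not 1
          }
    
          def main(args: Array[String]): Unit =
            resolveRelation(Seq("s3://bucket/data/*.parquet"))
        }
    
    If that is what happens here, passing the already-globbed paths into the schema-inference step would keep it to a single filesystem pass.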


---
