GitHub user steveloughran commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21257#discussion_r197176461
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelationCommand.scala ---
    @@ -207,9 +210,23 @@ case class InsertIntoHadoopFsRelationCommand(
         }
         // first clear the path determined by the static partition keys (e.g. /table/foo=1)
         val staticPrefixPath = qualifiedOutputPath.suffix(staticPartitionPrefix)
    -    if (fs.exists(staticPrefixPath) && !committer.deleteWithJob(fs, staticPrefixPath, true)) {
    -      throw new IOException(s"Unable to clear output " +
    -        s"directory $staticPrefixPath prior to writing to it")
    +    if (fs.exists(staticPrefixPath)) {
    +      if (staticPartitionPrefix.isEmpty && outputCheck) {
    +        // input contains output; only delete the output's sub-files at job commit
    +          val files = fs.listFiles(staticPrefixPath, false)
    +          while (files.hasNext) {
    +            val file = files.next()
    +            if (!committer.deleteWithJob(fs, file.getPath, false)) {
    +              throw new IOException(s"Unable to clear output " +
    +                s"directory ${file.getPath} prior to writing to it")
    +            }
    +          }
    +      } else {
    +        if (!committer.deleteWithJob(fs, staticPrefixPath, true)) {
    +          throw new IOException(s"Unable to clear output " +
    --- End diff --
    
    Again, it's hard to see how this exception path would be reached.
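    
    For context (not part of the diff): FileCommitProtocol.deleteWithJob is, by default, little more than a pass-through to FileSystem.delete, and most FileSystem implementations raise an IOException on a genuine failure, reserving a false return for a path that no longer exists. Since fs.exists(staticPrefixPath) has just been checked, the !deleteWithJob branch should only fire if something else removes the path concurrently. A minimal sketch of that default behaviour (an illustration, not the exact Spark source):
    
        import org.apache.hadoop.fs.{FileSystem, Path}
    
        object DeleteWithJobSketch {
          // Hedged sketch: the default deleteWithJob simply forwards to FileSystem.delete.
          def deleteWithJob(fs: FileSystem, path: Path, recursive: Boolean): Boolean = {
            // FileSystem.delete throws an IOException on most real failures;
            // a false return usually just means the path was already gone,
            // e.g. because another process deleted it first.
            fs.delete(path, recursive)
          }
        }
    
    If the concern is a concurrent delete, the false case arguably means "nothing left to clear" rather than an error.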

