LuciferYang commented on a change in pull request #29000: URL: https://github.com/apache/spark/pull/29000#discussion_r472729801
########## File path: sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala ########## @@ -164,4 +165,48 @@ class PartitionedWriteSuite extends QueryTest with SharedSparkSession { assert(e.getMessage.contains("Found duplicate column(s) b, b: `b`;")) } } + + test("SPARK-27194 SPARK-29302: Fix commit collision in dynamic partition overwrite mode") { + withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> + SQLConf.PartitionOverwriteMode.DYNAMIC.toString, + SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key -> + classOf[PartitionFileExistCommitProtocol].getName) { + withTempDir { d => + withTable("t") { + sql( + s""" + | create table t(c1 int, p1 int) using parquet partitioned by (p1) + | location '${d.getAbsolutePath}' + """.stripMargin) + + val df = Seq((1, 2)).toDF("c1", "p1") + df.write + .partitionBy("p1") + .mode("overwrite") + .saveAsTable("t") + checkAnswer(sql("select * from t"), df) + } + } + } + } +} + +/** + * A file commit protocol with pre-created partition file. when try to overwrite partition dir + * in dynamic partition mode, FileAlreadyExist exception would raise without SPARK-31968 + */ +private class PartitionFileExistCommitProtocol( + jobId: String, + path: String, + dynamicPartitionOverwrite: Boolean) + extends HadoopMapReduceCommitProtocol(jobId, path, dynamicPartitionOverwrite) { + override def setupJob(jobContext: JobContext): Unit = { + super.setupJob(jobContext) + val stagingDir = new File(path, s".spark-staging-$jobId") Review comment: @WinkerDu `path` string is a URI format like `file:/xxx`. 
Currently the path of `conflictTaskFile` is created as ``` /xxx/spark-warehouse/org.apache.spark.sql.sources.PartitionedWriteSuite/t/file:/xxx/spark-warehouse/org.apache.spark.sql.sources.PartitionedWriteSuite/t/.spark-staging-${jobId}/p1=2/part-00000-${jobId}.c000.snappy.parquet ``` We should strip the `file:` prefix or translate it to a local filesystem path, so that the `conflictTaskFile` is created as ``` /xxx/spark-warehouse/org.apache.spark.sql.sources.PartitionedWriteSuite/t/.spark-staging-${jobId}/p1=2/part-00000-${jobId}.c000.snappy.parquet ``` ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org