Github user attilapiros commented on a diff in the pull request:

    https://github.com/apache/spark/pull/20249#discussion_r161488850

--- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala ---
@@ -1869,6 +1869,65 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
     }
   }
+
+  test("SPARK-23057: SET LOCATION for managed table with partition") {
+    withTable("tbl_partition") {
+      withTempDir { dir =>
+        sql("CREATE TABLE tbl_partition(col1 INT, col2 INT) USING parquet PARTITIONED BY (col1)")
+        sql("INSERT INTO tbl_partition PARTITION(col1=1) SELECT 11")
+        sql("INSERT INTO tbl_partition PARTITION(col1=2) SELECT 22")
+        checkAnswer(spark.table("tbl_partition"), Seq(Row(11, 1), Row(22, 2)))
+        val defaultTablePath = spark.sessionState.catalog
+          .getTableMetadata(TableIdentifier("tbl_partition")).storage.locationUri.get
+        try {
+          // before SET LOCATION, partitions col1 = 1 and 2 live under the default table path
+          checkPath(defaultTablePath.toString, Map("col1" -> "1"), "tbl_partition")
+          checkPath(defaultTablePath.toString, Map("col1" -> "2"), "tbl_partition")
+          val path = dir.getCanonicalPath
+
+          // set location of partition col1 = 1
+          sql(s"ALTER TABLE tbl_partition PARTITION (col1='1') SET LOCATION '$path'")
+          checkPath(dir.getCanonicalPath, Map("col1" -> "1"), "tbl_partition")
+          checkPath(defaultTablePath.toString, Map("col1" -> "2"), "tbl_partition")
+
+          // set location of partition col1 = 2
+          sql(s"ALTER TABLE tbl_partition PARTITION (col1='2') SET LOCATION '$path'")
+          checkPath(dir.getCanonicalPath, Map("col1" -> "1"), "tbl_partition")
+          checkPath(dir.getCanonicalPath, Map("col1" -> "2"), "tbl_partition")
+
+          spark.catalog.refreshTable("tbl_partition")
+          // SET LOCATION does not move data from the previous table path to the new one.
+          assert(spark.table("tbl_partition").count() == 0)
+          // the previous table path should still exist.
+          assert(new File(defaultTablePath).exists())
+
+          sql("INSERT INTO tbl_partition PARTITION(col1=2) SELECT 33")
+          // newly inserted data goes to the new table path.
+          assert(dir.listFiles().nonEmpty)
+
+          sql("DROP TABLE tbl_partition")
+          // the new table path is removed after DROP TABLE.
+          assert(!dir.exists())
+        } finally {
+          Utils.deleteRecursively(new File(defaultTablePath))
+        }
+      }
+    }
+  }
+
+  def checkPath(path: String, partSpec: Map[String, String], table: String): Unit = {
+    val catalog = spark.sessionState.catalog
+    val spec = Some(partSpec)
--- End diff --

The `Some` is not needed here, so the `spec.map {}` below can be dropped as well.
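For illustration, a minimal sketch of the simplified helper, with the partition spec passed as a plain `Map`. The body of `checkPath` below the cut-off is not shown in the diff, so the lookup via `SessionCatalog.getPartition` and the prefix assertion here are assumptions about what the helper verifies:

    def checkPath(path: String, partSpec: Map[String, String], table: String): Unit = {
      val catalog = spark.sessionState.catalog
      // Pass the Map directly; without the Some(...) wrapper there is no spec.map {} either.
      val location = catalog.getPartition(TableIdentifier(table), partSpec)
        .storage.locationUri.get.toString
      // A partition still under the default table path keeps its partition directory
      // suffix (e.g. .../col1=1), so compare by prefix rather than strict equality (assumed).
      assert(location.stripPrefix("file:").startsWith(path.stripPrefix("file:")))
    }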