This is an automated email from the ASF dual-hosted git repository.

sankarh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new dfa1fc9  HIVE-21602: Dropping an external table created by migration case should delete the data directory (Sankar Hariappan, reviewed by Anishek Agarwal)
dfa1fc9 is described below

commit dfa1fc98c7fa7430bdabb00512bfc4371b5529af
Author: Sankar Hariappan <sank...@apache.org>
AuthorDate: Fri Apr 12 12:10:54 2019 +0530

    HIVE-21602: Dropping an external table created by migration case should delete the data directory (Sankar Hariappan, reviewed by Anishek Agarwal)
    
    Signed-off-by: Sankar Hariappan <sank...@apache.org>
---
 .../parse/TestReplicationWithTableMigration.java   | 49 ++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigration.java
index 58561d4..bafcdbe 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigration.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigration.java
@@ -409,6 +409,55 @@ public class TestReplicationWithTableMigration {
   }
 
   @Test
+  public void testBootstrapConvertedExternalTableAutoPurgeDataOnDrop() throws Throwable {
+    WarehouseInstance.Tuple bootstrap = primary.run("use " + primaryDbName)
+            .run("create table avro_tbl partitioned by (country string) ROW FORMAT SERDE "
+                    + "'org.apache.hadoop.hive.serde2.avro.AvroSerDe' stored as avro "
+                    + "tblproperties ('avro.schema.url'='" + avroSchemaFile.toUri().toString() + "')")
+            .run("insert into avro_tbl partition (country='india') values ('another', 13)")
+            .dump(primaryDbName, null);
+
+    replica.load(replicatedDbName, bootstrap.dumpLocation);
+    Path dataLocation = assertTablePath(replicatedDbName, "avro_tbl");
+
+    WarehouseInstance.Tuple incremental = primary.run("use " + primaryDbName)
+            .run("drop table avro_tbl")
+            .dump(primaryDbName, bootstrap.lastReplicationId);
+    replica.load(replicatedDbName, incremental.dumpLocation);
+
+    // After drop, the external table data location should be auto deleted as it is converted one.
+    assertFalse(replica.miniDFSCluster.getFileSystem().exists(dataLocation));
+  }
+
+  @Test
+  public void testIncConvertedExternalTableAutoDeleteDataDirOnDrop() throws Throwable {
+    WarehouseInstance.Tuple bootstrap = primary.dump(primaryDbName, null);
+    replica.load(replicatedDbName, bootstrap.dumpLocation);
+
+    WarehouseInstance.Tuple incremental = primary.run("use " + primaryDbName)
+            .run("create table avro_tbl ROW FORMAT SERDE "
+                    + "'org.apache.hadoop.hive.serde2.avro.AvroSerDe' stored as avro "
+                    + "tblproperties ('avro.schema.url'='" + avroSchemaFile.toUri().toString() + "')")
+            .run("insert into avro_tbl values ('str', 13)")
+            .dump(primaryDbName, bootstrap.lastReplicationId);
+    replica.load(replicatedDbName, incremental.dumpLocation);
+
+    // Data location is valid and is under default external warehouse directory.
+    Table avroTable = replica.getTable(replicatedDbName, "avro_tbl");
+    assertTrue(MetaStoreUtils.isExternalTable(avroTable));
+    Path dataLocation = new Path(avroTable.getSd().getLocation());
+    assertTrue(replica.miniDFSCluster.getFileSystem().exists(dataLocation));
+
+    incremental = primary.run("use " + primaryDbName)
+            .run("drop table avro_tbl")
+            .dump(primaryDbName, incremental.lastReplicationId);
+    replica.load(replicatedDbName, incremental.dumpLocation);
+
+    // After drop, the external table data location should be auto deleted as it is converted one.
+    assertFalse(replica.miniDFSCluster.getFileSystem().exists(dataLocation));
+  }
+
+  @Test
   public void testBootstrapLoadMigrationToAcidWithMoveOptimization() throws Throwable {
     List<String> withConfigs =
             
Collections.singletonList("'hive.repl.enable.move.optimization'='true'");

Reply via email to