[ 
https://issues.apache.org/jira/browse/HIVE-24918?focusedWorklogId=573869&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-573869
 ]

ASF GitHub Bot logged work on HIVE-24918:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 30/Mar/21 03:59
            Start Date: 30/Mar/21 03:59
    Worklog Time Spent: 10m 
      Work Description: hmangla98 commented on a change in pull request #2097:
URL: https://github.com/apache/hive/pull/2097#discussion_r603753051



##########
File path: 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
##########
@@ -350,6 +353,177 @@ public void testAcidTablesCreateTableIncremental() throws Throwable {
       .verifyResults(new String[] {"11"});
   }
 
+  @Test
+  public void testFailoverWithNoOpenTxns() throws Throwable {
+    WarehouseInstance.Tuple dumpData = null;
+
+    dumpData = primary.run("use " + primaryDbName)
+            .run("create table t1 (id int) clustered by(id) into 3 buckets 
stored as orc " +
+                    "tblproperties (\"transactional\"=\"true\")")
+            .run("create table t2 (rank int) partitioned by (name string) 
tblproperties(\"transactional\"=\"true\", " +
+                    "\"transactional_properties\"=\"insert_only\")")
+            .dump(primaryDbName);
+

Review comment:
       Done

##########
File path: 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
##########
@@ -350,6 +353,177 @@ public void testAcidTablesCreateTableIncremental() throws Throwable {
       .verifyResults(new String[] {"11"});
   }
 
+  @Test
+  public void testFailoverWithNoOpenTxns() throws Throwable {
+    WarehouseInstance.Tuple dumpData = null;
+
+    dumpData = primary.run("use " + primaryDbName)
+            .run("create table t1 (id int) clustered by(id) into 3 buckets 
stored as orc " +
+                    "tblproperties (\"transactional\"=\"true\")")
+            .run("create table t2 (rank int) partitioned by (name string) 
tblproperties(\"transactional\"=\"true\", " +
+                    "\"transactional_properties\"=\"insert_only\")")
+            .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+            .run("use " + replicatedDbName)
+            .run("show tables")
+            .verifyResults(new String[]{"t1", "t2"})
+            .run("repl status " + replicatedDbName)
+            .verifyResult(dumpData.lastReplicationId);
+
+    List<String> failoverConfigs = Arrays.asList(
+            "'" + HiveConf.ConfVars.HIVE_REPL_FAILOVER + "'='true'");
+    dumpData = primary.run("insert into t1 values(1)")
+            .run("insert into t2 partition(name='Bob') values(11)")
+            .run("insert into t2 partition(name='Carl') values(10)")
+            .dump(primaryDbName, failoverConfigs);

Review comment:
       Done

##########
File path: 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
##########
@@ -350,6 +353,177 @@ public void testAcidTablesCreateTableIncremental() throws Throwable {
       .verifyResults(new String[] {"11"});
   }
 
+  @Test
+  public void testFailoverWithNoOpenTxns() throws Throwable {
+    WarehouseInstance.Tuple dumpData = null;
+
+    dumpData = primary.run("use " + primaryDbName)
+            .run("create table t1 (id int) clustered by(id) into 3 buckets 
stored as orc " +
+                    "tblproperties (\"transactional\"=\"true\")")
+            .run("create table t2 (rank int) partitioned by (name string) 
tblproperties(\"transactional\"=\"true\", " +
+                    "\"transactional_properties\"=\"insert_only\")")
+            .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+            .run("use " + replicatedDbName)
+            .run("show tables")
+            .verifyResults(new String[]{"t1", "t2"})
+            .run("repl status " + replicatedDbName)
+            .verifyResult(dumpData.lastReplicationId);
+
+    List<String> failoverConfigs = Arrays.asList(
+            "'" + HiveConf.ConfVars.HIVE_REPL_FAILOVER + "'='true'");
+    dumpData = primary.run("insert into t1 values(1)")
+            .run("insert into t2 partition(name='Bob') values(11)")
+            .run("insert into t2 partition(name='Carl') values(10)")
+            .dump(primaryDbName, failoverConfigs);
+
+    FileSystem fs = new Path(dumpData.dumpLocation).getFileSystem(conf);
+    Path dumpPath = new Path(dumpData.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR);
+    assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString())));
+    assertTrue(fs.exists(new Path(dumpPath, FAILOVER_READY_MARKER.toString())));
+
+    replica.load(replicatedDbName, primaryDbName)
+            .run("use " + replicatedDbName)
+            .run("show tables")
+            .verifyResults(new String[]{"t1", "t2"})
+            .run("repl status " + replicatedDbName)
+            .verifyResult(dumpData.lastReplicationId)
+            .run("select id from t1")
+            .verifyResults(new String[]{"1"})
+            .run("select rank from t2 order by rank")
+            .verifyResults(new String[]{"10", "11"});
+
+    assertTrue(fs.exists(new Path(dumpPath, LOAD_ACKNOWLEDGEMENT.toString())));
+  }
+
+  @Test
+  public void testFailoverWithOpenTxnsDiffDb() throws Throwable {
+    HiveConf primaryConf = primary.getConf();
+    TxnStore txnHandler = TxnUtils.getTxnStore(primary.getConf());
+    WarehouseInstance.Tuple dumpData = null;
+
+    dumpData = primary.run("use " + primaryDbName)
+            .run("create table t1 (id int) clustered by(id) into 3 buckets 
stored as orc " +
+                    "tblproperties (\"transactional\"=\"true\")")
+            .run("create table t2 (rank int) partitioned by (name string) 
tblproperties(\"transactional\"=\"true\", " +
+                    "\"transactional_properties\"=\"insert_only\")")
+            .dump(primaryDbName);
+

Review comment:
       Done

##########
File path: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
##########
@@ -506,6 +506,8 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
         "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
         "with ${hive.scratch.dir.permission}."),
+    HIVE_REPL_FAILOVER("hive.repl.failover.start", false,
+            "Indicates whether the user wants to trigger failover for reverse replication."),

Review comment:
       Done
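
For context, a minimal usage sketch assuming the WarehouseInstance test DSL already shown in the hunks above (the config name is the one added here; nothing else is new):

    // Passed as a dump-scoped override, as in testFailoverWithNoOpenTxns.
    List<String> failoverConfigs = Arrays.asList(
            "'" + HiveConf.ConfVars.HIVE_REPL_FAILOVER + "'='true'");
    WarehouseInstance.Tuple dumpData = primary.dump(primaryDbName, failoverConfigs);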

##########
File path: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
##########
@@ -572,6 +574,11 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. "
             + "If these open transactions are not closed within the timeout value, then REPL DUMP will "
             + "forcefully abort those transactions and continue with bootstrap dump."),
+    REPL_FAILOVER_DUMP_OPEN_TXN_TIMEOUT("hive.repl.failover.dump.open.txn.timeout", "1h",
+            new TimeValidator(TimeUnit.HOURS),
+            "Indicates the timeout for all transactions which are opened before triggering failover. "
+                    + "If these open transactions are not closed within the timeout value, then REPL DUMP will not "
+                    + "mark this state as failover_ready."),

Review comment:
       Done
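
To make the intended semantics concrete, here is a hedged sketch of one way a dump could apply this timeout (a hypothetical helper, not the patch itself; only TxnStore.getOpenTxns() is an existing metastore call): wait for open transactions to close, and if they are still open at the deadline, proceed without marking failover_ready rather than aborting them the way the bootstrap timeout above does.

    // Hypothetical helper for illustration only.
    static boolean openTxnsClosedWithin(TxnStore txnHandler, long timeoutMs)
            throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        // Poll the metastore for transactions that are still open.
        if (txnHandler.getOpenTxns().getOpen_txns().isEmpty()) {
          return true;  // all closed: safe to mark failover_ready
        }
        Thread.sleep(1000);
      }
      return false;     // timed out: dump proceeds without the marker
    }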




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 573869)
    Time Spent: 1h  (was: 50m)

> Handle failover case during Repl Dump
> -------------------------------------
>
>                 Key: HIVE-24918
>                 URL: https://issues.apache.org/jira/browse/HIVE-24918
>             Project: Hive
>          Issue Type: New Feature
>            Reporter: Haymant Mangla
>            Assignee: Haymant Mangla
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> To handle:
> a) Whenever the user wants to go ahead with failover, the next (or a 
> subsequent) repl dump operation should, upon confirming that there are no 
> pending open transaction events, create a _failover_ready marker file in the 
> dump dir (see the sketch below).
> b) Skip subsequent repl dump instances once the marker file is in place.
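
A minimal sketch of the marker handling in (a) and (b), assuming hypothetical helper names (createFailoverReadyMarker and shouldSkipDump are illustrative; FAILOVER_READY_MARKER is the constant the new tests assert on):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // (a) No pending open transaction events: write the marker into the dump dir.
    static void createFailoverReadyMarker(FileSystem fs, Path dumpPath, String marker)
            throws Exception {
      fs.create(new Path(dumpPath, marker)).close();
    }

    // (b) A subsequent dump sees the marker and skips itself.
    static boolean shouldSkipDump(FileSystem fs, Path dumpPath, String marker)
            throws Exception {
      return fs.exists(new Path(dumpPath, marker));
    }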



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
