[ https://issues.apache.org/jira/browse/HIVE-25814?focusedWorklogId=721855&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-721855 ]
ASF GitHub Bot logged work on HIVE-25814: ----------------------------------------- Author: ASF GitHub Bot Created on: 07/Feb/22 11:35 Start Date: 07/Feb/22 11:35 Worklog Time Spent: 10m Work Description: ayushtkn commented on a change in pull request #2907: URL: https://github.com/apache/hive/pull/2907#discussion_r800566878 ########## File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java ########## @@ -156,6 +160,106 @@ public void tearDown() throws Throwable { primary.run("drop database if exists " + primaryDbName + "_extra cascade"); } + @Test + public void testReplicationMetricForSkippedIteration() throws Throwable { + isMetricsEnabledForTests(true); + MetricCollector collector = MetricCollector.getInstance(); + WarehouseInstance.Tuple dumpData = primary.run("use " + primaryDbName) + .run("create table t1 (id int) clustered by(id) into 3 buckets " + + "stored as orc tblproperties (\"transactional\"=\"true\")") + .run("insert into t1 values(1)") + .dump(primaryDbName); + + + ReplicationMetric metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SUCCESS); + + primary.dump(primaryDbName); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SKIPPED); + + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("show tables") + .verifyResults(new String[]{"t1"}) + .run("repl status " + replicatedDbName) + .verifyResult(dumpData.lastReplicationId) + .run("select id from t1") + .verifyResults(new String[]{"1"}); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SUCCESS); + + replica.load(replicatedDbName, primaryDbName); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SKIPPED); + isMetricsEnabledForTests(false); + } + + @Test + public void 
testReplicationMetricForFailedIteration() throws Throwable { + isMetricsEnabledForTests(true); + MetricCollector collector = MetricCollector.getInstance(); + WarehouseInstance.Tuple dumpData = primary.run("use " + primaryDbName) + .run("create table t1 (id int) clustered by(id) into 3 buckets " + + "stored as orc tblproperties (\"transactional\"=\"true\")") + .run("insert into t1 values(1)") + .dump(primaryDbName); + + ReplicationMetric metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SUCCESS); + + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("show tables") + .verifyResults(new String[]{"t1"}) + .run("repl status " + replicatedDbName) + .verifyResult(dumpData.lastReplicationId) + .run("select id from t1") + .verifyResults(new String[]{"1"}); + + Path nonRecoverableFile = new Path(new Path(dumpData.dumpLocation), ReplAck.NON_RECOVERABLE_MARKER.toString()); + FileSystem fs = new Path(dumpData.dumpLocation).getFileSystem(conf); + fs.create(nonRecoverableFile); + + primary.dumpFailure(primaryDbName); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SKIPPED); + assertEquals(metric.getProgress().getStages().get(0).getErrorLogPath(), nonRecoverableFile.toString()); + + primary.dumpFailure(primaryDbName); + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SKIPPED); + assertEquals(metric.getProgress().getStages().get(0).getErrorLogPath(), nonRecoverableFile.toString()); + + fs.delete(nonRecoverableFile, true); + dumpData = primary.dump(primaryDbName); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SUCCESS); + + replica.run("ALTER DATABASE " + replicatedDbName + + " SET DBPROPERTIES('" + ReplConst.REPL_INCOMPATIBLE + "'='true')"); + replica.loadFailure(replicatedDbName, primaryDbName); + + nonRecoverableFile = new Path(new 
Path(dumpData.dumpLocation), ReplAck.NON_RECOVERABLE_MARKER.toString()); + assertTrue(fs.exists(nonRecoverableFile)); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.FAILED_ADMIN); + assertEquals(metric.getProgress().getStages().get(0).getErrorLogPath(), nonRecoverableFile.toString()); + + replica.loadFailure(replicatedDbName, primaryDbName); + + metric = collector.getMetrics().getLast(); + assertEquals(metric.getProgress().getStatus(), Status.SKIPPED); + assertEquals(metric.getProgress().getStages().get(0).getErrorLogPath(), nonRecoverableFile.toString()); + isMetricsEnabledForTests(false); Review comment: Move this to a finally block, and move the test to TestReplicationScenario. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org Issue Time Tracking ------------------- Worklog Id: (was: 721855) Time Spent: 50m (was: 40m) > Add entry in replication_metrics table for skipped replication iterations. > -------------------------------------------------------------------------- > > Key: HIVE-25814 > URL: https://issues.apache.org/jira/browse/HIVE-25814 > Project: Hive > Issue Type: Improvement > Reporter: Haymant Mangla > Assignee: Haymant Mangla > Priority: Major > Labels: pull-request-available > Time Spent: 50m > Remaining Estimate: 0h > -- This message was sent by Atlassian Jira (v8.20.1#820001)