This is an automated email from the ASF dual-hosted git repository.
wchevreuil pushed a commit to branch HBASE-28957
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/HBASE-28957 by this push:
new 01ff3dea8a6 HBASE-29210: Introduce Validation for PITR-Critical Backup Deletion (#6848)
01ff3dea8a6 is described below
commit 01ff3dea8a6e9b90b60d6e5d2d372d03d99bb45e
Author: vinayak hegde <[email protected]>
AuthorDate: Thu Apr 10 16:35:02 2025 +0530
HBASE-29210: Introduce Validation for PITR-Critical Backup Deletion (#6848)
Signed-off-by: Andor Molnár <[email protected]>
Signed-off-by: Wellington Chevreuil <[email protected]>
---
.../apache/hadoop/hbase/backup/BackupDriver.java | 4 +
.../hbase/backup/BackupRestoreConstants.java | 8 +
.../hadoop/hbase/backup/impl/BackupCommands.java | 173 ++++++++++++++++++-
.../hadoop/hbase/backup/TestBackupDelete.java | 6 +-
...estBackupDeleteWithContinuousBackupAndPITR.java | 186 +++++++++++++++++++++
5 files changed, 369 insertions(+), 8 deletions(-)
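
For context, the new flag rides on the existing delete command. Based on the
constants and the argument arrays used in the new test, invocation should look
roughly like this (the backup ID is a placeholder):

    # Refused if the backup is the last FULL backup enabling PITR for a table
    hbase backup delete -l backup_1712740000000

    # Force-delete: bypasses the PITR validation added by this patch
    hbase backup delete -l backup_1712740000000 -fd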
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
index e096bbee161..eb27e9a60e0 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.backup;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_ENABLE_CONTINUOUS_BACKUP;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.LONG_OPTION_FORCE_DELETE;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
@@ -25,6 +26,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_DELETE;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_DELETE_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
@@ -164,6 +167,7 @@ public class BackupDriver extends AbstractHBaseTool {
addOptWithArg(OPTION_YARN_QUEUE_NAME, OPTION_YARN_QUEUE_NAME_DESC);
    addOptNoArg(OPTION_ENABLE_CONTINUOUS_BACKUP, LONG_OPTION_ENABLE_CONTINUOUS_BACKUP,
      OPTION_ENABLE_CONTINUOUS_BACKUP_DESC);
+    addOptNoArg(OPTION_FORCE_DELETE, LONG_OPTION_FORCE_DELETE, OPTION_FORCE_DELETE_DESC);
}
@Override
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index 5d35c8bc3fa..7989a5be8d8 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -101,6 +101,11 @@ public interface BackupRestoreConstants {
  String OPTION_ENABLE_CONTINUOUS_BACKUP_DESC =
    "Flag indicating that the full backup is part of a continuous backup process.";
+  String OPTION_FORCE_DELETE = "fd";
+  String LONG_OPTION_FORCE_DELETE = "force-delete";
+  String OPTION_FORCE_DELETE_DESC =
+    "Flag to forcefully delete the backup, even if it may be required for Point-in-Time Restore";
+
String JOB_NAME_CONF_KEY = "mapreduce.job.name";
String BACKUP_CONFIG_STRING =
@@ -134,6 +139,9 @@ public interface BackupRestoreConstants {
String CONF_CONTINUOUS_BACKUP_WAL_DIR = "hbase.backup.continuous.wal.dir";
+  String CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS = "hbase.backup.continuous.pitr.window.days";
+ long DEFAULT_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS = 30;
+
enum BackupCommand {
CREATE,
CANCEL,
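
Operators who want a window other than the 30-day default can presumably set the
new key in hbase-site.xml; a minimal sketch using the key added above (the 45-day
value is only an example):

    <property>
      <name>hbase.backup.continuous.pitr.window.days</name>
      <!-- example override; the default added by this patch is 30 -->
      <value>45</value>
    </property>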
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index ab9ca1c4ed2..e9d14d1426d 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.backup.impl;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
@@ -24,6 +26,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_DELETE;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_FORCE_DELETE_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_IGNORECHECKSUM_DESC;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
@@ -46,8 +50,12 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_
import java.io.IOException;
import java.net.URI;
+import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.agrona.collections.MutableLong;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -632,15 +640,18 @@ public final class BackupCommands {
printUsage();
throw new IOException(INCORRECT_USAGE);
}
+
+ boolean isForceDelete = cmdline.hasOption(OPTION_FORCE_DELETE);
super.execute();
if (cmdline.hasOption(OPTION_KEEP)) {
- executeDeleteOlderThan(cmdline);
+ executeDeleteOlderThan(cmdline, isForceDelete);
} else if (cmdline.hasOption(OPTION_LIST)) {
- executeDeleteListOfBackups(cmdline);
+ executeDeleteListOfBackups(cmdline, isForceDelete);
}
}
-    private void executeDeleteOlderThan(CommandLine cmdline) throws IOException {
+    private void executeDeleteOlderThan(CommandLine cmdline, boolean isForceDelete)
+      throws IOException {
String value = cmdline.getOptionValue(OPTION_KEEP);
int days = 0;
try {
@@ -662,6 +673,7 @@ public final class BackupCommands {
BackupAdminImpl admin = new BackupAdminImpl(conn)) {
history = sysTable.getBackupHistory(-1, dateFilter);
String[] backupIds = convertToBackupIds(history);
+ validatePITRBackupDeletion(backupIds, isForceDelete);
int deleted = admin.deleteBackups(backupIds);
System.out.println("Deleted " + deleted + " backups. Total older than
" + days + " days: "
+ backupIds.length);
@@ -680,10 +692,11 @@ public final class BackupCommands {
return ids;
}
-    private void executeDeleteListOfBackups(CommandLine cmdline) throws IOException {
+    private void executeDeleteListOfBackups(CommandLine cmdline, boolean isForceDelete)
+      throws IOException {
String value = cmdline.getOptionValue(OPTION_LIST);
String[] backupIds = value.split(",");
-
+ validatePITRBackupDeletion(backupIds, isForceDelete);
try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
int deleted = admin.deleteBackups(backupIds);
System.out.println("Deleted " + deleted + " backups. Total requested:
" + backupIds.length);
@@ -695,12 +708,162 @@ public final class BackupCommands {
}
+    /**
+     * Validates whether the specified backups can be deleted while preserving Point-In-Time
+     * Recovery (PITR) capabilities. If a backup is the only remaining full backup enabling PITR
+     * for certain tables, deletion is prevented unless forced.
+     * @param backupIds     Array of backup IDs to validate.
+     * @param isForceDelete Flag indicating whether deletion should proceed regardless of PITR
+     *                      constraints.
+     * @throws IOException If a backup is essential for PITR and force deletion is not enabled.
+     */
+    private void validatePITRBackupDeletion(String[] backupIds, boolean isForceDelete)
+      throws IOException {
+      if (!isForceDelete) {
+        for (String backupId : backupIds) {
+          List<TableName> affectedTables = getTablesDependentOnBackupForPITR(backupId);
+          if (!affectedTables.isEmpty()) {
+            String errMsg = String.format(
+              "Backup %s is the only FULL backup remaining that enables PITR for tables: %s. "
+                + "Use the force option to delete it anyway.",
+              backupId, affectedTables);
+            System.err.println(errMsg);
+            throw new IOException(errMsg);
+          }
+        }
+      }
+    }
+
+    /**
+     * Identifies tables that rely on the specified backup for PITR. If a table has no other valid
+     * FULL backups that can facilitate recovery to all points within the PITR retention window,
+     * it is added to the dependent list.
+     * @param backupId The backup ID being evaluated.
+     * @return List of tables dependent on the specified backup for PITR.
+     * @throws IOException If backup metadata cannot be retrieved.
+     */
+    private List<TableName> getTablesDependentOnBackupForPITR(String backupId) throws IOException {
+ List<TableName> dependentTables = new ArrayList<>();
+
+      try (final BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) {
+        BackupInfo targetBackup = backupSystemTable.readBackupInfo(backupId);
+
+        if (targetBackup == null) {
+          throw new IOException("Backup info not found for backupId: " + backupId);
+        }
+
+ // Only full backups are mandatory for PITR
+ if (!BackupType.FULL.equals(targetBackup.getType())) {
+ return List.of();
+ }
+
+        // Retrieve the tables with continuous backup enabled and their start times
+        Map<TableName, Long> continuousBackupStartTimes =
+          backupSystemTable.getContinuousBackupTableSet();
+
+ // Determine the PITR time window
+        long pitrWindowDays = getConf().getLong(CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS,
+          DEFAULT_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS);
+        long currentTime = EnvironmentEdgeManager.getDelegate().currentTime();
+        final MutableLong pitrMaxStartTime =
+          new MutableLong(currentTime - TimeUnit.DAYS.toMillis(pitrWindowDays));
+
+        // For all tables, determine the earliest (minimum) continuous backup start time.
+        // This represents the actual earliest point-in-time recovery (PITR) timestamp
+        // that can be used, ensuring we do not go beyond the available backup data.
+ long minContinuousBackupStartTime = currentTime;
+ for (TableName table : targetBackup.getTableNames()) {
+ minContinuousBackupStartTime = Math.min(minContinuousBackupStartTime,
+ continuousBackupStartTimes.getOrDefault(table, currentTime));
+ }
+
+        // The PITR max start time should be the maximum of the calculated minimum continuous
+        // backup start time and the default PITR max start time (based on the configured window).
+        // This ensures that PITR does not extend beyond what is practically possible.
+        pitrMaxStartTime.set(Math.max(minContinuousBackupStartTime, pitrMaxStartTime.longValue()));
+
+ for (TableName table : targetBackup.getTableNames()) {
+          // This backup is not necessary for this table since it doesn't have PITR enabled
+ if (!continuousBackupStartTimes.containsKey(table)) {
+ continue;
+ }
+ if (
+ !isValidPITRBackup(targetBackup, table, continuousBackupStartTimes,
+ pitrMaxStartTime.longValue())
+ ) {
+ continue; // This backup is not crucial for PITR of this table
+ }
+
+ // Check if another valid full backup exists for this table
+          List<BackupInfo> backupHistory = backupSystemTable.getBackupInfos(BackupState.COMPLETE);
+ boolean hasAnotherValidBackup = backupHistory.stream()
+            .anyMatch(backup -> !backup.getBackupId().equals(backupId) && isValidPITRBackup(backup,
+              table, continuousBackupStartTimes, pitrMaxStartTime.longValue()));
+
+ if (!hasAnotherValidBackup) {
+ dependentTables.add(table);
+ }
+ }
+ }
+ return dependentTables;
+ }
+
+    /**
+     * Determines if a given backup is a valid candidate for Point-In-Time Recovery (PITR) for a
+     * specific table. A valid backup ensures that recovery is possible to any point within the
+     * PITR retention window. A backup qualifies if:
+     * <ul>
+     * <li>It is a FULL backup.</li>
+     * <li>It contains the specified table.</li>
+     * <li>Its completion timestamp is before the PITR retention window start time.</li>
+     * <li>Its start timestamp is on or after the table’s continuous backup start time.</li>
+     * </ul>
+     * @param backupInfo             The backup information being evaluated.
+     * @param tableName              The table for which PITR validity is being checked.
+     * @param continuousBackupTables A map of tables to their continuous backup start time.
+     * @param pitrMaxStartTime       The maximum allowed start timestamp for PITR eligibility.
+     * @return {@code true} if the backup enables recovery to all valid points in time for the
+     *         table; {@code false} otherwise.
+     */
+    private boolean isValidPITRBackup(BackupInfo backupInfo, TableName tableName,
+      Map<TableName, Long> continuousBackupTables, long pitrMaxStartTime) {
+ // Only FULL backups are mandatory for PITR
+ if (!BackupType.FULL.equals(backupInfo.getType())) {
+ return false;
+ }
+
+ // The backup must include the table to be relevant for PITR
+ if (!backupInfo.getTableNames().contains(tableName)) {
+ return false;
+ }
+
+      // The backup must have been completed before the PITR retention window starts,
+      // otherwise, it won't be helpful in cases where the recovery point is between
+      // pitrMaxStartTime and the backup completion time.
+ if (backupInfo.getCompleteTs() > pitrMaxStartTime) {
+ return false;
+ }
+
+ // Retrieve the table's continuous backup start time
+      long continuousBackupStartTime = continuousBackupTables.getOrDefault(tableName, 0L);
+
+      // The backup must have been started on or after the table’s continuous backup start time,
+      // otherwise, it won't be helpful in some cases because we wouldn't have the WAL entries
+      // between the backup start time and the continuous backup start time.
+ if (backupInfo.getStartTs() < continuousBackupStartTime) {
+ return false;
+ }
+
+ return true;
+ }
+
@Override
protected void printUsage() {
System.out.println(DELETE_CMD_USAGE);
Options options = new Options();
options.addOption(OPTION_KEEP, true, OPTION_KEEP_DESC);
options.addOption(OPTION_LIST, true, OPTION_BACKUP_LIST_DESC);
+ options.addOption(OPTION_FORCE_DELETE, false, OPTION_FORCE_DELETE_DESC);
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.setLeftPadding(2);
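
To make the eligibility rules above concrete, here is a rough walk-through of
the timeline exercised by the new test (continuous backup began 40 days ago,
30-day PITR window). The variable names below are illustrative and do not
appear in the patch:

    // Illustrative sketch only; names are not from the patch.
    long now = System.currentTimeMillis();
    long oneDay = TimeUnit.DAYS.toMillis(1);
    long continuousStart = now - 40 * oneDay;  // WAL history available from here
    long pitrMaxStartTime = now - 30 * oneDay; // start of the recovery window

    // A FULL backup completed 31 days ago satisfies completeTs <= pitrMaxStartTime
    // and startTs >= continuousStart, so every point in the window is reachable.
    // A FULL backup completed 15 days ago fails completeTs <= pitrMaxStartTime:
    // recovery points between 30 and 15 days ago would be unreachable from it.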
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
index 785859c5280..31eaaff5051 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.backup;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -138,7 +138,7 @@ public class TestBackupDelete extends TestBackupBase {
assertTrue(ret == 0);
} catch (Exception e) {
LOG.error("failed", e);
- Assert.fail(e.getMessage());
+ fail(e.getMessage());
}
String output = baos.toString();
LOG.info(baos.toString());
@@ -154,7 +154,7 @@ public class TestBackupDelete extends TestBackupBase {
assertTrue(ret == 0);
} catch (Exception e) {
LOG.error("failed", e);
- Assert.fail(e.getMessage());
+ fail(e.getMessage());
}
output = baos.toString();
LOG.info(baos.toString());
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java
new file mode 100644
index 00000000000..919d3e79f72
--- /dev/null
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithContinuousBackupAndPITR.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS;
+import static org.apache.hadoop.hbase.backup.replication.ContinuousBackupReplicationEndpoint.ONE_DAY_IN_MILLISECONDS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+import java.util.Set;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupDeleteWithContinuousBackupAndPITR extends TestBackupBase {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestBackupDeleteWithContinuousBackupAndPITR.class);
+
+ private BackupSystemTable backupSystemTable;
+ private String backupId1;
+ private String backupId2;
+ private String backupId3;
+ private String backupId4;
+ private String backupId5;
+
+  /**
+   * Sets up the backup environment before each test.
+   * <p>
+   * This includes:
+   * <ul>
+   * <li>Setting a 30-day PITR (Point-In-Time Recovery) window</li>
+   * <li>Registering table2 as a continuous backup table starting 40 days ago</li>
+   * <li>Creating a mix of full and incremental backups at specific time offsets (using
+   * EnvironmentEdge injection) to simulate scenarios like backups outside the PITR window, valid
+   * PITR backups, and incomplete PITR chains</li>
+   * <li>Resetting the system clock after time manipulation</li>
+   * </ul>
+   * This setup enables tests to evaluate deletion behavior of backups based on age, table type,
+   * and PITR chain requirements.
+   */
+ @Before
+ public void setup() throws Exception {
+ conf1.setLong(CONF_CONTINUOUS_BACKUP_PITR_WINDOW_DAYS, 30);
+ backupSystemTable = new BackupSystemTable(TEST_UTIL.getConnection());
+
+ long currentTime = System.currentTimeMillis();
+ long backupStartTime = currentTime - 40 * ONE_DAY_IN_MILLISECONDS;
+    backupSystemTable.addContinuousBackupTableSet(Set.of(table2), backupStartTime);
+
+ backupId1 = fullTableBackup(Lists.newArrayList(table1));
+ assertTrue(checkSucceeded(backupId1));
+
+ // 31 days back
+    EnvironmentEdgeManager
+      .injectEdge(() -> System.currentTimeMillis() - 31 * ONE_DAY_IN_MILLISECONDS);
+ backupId2 = fullTableBackup(Lists.newArrayList(table2));
+ assertTrue(checkSucceeded(backupId2));
+
+ // 32 days back
+    EnvironmentEdgeManager
+      .injectEdge(() -> System.currentTimeMillis() - 32 * ONE_DAY_IN_MILLISECONDS);
+ backupId3 = fullTableBackup(Lists.newArrayList(table2));
+ assertTrue(checkSucceeded(backupId3));
+
+ // 15 days back
+    EnvironmentEdgeManager
+      .injectEdge(() -> System.currentTimeMillis() - 15 * ONE_DAY_IN_MILLISECONDS);
+ backupId4 = fullTableBackup(Lists.newArrayList(table2));
+ assertTrue(checkSucceeded(backupId4));
+
+ // Reset clock
+ EnvironmentEdgeManager.reset();
+
+ backupId5 = incrementalTableBackup(Lists.newArrayList(table1));
+ assertTrue(checkSucceeded(backupId5));
+ }
+
+ @After
+ public void teardown() throws Exception {
+ EnvironmentEdgeManager.reset();
+ // Try to delete all backups forcefully if they exist
+    for (String id : List.of(backupId1, backupId2, backupId3, backupId4, backupId5)) {
+ try {
+ deleteBackup(id, true);
+ } catch (Exception ignored) {
+ }
+ }
+ }
+
+ @Test
+ public void testDeleteIncrementalBackup() throws Exception {
+ assertDeletionSucceeds(backupSystemTable, backupId5, false);
+ }
+
+ @Test
+ public void testDeleteFullBackupNonContinuousTable() throws Exception {
+ assertDeletionSucceeds(backupSystemTable, backupId1, false);
+ }
+
+ @Test
+ public void testDeletePITRIncompleteBackup() throws Exception {
+ assertDeletionSucceeds(backupSystemTable, backupId4, false);
+ }
+
+ @Test
+ public void testDeleteValidPITRBackupWithAnotherPresent() throws Exception {
+ assertDeletionSucceeds(backupSystemTable, backupId2, false);
+ }
+
+ @Test
+ public void testDeleteOnlyValidPITRBackupFails() throws Exception {
+ // Delete backupId2 (31 days ago) — this should succeed
+ assertDeletionSucceeds(backupSystemTable, backupId2, false);
+
+    // Now backupId3 (32 days ago) is the only remaining PITR backup, so deletion should fail
+ assertDeletionFails(backupSystemTable, backupId3, false);
+ }
+
+ @Test
+ public void testForceDeleteOnlyValidPITRBackup() throws Exception {
+ // Delete backupId2 (31 days ago)
+ assertDeletionSucceeds(backupSystemTable, backupId2, false);
+
+ // Force delete backupId3 — should succeed despite PITR constraints
+ assertDeletionSucceeds(backupSystemTable, backupId3, true);
+ }
+
+ private void assertDeletionSucceeds(BackupSystemTable table, String backupId,
+ boolean isForceDelete) throws Exception {
+ int ret = deleteBackup(backupId, isForceDelete);
+ assertEquals(0, ret);
+ assertFalse("Backup should be deleted but still exists!",
backupExists(table, backupId));
+ }
+
+  private void assertDeletionFails(BackupSystemTable table, String backupId, boolean isForceDelete)
+    throws Exception {
+ int ret = deleteBackup(backupId, isForceDelete);
+ assertNotEquals(0, ret);
+ assertTrue("Backup should still exist after failed deletion!",
backupExists(table, backupId));
+ }
+
+  private boolean backupExists(BackupSystemTable table, String backupId) throws Exception {
+ return table.getBackupHistory().stream()
+ .anyMatch(backup -> backup.getBackupId().equals(backupId));
+ }
+
+  private int deleteBackup(String backupId, boolean isForceDelete) throws Exception {
+ String[] args = buildBackupDeleteArgs(backupId, isForceDelete);
+ return ToolRunner.run(conf1, new BackupDriver(), args);
+ }
+
+  private String[] buildBackupDeleteArgs(String backupId, boolean isForceDelete) {
+ return isForceDelete
+ ? new String[] { "delete", "-l", backupId, "-fd" }
+ : new String[] { "delete", "-l", backupId };
+ }
+}