This is an automated email from the ASF dual-hosted git repository.

ndimiduk pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new ba56db03016 HBASE-28539 Merge of incremental backups fails if backups are on a separate FileSystem (#5867)
ba56db03016 is described below

commit ba56db030165c2bb789f091425e650c4243217cb
Author: bcolyn-ngdata <94831216+bcolyn-ngd...@users.noreply.github.com>
AuthorDate: Thu Jun 6 15:38:14 2024 +0200

    HBASE-28539 Merge of incremental backups fails if backups are on a separate FileSystem (#5867)
    
    When the backups are stored on a location that is not the
    DistributedFileSystem underpinning HBase itself, merging of incremental
    backups fails. Detected with backups stored on S3A, but this can be
    reproduced with any other filesystem (such as LocalFileSystem).
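
    To illustrate the Hadoop API distinction behind the fix, here is a minimal
    standalone sketch (not part of this patch; the s3a URI is a hypothetical
    example, and resolving it requires the hadoop-aws connector on the
    classpath):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.FileSystem;
        import org.apache.hadoop.fs.Path;

        public class FsResolutionSketch {
          public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration(); // fs.defaultFS defaults to file:///
            Path backupRoot = new Path("s3a://backup-bucket/hbase-backups"); // hypothetical

            // FileSystem.get(conf) always returns the default filesystem,
            // no matter where backupRoot actually lives.
            FileSystem defaultFs = FileSystem.get(conf);
            System.out.println("default FS: " + defaultFs.getUri());

            // A call such as defaultFs.exists(backupRoot) then fails with
            // java.lang.IllegalArgumentException: Wrong FS.

            // Path.getFileSystem(conf) resolves the filesystem from the path's
            // own scheme and authority; this is the pattern the fix uses.
            FileSystem backupFs = backupRoot.getFileSystem(conf);
            System.out.println("backup FS: " + backupFs.getUri());
          }
        }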
    
    Reviewed-by: Ray Mattingly <rmdmattin...@gmail.com>
    Signed-off-by: Nick Dimiduk <ndimi...@apache.org>
    Signed-off-by: Andor Molnár <an...@apache.org>
---
 .../backup/mapreduce/MapReduceBackupMergeJob.java  | 12 ++++--
 .../hadoop/hbase/backup/TestBackupMerge.java       | 44 ++++++++++++++++++++++
 2 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index 3b4cf0246d7..1f9ae16c1df 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -96,7 +96,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
     boolean finishedTables = false;
     Connection conn = ConnectionFactory.createConnection(getConf());
     BackupSystemTable table = new BackupSystemTable(conn);
-    FileSystem fs = FileSystem.get(getConf());
+    FileSystem fs = null;
 
     try {
 
@@ -112,6 +112,8 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
 
       BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
       String backupRoot = bInfo.getBackupRootDir();
+      Path backupRootPath = new Path(backupRoot);
+      fs = backupRootPath.getFileSystem(conf);
 
       for (int i = 0; i < tableNames.length; i++) {
         LOG.info("Merge backup images for " + tableNames[i]);
@@ -120,7 +122,9 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
         Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
         String dirs = StringUtils.join(dirPaths, ",");
 
-        Path bulkOutputPath = BackupUtils.getBulkOutputDir(
+        // bulkOutputPath should be on the same filesystem as backupRoot
+        Path tmpRestoreOutputDir = HBackupFileSystem.getBackupTmpDirPath(backupRoot);
+        Path bulkOutputPath = BackupUtils.getBulkOutputDir(tmpRestoreOutputDir,
           BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
         // Delete content if exists
         if (fs.exists(bulkOutputPath)) {
@@ -186,7 +190,9 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       if (!finishedTables) {
         // cleanup bulk directories and finish merge
         // merge MUST be repeated (no need for repair)
-        cleanupBulkLoadDirs(fs, toPathList(processedTableList));
+        if (fs != null) {
+          cleanupBulkLoadDirs(fs, toPathList(processedTableList));
+        }
         table.finishMergeOperation();
         table.finishBackupExclusiveOperation();
         throw new IOException("Backup merge operation failed, you should try it again", e);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index c34f6be43b5..5a6d21dad84 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hbase.backup;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.util.List;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.TableName;
@@ -124,4 +126,46 @@ public class TestBackupMerge extends TestBackupBase {
     admin.close();
     conn.close();
   }
+
+  @Test
+  public void testIncBackupMergeRestoreSeparateFs() throws Exception {
+    String originalBackupRoot = BACKUP_ROOT_DIR;
+    // prepare BACKUP_ROOT_DIR on a different filesystem from HBase.
+    String backupTargetDir = TEST_UTIL.getDataTestDir("backupTarget").toString();
+    BACKUP_ROOT_DIR = new File(backupTargetDir).toURI().toString();
+
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      BackupAdminImpl client = new BackupAdminImpl(conn);
+      List<TableName> tables = Lists.newArrayList(table1, table2);
+
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String backupIdFull = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdFull));
+
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple2 = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+
+      try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
+        String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
+        // this throws java.lang.IllegalArgumentException: Wrong FS prior to HBASE-28539
+        bAdmin.mergeBackups(backups);
+      }
+
+      assertTrue(
+        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdFull).toUri()).exists());
+      assertFalse(
+        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple).toUri())
+          .exists());
+      assertTrue(
+        new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple2).toUri())
+          .exists());
+    } finally {
+      BACKUP_ROOT_DIR = originalBackupRoot;
+    }
+  }
 }
