This is an automated email from the ASF dual-hosted git repository.
lupeng pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.6 by this push:
new 92d4774c07e HBASE-29296 Missing critical snapshot expiration checks
(#6970)
92d4774c07e is described below
commit 92d4774c07e4b13b1c04f99911bc2d947be3256a
Author: Dimas Shidqi Parikesit <[email protected]>
AuthorDate: Sat Aug 2 10:39:06 2025 -0400
HBASE-29296 Missing critical snapshot expiration checks (#6970)
Signed-off-by: Peng Lu <[email protected]>
---
.../backup/impl/IncrementalTableBackupClient.java | 10 +
.../hadoop/hbase/backup/util/RestoreTool.java | 14 ++
.../hbase/backup/TestBackupRestoreExpiry.java | 232 +++++++++++++++++++++
.../hbase/master/procedure/SnapshotProcedure.java | 8 +
.../hbase/master/snapshot/TakeSnapshotHandler.java | 10 +
.../client/TestSnapshotWithTTLFromClient.java | 8 +-
.../TestSnapshotProcedureEarlyExpiration.java | 102 +++++++++
.../master/snapshot/TestTakeSnapshotHandler.java | 9 +
8 files changed, 389 insertions(+), 4 deletions(-)
diff --git
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 1e07c026f0a..d51f1f47151 100644
---
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -53,7 +53,9 @@ import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotRegionLocator;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.util.Tool;
@@ -63,6 +65,7 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
/**
@@ -541,6 +544,13 @@ public class IncrementalTableBackupClient extends
TableBackupClient {
SnapshotDescriptionUtils.readSnapshotInfo(fs, manifestDir);
SnapshotManifest manifest =
SnapshotManifest.open(conf, fs, manifestDir, snapshotDescription);
+ if (
+
SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
+ snapshotDescription.getCreationTime(),
EnvironmentEdgeManager.currentTime())
+ ) {
+ throw new SnapshotTTLExpiredException(
+ ProtobufUtil.createSnapshotDesc(snapshotDescription));
+ }
ColumnFamilyDescriptor[] backupCfs =
manifest.getTableDescriptor().getColumnFamilies();
if (!areCfsCompatible(currentCfs, backupCfs)) {
diff --git
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index 7549b9a8c69..50b47565d74 100644
---
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -54,6 +55,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
@@ -265,6 +267,12 @@ public class RestoreTool {
Path tableInfoPath = this.getTableInfoPath(tableName);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs,
tableInfoPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath,
desc);
+ if (
+ SnapshotDescriptionUtils.isExpiredSnapshot(desc.getTtl(),
desc.getCreationTime(),
+ EnvironmentEdgeManager.currentTime())
+ ) {
+ throw new
SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(desc));
+ }
TableDescriptor tableDescriptor = manifest.getTableDescriptor();
if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under
tableInfoPath: "
@@ -310,6 +318,12 @@ public class RestoreTool {
SnapshotDescription desc =
SnapshotDescriptionUtils.readSnapshotInfo(fileSys,
tableSnapshotPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys,
tableSnapshotPath, desc);
+ if (
+ SnapshotDescriptionUtils.isExpiredSnapshot(desc.getTtl(),
desc.getCreationTime(),
+ EnvironmentEdgeManager.currentTime())
+ ) {
+ throw new
SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(desc));
+ }
tableDescriptor = manifest.getTableDescriptor();
} else {
tableDescriptor = getTableDesc(tableName);
diff --git
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreExpiry.java
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreExpiry.java
new file mode 100644
index 00000000000..4126ed74895
--- /dev/null
+++
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRestoreExpiry.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.LogRoller;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupRestoreExpiry extends TestBackupBase {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestBackupRestoreExpiry.class);
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ TEST_UTIL = new HBaseTestingUtil();
+ conf1 = TEST_UTIL.getConfiguration();
+ conf1.setLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, 30);
+ autoRestoreOnFailure = true;
+ useSecondCluster = false;
+ setUpHelper();
+ }
+
+ public void ensurePreviousBackupTestsAreCleanedUp() throws Exception {
+ TEST_UTIL.flush(table1);
+ TEST_UTIL.flush(table2);
+
+ TEST_UTIL.truncateTable(table1).close();
+ TEST_UTIL.truncateTable(table2).close();
+
+ if (TEST_UTIL.getAdmin().tableExists(table1_restore)) {
+ TEST_UTIL.flush(table1_restore);
+ TEST_UTIL.truncateTable(table1_restore).close();
+ }
+
+ TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().forEach(rst -> {
+ try {
+ LogRoller walRoller = rst.getRegionServer().getWalRoller();
+ walRoller.requestRollAll();
+ walRoller.waitUntilWalRollFinished();
+ } catch (Exception ignored) {
+ }
+ });
+
+ try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
+ loadTable(table);
+ }
+
+ try (Table table = TEST_UTIL.getConnection().getTable(table2)) {
+ loadTable(table);
+ }
+ }
+
+ @Test
+ public void testSequentially() throws Exception {
+ try {
+ testRestoreOnExpiredFullBackup();
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ ensurePreviousBackupTestsAreCleanedUp();
+ }
+
+ try {
+ testIncrementalBackupOnExpiredFullBackup();
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ ensurePreviousBackupTestsAreCleanedUp();
+ }
+ }
+
+ public void testRestoreOnExpiredFullBackup() throws Exception {
+ byte[] mobFam = Bytes.toBytes("mob");
+
+ List<TableName> tables = Lists.newArrayList(table1);
+ TableDescriptor newTable1Desc =
+
TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
+
.newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+ Connection conn = TEST_UTIL.getConnection();
+ BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables,
BACKUP_ROOT_DIR);
+ String fullBackupId = backupAdmin.backupTables(request);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ TableName[] fromTables = new TableName[] { table1 };
+ TableName[] toTables = new TableName[] { table1_restore };
+
+ EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
+ // time + 30s
+ @Override
+ public long currentTime() {
+ return System.currentTimeMillis() + (30 * 1000);
+ }
+ });
+
+ assertThrows(SnapshotTTLExpiredException.class, () -> {
+ backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR,
fullBackupId, false,
+ fromTables, toTables, true, true));
+ });
+
+ EnvironmentEdgeManager.reset();
+ backupAdmin.close();
+ }
+
+ public void testIncrementalBackupOnExpiredFullBackup() throws Exception {
+ byte[] mobFam = Bytes.toBytes("mob");
+
+ List<TableName> tables = Lists.newArrayList(table1);
+ TableDescriptor newTable1Desc =
+
TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder
+
.newBuilder(mobFam).setMobEnabled(true).setMobThreshold(5L).build()).build();
+ TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
+
+ Connection conn = TEST_UTIL.getConnection();
+ BackupAdminImpl backupAdmin = new BackupAdminImpl(conn);
+ BackupRequest request = createBackupRequest(BackupType.FULL, tables,
BACKUP_ROOT_DIR);
+ String fullBackupId = backupAdmin.backupTables(request);
+ assertTrue(checkSucceeded(fullBackupId));
+
+ TableName[] fromTables = new TableName[] { table1 };
+ TableName[] toTables = new TableName[] { table1_restore };
+
+ List<LocatedFileStatus> preRestoreBackupFiles = getBackupFiles();
+ backupAdmin.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR,
fullBackupId, false,
+ fromTables, toTables, true, true));
+ List<LocatedFileStatus> postRestoreBackupFiles = getBackupFiles();
+
+ // Check that the backup files are the same before and after the restore
process
+ Assert.assertEquals(postRestoreBackupFiles, preRestoreBackupFiles);
+ Assert.assertEquals(TEST_UTIL.countRows(table1_restore), NB_ROWS_IN_BATCH);
+
+ int ROWS_TO_ADD = 1_000;
+ // different IDs so that rows don't overlap
+ insertIntoTable(conn, table1, famName, 3, ROWS_TO_ADD);
+ insertIntoTable(conn, table1, mobFam, 4, ROWS_TO_ADD);
+
+ Admin admin = conn.getAdmin();
+ List<HRegion> currentRegions =
TEST_UTIL.getHBaseCluster().getRegions(table1);
+ for (HRegion region : currentRegions) {
+ byte[] name = region.getRegionInfo().getEncodedNameAsBytes();
+ admin.splitRegionAsync(name).get();
+ }
+
+ TEST_UTIL.waitTableAvailable(table1);
+
+ // Make sure we've split regions
+ assertNotEquals(currentRegions,
TEST_UTIL.getHBaseCluster().getRegions(table1));
+
+ EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
+ // time + 30s
+ @Override
+ public long currentTime() {
+ return System.currentTimeMillis() + (30 * 1000);
+ }
+ });
+
+ IOException e = assertThrows(IOException.class, () -> {
+ backupAdmin
+ .backupTables(createBackupRequest(BackupType.INCREMENTAL, tables,
BACKUP_ROOT_DIR));
+ });
+ assertTrue(e.getCause() instanceof SnapshotTTLExpiredException);
+
+ EnvironmentEdgeManager.reset();
+ backupAdmin.close();
+ }
+
+ private List<LocatedFileStatus> getBackupFiles() throws IOException {
+ FileSystem fs = TEST_UTIL.getTestFileSystem();
+ RemoteIterator<LocatedFileStatus> iter = fs.listFiles(new
Path(BACKUP_ROOT_DIR), true);
+ List<LocatedFileStatus> files = new ArrayList<>();
+
+ while (iter.hasNext()) {
+ files.add(iter.next());
+ }
+
+ return files;
+ }
+}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotProcedure.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotProcedure.java
index 572cdce0cf6..17a9c083896 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotProcedure.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SnapshotProcedure.java
@@ -50,7 +50,9 @@ import
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.yetus.audience.InterfaceAudience;
@@ -159,6 +161,12 @@ public class SnapshotProcedure extends
AbstractStateMachineTableProcedure<Snapsh
if (isSnapshotCorrupted()) {
throw new CorruptedSnapshotException(snapshot.getName());
}
+ if (
+ SnapshotDescriptionUtils.isExpiredSnapshot(snapshot.getTtl(),
+ snapshot.getCreationTime(), EnvironmentEdgeManager.currentTime())
+ ) {
+ throw new
SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshot));
+ }
completeSnapshot(env);
setNextState(SnapshotState.SNAPSHOT_POST_OPERATION);
return Flow.HAS_MORE_STATE;
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index b24f7949404..a3a25e3d3e3 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -48,7 +48,9 @@ import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
@@ -225,6 +227,14 @@ public abstract class TakeSnapshotHandler extends
EventHandler
status.setStatus("Verifying snapshot: " + snapshot.getName());
verifier.verifySnapshot(workingDir, true);
+ // HBASE-29296 check snapshot is not expired
+ if (
+ SnapshotDescriptionUtils.isExpiredSnapshot(snapshot.getTtl(),
snapshot.getCreationTime(),
+ EnvironmentEdgeManager.currentTime())
+ ) {
+ throw new
SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshot));
+ }
+
// complete the snapshot, atomically moving from tmp to .snapshot dir.
SnapshotDescriptionUtils.completeSnapshot(this.snapshotDir,
this.workingDir, this.rootFs,
this.workingDirFs, this.conf);
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java
index 4309b922b8f..9713569e406 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotWithTTLFromClient.java
@@ -143,7 +143,7 @@ public class TestSnapshotWithTTLFromClient {
assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME));
// create snapshot of given table with specified ttl
- createSnapshotWithTTL(TABLE_NAME, snapshotName, 1);
+ createSnapshotWithTTL(TABLE_NAME, snapshotName, 5);
Admin admin = UTIL.getAdmin();
// Disable and drop table
@@ -152,7 +152,7 @@ public class TestSnapshotWithTTLFromClient {
assertFalse(UTIL.getAdmin().tableExists(TABLE_NAME));
// Sleep so that TTL may expire
- Threads.sleep(2000);
+ Threads.sleep(10000);
// restore snapshot which has expired
try {
@@ -192,13 +192,13 @@ public class TestSnapshotWithTTLFromClient {
assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME));
// create snapshot of given table with specified ttl
- createSnapshotWithTTL(TABLE_NAME, snapshotName, 1);
+ createSnapshotWithTTL(TABLE_NAME, snapshotName, 5);
Admin admin = UTIL.getAdmin();
assertTrue(UTIL.getAdmin().tableExists(TABLE_NAME));
// Sleep so that TTL may expire
- Threads.sleep(2000);
+ Threads.sleep(10000);
// clone snapshot which has expired
try {
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java
new file mode 100644
index 00000000000..0870f16face
--- /dev/null
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSnapshotProcedureEarlyExpiration.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SnapshotState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+
+public class TestSnapshotProcedureEarlyExpiration extends
TestSnapshotProcedure {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestSnapshotProcedureEarlyExpiration.class);
+
+ @Before
+ @Override
+ public void setup() throws Exception { // Copied from TestSnapshotProcedure
with modified
+ // SnapshotDescription
+ TEST_UTIL = new HBaseTestingUtil();
+ Configuration config = TEST_UTIL.getConfiguration();
+ // using SnapshotVerifyProcedure to verify snapshot
+ config.setInt("hbase.snapshot.remote.verify.threshold", 1);
+ // disable info server. Info server is useful when we run unit tests
locally,
+ // but it will
+ // fail integration testing on Jenkins.
+ // config.setInt(HConstants.MASTER_INFO_PORT, 8080);
+
+ // delay dispatch so that we can do something, for example kill a target
server
+ config.setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 10000);
+ config.setInt(RemoteProcedureDispatcher.DISPATCH_MAX_QUEUE_SIZE_CONF_KEY,
128);
+ TEST_UTIL.startMiniCluster(3);
+ master = TEST_UTIL.getHBaseCluster().getMaster();
+ TABLE_NAME = TableName.valueOf(Bytes.toBytes("SPTestTable"));
+ CF = Bytes.toBytes("cf");
+ SNAPSHOT_NAME = "SnapshotProcedureTest";
+
+ Map<String, Object> properties = new HashMap<>();
+ properties.put("TTL", 1L);
+ snapshot = new SnapshotDescription(SNAPSHOT_NAME, TABLE_NAME,
SnapshotType.FLUSH, null, -1, -1,
+ properties);
+
+ snapshotProto = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
+ snapshotProto = SnapshotDescriptionUtils.validate(snapshotProto,
master.getConfiguration());
+ final byte[][] splitKeys = new RegionSplitter.HexStringSplit().split(10);
+ Table table = TEST_UTIL.createTable(TABLE_NAME, CF, splitKeys);
+ TEST_UTIL.loadTable(table, CF, false);
+ }
+
+ @Test
+ public void testSnapshotEarlyExpiration() throws Exception {
+ ProcedureExecutor<MasterProcedureEnv> procExec =
master.getMasterProcedureExecutor();
+ MasterProcedureEnv env = procExec.getEnvironment();
+ SnapshotProcedure sp = new SnapshotProcedure(env, snapshotProto);
+ SnapshotProcedure spySp = getDelayedOnSpecificStateSnapshotProcedure(sp,
+ procExec.getEnvironment(), SnapshotState.SNAPSHOT_COMPLETE_SNAPSHOT);
+
+ long procId = procExec.submitProcedure(spySp);
+
+ ProcedureTestingUtility.waitProcedure(master.getMasterProcedureExecutor(),
procId);
+ assertTrue(spySp.isFailed());
+ List<SnapshotProtos.SnapshotDescription> snapshots =
+ master.getSnapshotManager().getCompletedSnapshots();
+ assertEquals(0, snapshots.size());
+ }
+}
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
index e9d3b9784d6..d18ef5728d2 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
@@ -100,6 +101,14 @@ public class TestTakeSnapshotHandler {
assertEquals(-1, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize());
}
+ @Test(expected = SnapshotTTLExpiredException.class)
+ public void testSnapshotEarlyExpiration() throws Exception {
+ UTIL.startMiniCluster();
+ Map<String, Object> snapshotProps = new HashMap<>();
+ snapshotProps.put("TTL", 1L);
+ createTableInsertDataAndTakeSnapshot(snapshotProps);
+ }
+
@After
public void shutdown() throws Exception {
UTIL.shutdownMiniCluster();