This is an automated email from the ASF dual-hosted git repository.
apurtell pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new ee748945a44 HBASE-29662 - Avoid regionDir/tableDir creation as part of .regioninfo file creation in HRegion initialize (#7406)
ee748945a44 is described below
commit ee748945a44168dbf780e0a07c6cceb728b13cfe
Author: gvprathyusha6 <[email protected]>
AuthorDate: Tue Nov 4 04:38:45 2025 +0530
HBASE-29662 - Avoid regionDir/tableDir creation as part of .regioninfo file creation in HRegion initialize (#7406)
Signed-off-by: Andrew Purtell <[email protected]>
Signed-off-by: Viraj Jasani <[email protected]>
Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
---
.../apache/hadoop/hbase/util/CommonFSUtils.java | 34 ++++++-
.../mapreduce/TestTableSnapshotInputFormat.java | 101 +++++++++++++++++++++
.../hadoop/hbase/master/janitor/MetaFixer.java | 26 ++++++
.../master/procedure/TruncateRegionProcedure.java | 15 +++
.../hbase/regionserver/HRegionFileSystem.java | 13 ++-
.../java/org/apache/hadoop/hbase/util/FSUtils.java | 28 +++++-
.../apache/hadoop/hbase/HBaseTestingUtility.java | 20 ++++
.../coprocessor/TestCoreRegionCoprocessor.java | 2 +
.../hadoop/hbase/master/janitor/TestMetaFixer.java | 1 +
.../TestCompactionArchiveConcurrentClose.java | 6 +-
.../TestCompactionArchiveIOException.java | 1 +
.../hadoop/hbase/regionserver/TestHRegion.java | 86 ++++++++++++++++--
.../regionserver/TestStoreFileRefresherChore.java | 3 +-
.../regionserver/wal/AbstractTestWALReplay.java | 3 +-
14 files changed, 323 insertions(+), 16 deletions(-)
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index 73bb6f38cd2..26ed7c98258 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -188,11 +188,39 @@ public final class CommonFSUtils {
*/
public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
boolean overwrite) throws IOException {
+ return create(fs, path, perm, overwrite, true);
+ }
+
+ /**
+ * Create the specified file on the filesystem. By default, this will:
+ * <ol>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the default replication</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param fs {@link FileSystem} on which to write the file
+ * @param path {@link Path} to the file to write
+ * @param perm initial permissions
+ * @param overwrite Whether or not the created file should be overwritten.
+ * @param isRecursiveCreate recursively create parent directories
+ * @return output stream to the created file
+ * @throws IOException if the file cannot be created
+ */
+ public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
+ boolean overwrite, boolean isRecursiveCreate) throws IOException {
if (LOG.isTraceEnabled()) {
- LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite);
+ LOG.trace("Creating file={} with permission={}, overwrite={}, recursive={}", path, perm,
+ overwrite, isRecursiveCreate);
+ }
+ if (isRecursiveCreate) {
+ return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
+ getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
+ } else {
+ return fs.createNonRecursive(path, perm, overwrite, getDefaultBufferSize(fs),
+ getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
}
- return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
- getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
}
/**
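Note on the new flag (illustrative, not part of the patch itself): when isRecursiveCreate is false the call goes through FileSystem#createNonRecursive, so the parent directory must already exist. A minimal sketch of a caller follows; the path and permission handling are assumptions for the example, and some FileSystem implementations do not support createNonRecursive at all.

    // Illustrative sketch, not from this patch: with isRecursiveCreate=false the
    // parent directory (/tmp/example-dir here) must already exist, or the call fails.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hbase.util.CommonFSUtils;

    public class NonRecursiveCreateSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/example-dir/example-file"); // illustrative path
        FsPermission perm = FsPermission.getFileDefault();
        // Recursive create (old behaviour) silently creates missing parents.
        // Non-recursive create (new option) throws if /tmp/example-dir is missing;
        // note that not every FileSystem implementation supports createNonRecursive.
        try (FSDataOutputStream out = CommonFSUtils.create(fs, file, perm, true, false)) {
          out.write(new byte[] { 1, 2, 3 });
        }
      }
    }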
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index eca275cf0a9..9909ce15af9 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TestTableSnapshotScanner;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -583,4 +584,104 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName);
Assert.assertFalse(fs.exists(restorePath));
}
+
+ /**
+ * Test that explicitly restores a snapshot to a temp directory and reads the restored regions via
+ * ClientSideRegionScanner through a MapReduce job.
+ * <p>
+ * This test verifies the full workflow: 1. Create and load a table with data 2. Create a snapshot
+ * and restore the snapshot to a temporary directory 3. Configure a job to read the restored
+ * regions via ClientSideRegionScanner using TableSnapshotInputFormat and verify that it succeeds
+ * 4. Delete restored temporary directory 5. Configure a new job and verify that it fails
+ */
+ @Test
+ public void testReadFromRestoredSnapshotViaMR() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final String snapshotName = tableName + "_snapshot";
+ try {
+ if (UTIL.getAdmin().tableExists(tableName)) {
+ UTIL.deleteTable(tableName);
+ }
+ UTIL.createTable(tableName, FAMILIES, new byte[][] { bbb, yyy });
+
+ Admin admin = UTIL.getAdmin();
+ int regionNum = admin.getRegions(tableName).size();
+ LOG.info("Created table with {} regions", regionNum);
+
+ Table table = UTIL.getConnection().getTable(tableName);
+ UTIL.loadTable(table, FAMILIES);
+ table.close();
+
+ Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
+ FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
+ null, snapshotName, rootDir, fs, true);
+ Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName);
+ RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir,
+ tempRestoreDir, snapshotName);
+ Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir));
+
+ Job job = Job.getInstance(UTIL.getConfiguration());
+ job.setJarByClass(TestTableSnapshotInputFormat.class);
+ TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
+ TestTableSnapshotInputFormat.class);
+ Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow());
+ Configuration conf = job.getConfiguration();
+ conf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+ conf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+ conf.setInt("hbase.mapreduce.splits.per.region", 1);
+ job.setReducerClass(TestTableSnapshotReducer.class);
+ job.setNumReduceTasks(1);
+ job.setOutputFormatClass(NullOutputFormat.class);
+ TableMapReduceUtil.initTableMapperJob(snapshotName, // table name (snapshot name in this case)
+ scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job,
+ false, false, TableSnapshotInputFormat.class);
+ TableMapReduceUtil.resetCacheConfig(conf);
+ Assert.assertTrue(job.waitForCompletion(true));
+ Assert.assertTrue(job.isSuccessful());
+
+ // Now verify that job fails when restore directory is deleted
+ Assert.assertTrue(fs.delete(tempRestoreDir, true));
+ Assert.assertFalse("Restore directory should not exist after deletion",
+ fs.exists(tempRestoreDir));
+ Job failureJob = Job.getInstance(UTIL.getConfiguration());
+ failureJob.setJarByClass(TestTableSnapshotInputFormat.class);
+ TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(),
+ TestTableSnapshotInputFormat.class);
+ Configuration failureConf = failureJob.getConfiguration();
+ // Configure job to use the deleted restore directory
+ failureConf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+ failureConf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+ failureConf.setInt("hbase.mapreduce.splits.per.region", 1);
+ failureJob.setReducerClass(TestTableSnapshotReducer.class);
+ failureJob.setNumReduceTasks(1);
+ failureJob.setOutputFormatClass(NullOutputFormat.class);
+
+ TableMapReduceUtil.initTableMapperJob(snapshotName, scan, TestTableSnapshotMapper.class,
+ ImmutableBytesWritable.class, NullWritable.class, failureJob, false, false,
+ TableSnapshotInputFormat.class);
+ TableMapReduceUtil.resetCacheConfig(failureConf);
+
+ Assert.assertFalse("Restore directory should not exist before job execution",
+ fs.exists(tempRestoreDir));
+ failureJob.waitForCompletion(true);
+
+ Assert.assertFalse("Job should fail since the restored snapshot directory is deleted",
+ failureJob.isSuccessful());
+
+ } finally {
+ try {
+ if (UTIL.getAdmin().tableExists(tableName)) {
+ UTIL.deleteTable(tableName);
+ }
+ } catch (Exception e) {
+ LOG.warn("Error deleting table", e);
+ }
+ try {
+ UTIL.getAdmin().deleteSnapshot(snapshotName);
+ } catch (Exception e) {
+ LOG.warn("Error deleting snapshot", e);
+ }
+ }
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
index 77410c3d91c..1493e4f69b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
@@ -29,6 +29,7 @@ import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -38,10 +39,13 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.yetus.audience.InterfaceAudience;
@@ -105,6 +109,7 @@ public class MetaFixer {
final List<RegionInfo> newRegionInfos = createRegionInfosForHoles(holes);
final List<RegionInfo> newMetaEntries = createMetaEntries(masterServices, newRegionInfos);
+ createRegionDirectories(masterServices, newMetaEntries);
final TransitRegionStateProcedure[] assignProcedures =
masterServices.getAssignmentManager().createRoundRobinAssignProcedures(newMetaEntries);
@@ -226,6 +231,27 @@ public class MetaFixer {
return createMetaEntriesSuccesses;
}
+ private static void createRegionDirectories(final MasterServices masterServices,
+ final List<RegionInfo> regions) {
+ if (regions.isEmpty()) {
+ return;
+ }
+ final MasterFileSystem mfs = masterServices.getMasterFileSystem();
+ final Path rootDir = mfs.getRootDir();
+ for (RegionInfo regionInfo : regions) {
+ if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
+ try {
+ Path tableDir = CommonFSUtils.getTableDir(rootDir, regionInfo.getTable());
+ HRegionFileSystem.createRegionOnFileSystem(masterServices.getConfiguration(),
+ mfs.getFileSystem(), tableDir, regionInfo);
+ } catch (IOException e) {
+ LOG.warn("Failed to create region directory for {}: {}",
+ regionInfo.getRegionNameAsString(), e.getMessage(), e);
+ }
+ }
+ }
+ }
+
/**
* Fix overlaps noted in CJ consistency report.
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
index 993aca6dd43..7b17439c944 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
@@ -109,6 +109,7 @@ public class TruncateRegionProcedure
setNextState(TruncateRegionState.TRUNCATE_REGION_MAKE_ONLINE);
break;
case TRUNCATE_REGION_MAKE_ONLINE:
+ createRegionOnFileSystem(env);
addChildProcedure(createAssignProcedures(env));
setNextState(TruncateRegionState.TRUNCATE_REGION_POST_OPERATION);
break;
@@ -130,6 +131,20 @@ public class TruncateRegionProcedure
return Flow.HAS_MORE_STATE;
}
+ private void createRegionOnFileSystem(final MasterProcedureEnv env) throws IOException {
+ RegionStateNode regionNode =
+ env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion());
+ regionNode.lock();
+ try {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName());
+ HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), mfs.getFileSystem(),
+ tableDir, getRegion());
+ } finally {
+ regionNode.unlock();
+ }
+ }
+
private void deleteRegionFromFileSystem(final MasterProcedureEnv env) throws IOException {
RegionStateNode regionNode =
env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 4570eac9ec8..2054f129050 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -810,7 +810,10 @@ public class HRegionFileSystem {
// First check to get the permissions
FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
// Write the RegionInfo file content
- try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) {
+ // HBASE-29662: Fail .regioninfo file creation if the region directory doesn't exist,
+ // avoiding silent masking of missing region directories during region initialization.
+ // The region directory should already exist when this method is called.
+ try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null, false)) {
out.write(content);
}
}
@@ -894,6 +897,14 @@ public class HRegionFileSystem {
CommonFSUtils.delete(fs, tmpPath, true);
}
+ // Check parent (region) directory exists first to maintain HBASE-29662 protection
+ if (!fs.exists(getRegionDir())) {
+ throw new IOException("Region directory does not exist: " + getRegionDir());
+ }
+ if (!fs.exists(getTempDir())) {
+ fs.mkdirs(getTempDir());
+ }
+
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
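Taken together with the FSUtils change below, writeRegionInfoFileContent no longer creates the region directory implicitly; callers create it first, as MetaFixer, TruncateRegionProcedure, and the tests in this patch now do. A minimal sketch of that "directory first" ordering; the table name "demo" and local configuration are assumptions for the example, not code from this patch.

    // Illustrative sketch, not from this patch: create the region directory
    // (and its .regioninfo) up front via HRegionFileSystem.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.util.CommonFSUtils;

    public class RegionDirFirstSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        Path tableDir = CommonFSUtils.getTableDir(rootDir, ri.getTable());
        // Creates the region directory and writes .regioninfo into it. After this
        // patch, a later rewrite of .regioninfo fails fast if the directory is gone,
        // instead of silently recreating regionDir/tableDir.
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, ri);
      }
    }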
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index ba201055466..7437143d2ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -213,6 +213,32 @@ public final class FSUtils {
*/
public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path,
FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
+ return create(conf, fs, path, perm, favoredNodes, true);
+ }
+
+ /**
+ * Create the specified file on the filesystem. By default, this will:
+ * <ol>
+ * <li>overwrite the file if it exists</li>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the configured column family replication or default replication if
+ * {@link ColumnFamilyDescriptorBuilder#DEFAULT_DFS_REPLICATION}</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param conf configurations
+ * @param fs {@link FileSystem} on which to write the file
+ * @param path {@link Path} to the file to write
+ * @param perm permissions
+ * @param favoredNodes favored data nodes
+ * @param isRecursiveCreate recursively create parent directories
+ * @return output stream to the created file
+ * @throws IOException if the file cannot be created
+ */
+ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path,
+ FsPermission perm, InetSocketAddress[] favoredNodes, boolean isRecursiveCreate)
+ throws IOException {
if (fs instanceof HFileSystem) {
FileSystem backingFs = ((HFileSystem) fs).getBackingFs();
if (backingFs instanceof DistributedFileSystem) {
@@ -231,7 +257,7 @@ public final class FSUtils {
}
}
- return CommonFSUtils.create(fs, path, perm, true);
+ return CommonFSUtils.create(fs, path, perm, true, isRecursiveCreate);
}
/**
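For completeness, a small sketch of the server-side FSUtils overload; the .regioninfo-style path, the null favoredNodes, and the empty payload are assumptions for the example. Within this patch the only non-recursive caller is HRegionFileSystem.writeRegionInfoFileContent.

    // Illustrative sketch, not from this patch: a non-recursive create that fails
    // if the enclosing region directory is missing.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.CommonFSUtils;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FSUtilsNonRecursiveSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path regionInfoFile = new Path("/hbase/data/default/demo/1234abcd/.regioninfo"); // illustrative
        FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
        // favoredNodes=null, isRecursiveCreate=false: parent directory must already exist.
        try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null, false)) {
          out.write(new byte[0]);
        }
      }
    }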
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index bc49fe40638..48a7615b2a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -102,6 +102,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.logging.Log4jUtils;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -4321,4 +4323,22 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
throw e;
}
}
+
+ public void createRegionDir(RegionInfo hri) throws IOException {
+ Path rootDir = getDataTestDir();
+ Path tableDir = CommonFSUtils.getTableDir(rootDir, hri.getTable());
+ Path regionDir = new Path(tableDir, hri.getEncodedName());
+ FileSystem fs = getTestFileSystem();
+ if (!fs.exists(regionDir)) {
+ fs.mkdirs(regionDir);
+ }
+ }
+
+ public void createRegionDir(RegionInfo regionInfo, MasterFileSystem masterFileSystem)
+ throws IOException {
+ Path tableDir =
+ CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), regionInfo.getTable());
+ HRegionFileSystem.createRegionOnFileSystem(conf, masterFileSystem.getFileSystem(), tableDir,
+ regionInfo);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
index 8c5a2d88601..fb28b802147 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
@@ -76,12 +76,14 @@ public class TestCoreRegionCoprocessor {
this.rss = new MockRegionServerServices(HTU.getConfiguration());
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
+ HTU.createRegionDir(ri);
this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null);
}
@After
public void after() throws IOException {
this.region.close();
+ HTU.cleanupTestDir();
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
index ea4c93d6de2..cd2d948dd06 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
@@ -170,6 +170,7 @@ public class TestMetaFixer {
throws IOException {
RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable())
.setStartKey(a.getStartKey()).setEndKey(b.getEndKey()).build();
+ TEST_UTIL.createRegionDir(overlapRegion, services.getMasterFileSystem());
MetaTableAccessor.putsToMetaTable(services.getConnection(),
Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion,
EnvironmentEdgeManager.currentTime())));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
index e64ba3eebf1..9f87bfa707a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
@@ -178,9 +178,11 @@ public class TestCompactionArchiveConcurrentClose {
CommonFSUtils.setRootDir(walConf, tableDir);
final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName());
HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);
-
+ Path regionDir = new Path(tableDir, info.getEncodedName());
+ if (!fs.getFileSystem().exists(regionDir)) {
+ fs.getFileSystem().mkdirs(regionDir);
+ }
region.initialize();
-
return region;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
index 706b98aeef7..72a3e2a97ae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
@@ -198,6 +198,7 @@ public class TestCompactionArchiveIOException {
.rename(eq(new Path(storeDir, ERROR_FILE)), any());
HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
+ fs.createRegionOnFileSystem(conf, fs.getFileSystem(), tableDir, info);
final Configuration walConf = new Configuration(conf);
CommonFSUtils.setRootDir(walConf, tableDir);
final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index d9a73514448..1d53a7d652b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -7501,12 +7501,13 @@ public class TestHRegion {
final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
final RegionServerServices rss =
spy(TEST_UTIL.createMockRegionServerService(serverName));
- HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
- htd.addFamily(new HColumnDescriptor(fam1));
- HRegionInfo hri =
- new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
- region =
- HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null);
+ TableDescriptor tableDescriptor =
+ TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build();
+ RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
+ TEST_UTIL.createRegionDir(hri);
+ region = HRegion.openHRegion(hri, tableDescriptor, rss.getWAL(hri),
+ TEST_UTIL.getConfiguration(), rss, null);
assertTrue(region.conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, false));
String plugins = region.conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
@@ -7864,4 +7865,77 @@ public class TestHRegion {
public static class NoOpRegionCoprocessor implements RegionCoprocessor, RegionObserver {
// an empty region coprocessor class
}
+
+ /**
+ * Test for HBASE-29662: HRegion.initialize() should fail when trying to recreate .regioninfo file
+ * after the region directory has been deleted. This validates that .regioninfo file creation does
+ * not create parent directories recursively.
+ */
+ @Test
+ public void testHRegionInitializeFailsWithDeletedRegionDir() throws Exception {
+ LOG.info("Testing HRegion initialize failure with deleted region directory");
+
+ TEST_UTIL = new HBaseTestingUtility();
+ Configuration conf = TEST_UTIL.getConfiguration();
+ Path testDir = TEST_UTIL.getDataTestDir("testHRegionInitFailure");
+ FileSystem fs = testDir.getFileSystem(conf);
+
+ // Create table descriptor
+ TableName tableName = TableName.valueOf("TestHRegionInitWithDeletedDir");
+ byte[] family = Bytes.toBytes("info");
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
+
+ // Create region info
+ RegionInfo regionInfo =
+ RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).build();
+
+ Path tableDir = CommonFSUtils.getTableDir(testDir, tableName);
+
+ // Create WAL for the region
+ WAL wal = HBaseTestingUtility.createWal(conf, testDir, regionInfo);
+
+ try {
+ // Create region normally (this should succeed and create region directory)
+ LOG.info("Creating region normally - should succeed");
+ HRegion region = HRegion.createHRegion(regionInfo, testDir, conf, htd, wal, true);
+
+ // Verify region directory exists
+ Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
+ assertTrue("Region directory should exist after creation", fs.exists(regionDir));
+
+ Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
+ assertTrue("Region info file should exist after creation", fs.exists(regionInfoFile));
+
+ // Delete the region directory (simulating external deletion or corruption)
+ assertTrue(fs.delete(regionDir, true));
+ assertFalse("Region directory should not exist after deletion", fs.exists(regionDir));
+
+ // Try to open/initialize the region again - this should fail
+ LOG.info("Attempting to re-initialize region with deleted directory - should fail");
+
+ // Create a new region instance (simulating region server restart or reopen)
+ HRegion newRegion = HRegion.newHRegion(tableDir, wal, fs, conf, regionInfo, htd, null);
+ // Try to initialize - this should fail because the regionDir doesn't exist
+ IOException regionInitializeException = null;
+ try {
+ newRegion.initialize(null);
+ } catch (IOException e) {
+ regionInitializeException = e;
+ }
+
+ // Verify the exception is related to missing parent directory
+ assertNotNull("Exception should be thrown", regionInitializeException);
+ String exceptionMessage = regionInitializeException.getMessage().toLowerCase();
+ assertTrue(exceptionMessage.contains("region directory does not exist"));
+ assertFalse("Region directory should still not exist after failed initialization",
+ fs.exists(regionDir));
+
+ } finally {
+ if (wal != null) {
+ wal.close();
+ }
+ TEST_UTIL.cleanupTestDir();
+ }
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
index 5f36d201a75..2ccd5d70ad7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
@@ -122,9 +122,8 @@ public class TestStoreFileRefresherChore {
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);
-
+ fs.createRegionOnFileSystem(walConf, fs.getFileSystem(), tableDir, info);
region.initialize();
-
return region;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 6069283dd57..2628f7bc35a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -819,7 +819,8 @@ public abstract class AbstractTestWALReplay {
// Mock the WAL
MockWAL wal = createMockWAL();
-
+ TEST_UTIL.createRegionDir(hri,
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem());
HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
for (HColumnDescriptor hcd : htd.getFamilies()) {
addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");