This is an automated email from the ASF dual-hosted git repository.
andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/HBASE-29081 by this push:
new f45bb13fb19 HBASE-29642 Active cluster file is not being updated after promoting … (#7437)
f45bb13fb19 is described below
commit f45bb13fb19b4cb871d9d6ca2bbe006aca6b0df5
Author: Abhishek Kothalikar <[email protected]>
AuthorDate: Fri Nov 21 19:29:06 2025 +0530
HBASE-29642 Active cluster file is not being updated after promoting … (#7437)
* HBASE-29642 Active cluster file is not being updated after promoting a
new active cluster
* HBASE-29642 Active cluster file is not being updated after promoting a
new active cluster
* HBASE-29642 Active cluster file is not being updated after promoting a
new active cluster
---
.../coprocessor/MasterCoprocessorEnvironment.java | 3 ++
.../hadoop/hbase/master/MasterCoprocessorHost.java | 5 ++
.../hbase/security/access/ReadOnlyController.java | 55 ++++++++++++++++++++--
.../security/access/TestReadOnlyController.java | 27 +++++++++++
4 files changed, 87 insertions(+), 3 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java
index 61c98346887..b6950f4c2e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -88,4 +89,6 @@ public interface MasterCoprocessorEnvironment extends
CoprocessorEnvironment<Mas
* @return A MetricRegistry for the coprocessor class to track and export metrics.
*/
MetricRegistry getMetricRegistryForMaster();
+
+ MasterServices getMasterServices();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index e3d269973f8..71f8810bee0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -111,6 +111,11 @@ public class MasterCoprocessorHost
return metricRegistry;
}
+ @Override
+ public MasterServices getMasterServices() {
+ return services;
+ }
+
@Override
public void shutdown() {
super.shutdown();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
index 7bd16d10ef3..6d9efadef21 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
@@ -21,6 +21,8 @@ import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -51,11 +53,14 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.yetus.audience.InterfaceAudience;
@@ -74,6 +79,8 @@ public class ReadOnlyController implements MasterCoprocessor,
RegionCoprocessor,
ConfigurationObserver {
private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyController.class);
+ private MasterServices masterServices;
+
private volatile boolean globalReadOnlyEnabled;
private void internalReadOnlyGuard() throws IOException {
@@ -84,7 +91,13 @@ public class ReadOnlyController implements
MasterCoprocessor, RegionCoprocessor,
@Override
public void start(CoprocessorEnvironment env) throws IOException {
-
+ if (env instanceof MasterCoprocessorEnvironment) {
+ this.masterServices = ((MasterCoprocessorEnvironment) env).getMasterServices();
+ LOG.info("ReadOnlyController obtained MasterServices reference from start().");
+ } else {
+ LOG.debug("ReadOnlyController loaded in a non-Master environment. "
+ + "File system operations for read-only state will not work.");
+ }
this.globalReadOnlyEnabled =
env.getConfiguration().getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
@@ -411,14 +424,50 @@ public class ReadOnlyController implements
MasterCoprocessor, RegionCoprocessor,
BulkLoadObserver.super.preCleanupBulkLoad(ctx);
}
+ private void manageActiveClusterIdFile(boolean newValue) {
+ MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+ FileSystem fs = mfs.getFileSystem();
+ Path rootDir = mfs.getRootDir();
+ Path activeClusterFile = new Path(rootDir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+
+ try {
+ if (newValue) {
+ // ENABLING READ-ONLY (false -> true), delete the active cluster file.
+ LOG.debug("Global read-only mode is being ENABLED. Deleting active cluster file: {}",
+ activeClusterFile);
+ try {
+ fs.delete(activeClusterFile, false);
+ LOG.info("Successfully deleted active cluster file: {}", activeClusterFile);
+ } catch (IOException e) {
+ LOG.error(
+ "Failed to delete active cluster file: {}. "
+ + "Read-only flag will be updated, but file system state is inconsistent.",
+ activeClusterFile);
+ }
+ } else {
+ // DISABLING READ-ONLY (true -> false), create the active cluster file id file
+ int wait = mfs.getConfiguration().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
+ FSUtils.setActiveClusterSuffix(fs, rootDir, mfs.getSuffixFileDataToWrite(), wait);
+ }
+ } catch (IOException e) {
+ // We still update the flag, but log that the operation failed.
+ LOG.error("Failed to perform file operation for read-only switch. "
+ + "Flag will be updated, but file system state may be inconsistent.", e);
+ }
+ }
+
/* ---- ConfigurationObserver Overrides ---- */
- @Override
public void onConfigurationChange(Configuration conf) {
boolean maybeUpdatedConfValue =
conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
if (this.globalReadOnlyEnabled != maybeUpdatedConfValue) {
+ if (this.masterServices != null) {
+ manageActiveClusterIdFile(maybeUpdatedConfValue);
+ } else {
+ LOG.debug("Global R/O flag changed, but not running on master");
+ }
this.globalReadOnlyEnabled = maybeUpdatedConfValue;
- LOG.info("Config {} has been dynamically changed to {}",
+ LOG.info("Config {} has been dynamically changed to {}.",
HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
this.globalReadOnlyEnabled);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java
index ddf513fcfe7..ff60bde88e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java
@@ -18,11 +18,15 @@
package org.apache.hadoop.hbase.security.access;
import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
@@ -36,6 +40,7 @@ import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
@@ -154,6 +159,14 @@ public class TestReadOnlyController {
hRegionServer.getConfigurationManager().notifyAllObservers(conf);
}
+ private static boolean isActiveClusterIdFileExists() throws IOException {
+ MasterFileSystem mfs = hMaster.getMasterFileSystem();
+ Path rootDir = mfs.getRootDir();
+ FileSystem fs = mfs.getFileSystem();
+ Path activeClusterFile = new Path(rootDir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+ return fs.exists(activeClusterFile);
+ }
+
// The test case for successfully creating a table with Read-Only mode disabled happens when
// setting up the test class, so we only need a test function for a failed table creation.
@Test
@@ -221,4 +234,18 @@ public class TestReadOnlyController {
// This should throw the IOException
testTable.batch(actions, null);
}
+
+ @Test
+ public void testActiveClusterIdFileCreationWhenReadOnlyDisabled()
+ throws IOException, InterruptedException {
+ disableReadOnlyMode();
+ assertTrue(isActiveClusterIdFileExists());
+ }
+
+ @Test
+ public void testActiveClusterIdFileDeletionWhenReadOnlyEnabled()
+ throws IOException, InterruptedException {
+ enableReadOnlyMode();
+ assertFalse(isActiveClusterIdFileExists());
+ }
}