This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit ac36996ee360aad0f1f02f1cc856fff0f8bf3d40
Author: Abhishek Kothalikar <[email protected]>
AuthorDate: Tue Jun 3 21:22:31 2025 +0530

    HBASE-29228 Add support to prevent running multiple active clusters (#6887)
---
 .../apache/hadoop/hbase/ActiveClusterSuffix.java   |  98 +++++++++++++
 .../java/org/apache/hadoop/hbase/HConstants.java   |   3 +
 .../main/protobuf/server/ActiveClusterSuffix.proto |  33 +++++
 .../hadoop/hbase/master/MasterFileSystem.java      |  77 +++++++++-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  93 ++++++++++--
 .../regionserver/TestActiveClusterSuffix.java      | 158 +++++++++++++++++++++
 6 files changed, 446 insertions(+), 16 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java
new file mode 100644
index 00000000000..68fd61a2253
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos;
+
+/**
+ * The active cluster suffix for this cluster. It is serialized to the filesystem. This is a
+ * container for the suffix; it also knows how to serialize and deserialize the suffix.
+ */
+@InterfaceAudience.Private
+public class ActiveClusterSuffix {
+  private final String active_cluster_suffix;
+
+  /**
+   * New ActiveClusterSuffix.
+   */
+  public ActiveClusterSuffix(final String cs) {
+    this.active_cluster_suffix = cs;
+  }
+
+  public String getActiveClusterSuffix() {
+    return active_cluster_suffix;
+  }
+
+  /** Returns the active cluster suffix serialized using pb w/ pb magic prefix */
+  public byte[] toByteArray() {
+    return ProtobufUtil.prependPBMagic(convert().toByteArray());
+  }
+
+  /**
+   * Parse the serialized representation of the {@link ActiveClusterSuffix}
+   * @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix
+   * @return An instance of {@link ActiveClusterSuffix} made from <code>bytes</code>
+   * @see #toByteArray()
+   */
+  public static ActiveClusterSuffix parseFrom(final byte[] bytes) throws DeserializationException {
+    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+      int pblen = ProtobufUtil.lengthOfPBMagic();
+      ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
+        ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
+      ActiveClusterSuffixProtos.ActiveClusterSuffix cs = null;
+      try {
+        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+        cs = builder.build();
+      } catch (IOException e) {
+        throw new DeserializationException(e);
+      }
+      return convert(cs);
+    } else {
+      // Presume it was written out this way, the old way.
+      return new ActiveClusterSuffix(Bytes.toString(bytes));
+    }
+  }
+
+  /** Returns A pb instance to represent this instance. */
+  public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() {
+    ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
+      ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
+    return builder.setActiveClusterSuffix(this.active_cluster_suffix).build();
+  }
+
+  /** Returns A {@link ActiveClusterSuffix} made from the passed in <code>cs</code> */
+  public static ActiveClusterSuffix
+    convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) {
+    return new ActiveClusterSuffix(cs.getActiveClusterSuffix());
+  }
+
+  /**
+   * @see java.lang.Object#toString()
+   */
+  @Override
+  public String toString() {
+    return this.active_cluster_suffix;
+  }
+}
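
A quick round-trip sketch of the class above (the suffix value is hypothetical): toByteArray() prepends HBase's pb magic prefix to the serialized proto message, and parseFrom() strips it and rebuilds the instance, falling back to treating the bytes as a plain string when the prefix is absent.

    ActiveClusterSuffix original = new ActiveClusterSuffix("cluster-uuid:primary");
    byte[] bytes = original.toByteArray();   // pb magic prefix + serialized message
    ActiveClusterSuffix parsed = ActiveClusterSuffix.parseFrom(bytes);
    // The suffix survives the round trip unchanged.
    assert original.getActiveClusterSuffix().equals(parsed.getActiveClusterSuffix());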
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1a986359339..3e45bc9d42d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1681,6 +1681,9 @@ public final class HConstants {
    */
   public final static boolean HBASE_GLOBAL_READONLY_ENABLED_DEFAULT = false;
 
+  /** Name of the file containing the active cluster suffix */
+  public static final String ACTIVE_CLUSTER_SUFFIX_FILE_NAME = "active.cluster.suffix.id";
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto
new file mode 100644
index 00000000000..89bc086212b
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+// This file contains protocol buffers that are shared throughout HBase
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "ActiveClusterSuffixProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * Content of the '/hbase/active.cluster.suffix.id' file to indicate the active cluster.
+ */
+message ActiveClusterSuffix {
+  // This is the active cluster suffix set by the user in the config, as a String
+  required string active_cluster_suffix = 1;
+}
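
For reference, the file content for this message is HBase's four-byte pb magic prefix followed by the serialized ActiveClusterSuffix message (see ProtobufUtil.prependPBMagic in the client class above). A minimal decoding sketch using the generated class, mirroring the parseFrom() logic and assuming the raw bytes carry the magic prefix:

    static String decodeSuffix(byte[] raw) throws IOException {
      int pblen = ProtobufUtil.lengthOfPBMagic();
      ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
        ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
      // Merge everything after the magic prefix into the builder.
      ProtobufUtil.mergeFrom(builder, raw, pblen, raw.length - pblen);
      return builder.build().getActiveClusterSuffix();
    }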
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5a43cd98feb..034faa05802 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -19,12 +19,15 @@ package org.apache.hadoop.hbase.master;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Objects;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.ActiveClusterSuffix;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
@@ -60,6 +63,8 @@ public class MasterFileSystem {
   private final Configuration conf;
   // Persisted unique cluster ID
   private ClusterId clusterId;
+  // Persisted unique Active Cluster Suffix
+  private ActiveClusterSuffix activeClusterSuffix;
   // Keep around for convenience.
   private final FileSystem fs;
   // Keep around for convenience.
@@ -158,6 +163,8 @@ public class MasterFileSystem {
     if (isSecurityEnabled) {
       fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
       fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(rootdir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME),
+        secureRootFilePerms);
     }
     FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
     if (
@@ -262,10 +269,14 @@ public class MasterFileSystem {
       throw iae;
     }
     // Make sure cluster ID exists
-    if (!FSUtils.checkClusterIdExists(fs, rd, threadWakeFrequency)) {
-      FSUtils.setClusterId(fs, rd, new ClusterId(), threadWakeFrequency);
+    if (
+      !FSUtils.checkFileExistsInHbaseRootDir(fs, rootdir, HConstants.CLUSTER_ID_FILE_NAME,
+        threadWakeFrequency)
+    ) {
+      FSUtils.setClusterId(fs, rootdir, new ClusterId(), threadWakeFrequency);
     }
-    clusterId = FSUtils.getClusterId(fs, rd);
+    clusterId = FSUtils.getClusterId(fs, rootdir);
+    negotiateActiveClusterSuffixFile(threadWakeFrequency);
   }
 
   /**
@@ -382,4 +393,64 @@ public class MasterFileSystem {
   public void logFileSystemState(Logger log) throws IOException {
     CommonFSUtils.logFileSystemState(fs, rootdir, log);
   }
+
+  private void negotiateActiveClusterSuffixFile(long wait) throws IOException {
+    if (!isReadOnlyModeEnabled(conf)) {
+      try {
+        // verify the contents of the suffix file against the configured suffix
+        ActiveClusterSuffix acs = FSUtils.getActiveClusterSuffix(fs, rootdir);
+        LOG.debug("File suffix {} : Configured suffix {} : Cluster ID : {}", acs,
+          getSuffixFromConfig(), getClusterId());
+        if (Objects.equals(acs.getActiveClusterSuffix(), getSuffixFromConfig())) {
+          this.activeClusterSuffix = acs;
+        } else {
+          LOG.info("rootdir {} : Active cluster file suffix {}", rootdir, acs);
+          throw new IOException("Cannot start master, because another cluster is running in active "
+            + "(read-write) mode on this storage location. Active cluster suffix: " + acs
+            + ". This cluster id: " + getClusterId());
+        }
+        LOG.info("This is the active cluster on this storage location. File suffix {} : Suffix {}",
+          acs, getActiveClusterSuffix());
+      } catch (FileNotFoundException fnfe) {
+        // this is the active cluster; create the active cluster suffix file since it does not exist
+        FSUtils.setActiveClusterSuffix(fs, rootdir, getSuffixFileDataToWrite(), wait);
+      }
+    } else {
+      // this is a replica cluster
+      LOG.info("Replica cluster is being started in read-only mode");
+    }
+  }
+
+  public ActiveClusterSuffix getActiveClusterSuffix() {
+    return activeClusterSuffix;
+  }
+
+  private boolean isReadOnlyModeEnabled(Configuration conf) {
+    return conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
+      HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
+  }
+
+  private String getActiveClusterSuffixFromConfig(Configuration conf) {
+    return conf.get(HConstants.HBASE_META_TABLE_SUFFIX,
+      HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE);
+  }
+
+  public String getSuffixFromConfig() {
+    return getClusterId().toString() + ":" + getActiveClusterSuffixFromConfig(conf);
+  }
+
+  // Used only for testing
+  public byte[] getSuffixFileDataToCompare() {
+    String str = this.activeClusterSuffix.toString();
+    return str.getBytes(StandardCharsets.UTF_8);
+  }
+
+  // Builds the "<clusterId>:<configured suffix>" payload and caches it as this cluster's suffix.
+  public byte[] getSuffixFileDataToWrite() {
+    String str = getClusterId().toString() + ":" + getActiveClusterSuffixFromConfig(conf);
+    this.activeClusterSuffix = new ActiveClusterSuffix(str);
+    return str.getBytes(StandardCharsets.UTF_8);
+  }
 }
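
Operationally, the negotiation above means two clusters sharing one root directory differ only in configuration: the active cluster's "<clusterId>:<suffix>" string must match the suffix file on disk, while a replica opts out of the check by running read-only. A configuration sketch using the constants referenced in this diff (the suffix value itself is hypothetical):

    Configuration active = HBaseConfiguration.create();
    // Active (read-write) cluster: its suffix is recorded in the suffix file on first start.
    active.set(HConstants.HBASE_META_TABLE_SUFFIX, "primary");

    Configuration replica = HBaseConfiguration.create();
    // A replica on the same storage must run read-only, which skips the suffix negotiation.
    replica.setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, true);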
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 55b77b6aed1..98e1afc4d97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -31,6 +31,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -65,6 +66,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.ActiveClusterSuffix;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
@@ -515,15 +517,15 @@ public final class FSUtils {
    * @return <code>true</code> if the file exists, otherwise <code>false</code>
    * @throws IOException if checking the FileSystem fails
    */
-  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir, long wait)
-    throws IOException {
+  public static boolean checkFileExistsInHbaseRootDir(FileSystem fs, Path rootdir, String file,
+    long wait) throws IOException {
     while (true) {
       try {
-        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
+        Path filePath = new Path(rootdir, file);
         return fs.exists(filePath);
       } catch (IOException ioe) {
         if (wait > 0L) {
-          LOG.warn("Unable to check cluster ID file in {}, retrying in {}ms", 
rootdir, wait, ioe);
+          LOG.warn("Unable to check file {} in {}, retrying in {}ms", file, 
rootdir, wait, ioe);
           try {
             Thread.sleep(wait);
           } catch (InterruptedException e) {
@@ -585,6 +587,46 @@ public final class FSUtils {
     return clusterId;
   }
 
+  public static ActiveClusterSuffix getActiveClusterSuffix(FileSystem fs, Path rootdir)
+    throws IOException {
+    Path idPath = new Path(rootdir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+    ActiveClusterSuffix cs = null;
+    FileStatus status = fs.exists(idPath) ? fs.getFileStatus(idPath) : null;
+    if (status != null) {
+      int len = Ints.checkedCast(status.getLen());
+      byte[] content = new byte[len];
+      FSDataInputStream in = fs.open(idPath);
+      try {
+        in.readFully(content);
+      } catch (EOFException eof) {
+        LOG.warn("Cluster suffix file {} is empty", idPath);
+      } finally {
+        in.close();
+      }
+      try {
+        cs = ActiveClusterSuffix.parseFrom(content);
+      } catch (DeserializationException e) {
+        throw new IOException("content=" + Bytes.toString(content), e);
+      }
+      // If the content is not pb'd, it was written with writeUTF; re-read it accordingly.
+      if (!ProtobufUtil.isPBMagicPrefix(content)) {
+        String data = null;
+        in = fs.open(idPath);
+        try {
+          data = in.readUTF();
+          cs = new ActiveClusterSuffix(data);
+        } catch (EOFException eof) {
+          LOG.warn("Active cluster suffix file {} is empty", idPath);
+        } finally {
+          in.close();
+        }
+      }
+      return cs;
+    } else {
+      throw new FileNotFoundException("Active cluster suffix file " + idPath + " not found");
+    }
+  }
+
   /**
    *   */
   private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
@@ -612,31 +654,57 @@ public final class FSUtils {
    */
   public static void setClusterId(final FileSystem fs, final Path rootdir,
     final ClusterId clusterId, final long wait) throws IOException {
-
     final Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
     final Path tempDir = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY);
     final Path tempIdFile = new Path(tempDir, HConstants.CLUSTER_ID_FILE_NAME);
 
     LOG.debug("Create cluster ID file [{}] with ID: {}", idFile, clusterId);
+    writeClusterInfo(fs, rootdir, idFile, tempIdFile, clusterId.toByteArray(), wait);
+  }
+
+  /**
+   * Writes a user-provided suffix for this cluster to the "active.cluster.suffix.id" file in the
+   * HBase root directory. If any operation on the suffix file fails and {@code wait} is a positive
+   * value, the method will retry until the thread is forcibly interrupted.
+   */
+  public static void setActiveClusterSuffix(final FileSystem fs, final Path rootdir, byte[] bdata,
+    final long wait) throws IOException {
+    final Path idFile = new Path(rootdir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+    final Path tempDir = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY);
+    final Path tempIdFile = new Path(tempDir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+    String fsuffix = new String(bdata, StandardCharsets.UTF_8);
+
+    LOG.debug("Create active cluster suffix file [{}] with suffix: {}", idFile, fsuffix);
+    writeClusterInfo(fs, rootdir, idFile, tempIdFile, bdata, wait);
+  }
+
+  /**
+   * Writes information about this cluster to the specified file. For example, it is used for
+   * writing the cluster id to the "hbase.id" file and the active cluster suffix to the
+   * "active.cluster.suffix.id" file in the HBase root directory. If any operation on the file
+   * fails and {@code wait} is a positive value, the method will retry until the thread is
+   * forcibly interrupted.
+   */
+  private static void writeClusterInfo(final FileSystem fs, final Path rootdir, final Path idFile,
+    final Path tempIdFile, byte[] fileData, final long wait) throws IOException {
     while (true) {
       Optional<IOException> failure = Optional.empty();
 
-      LOG.debug("Write the cluster ID file to a temporary location: {}", 
tempIdFile);
+      LOG.debug("Write the file to a temporary location: {}", tempIdFile);
       try (FSDataOutputStream s = fs.create(tempIdFile)) {
-        s.write(clusterId.toByteArray());
+        s.write(fileData);
       } catch (IOException ioe) {
         failure = Optional.of(ioe);
       }
 
       if (!failure.isPresent()) {
         try {
-          LOG.debug("Move the temporary cluster ID file to its target location 
[{}]:[{}]",
-            tempIdFile, idFile);
+          LOG.debug("Move the temporary file to its target location 
[{}]:[{}]", tempIdFile, idFile);
 
           if (!fs.rename(tempIdFile, idFile)) {
-            failure =
-              Optional.of(new IOException("Unable to move temp cluster ID file to " + idFile));
+            failure = Optional.of(new IOException("Unable to move temp file to " + idFile));
           }
         } catch (IOException ioe) {
           failure = Optional.of(ioe);
@@ -646,8 +714,7 @@ public final class FSUtils {
       if (failure.isPresent()) {
         final IOException cause = failure.get();
         if (wait > 0L) {
-          LOG.warn("Unable to create cluster ID file in {}, retrying in {}ms", 
rootdir, wait,
-            cause);
+          LOG.warn("Unable to create file in {}, retrying in {}ms", rootdir, 
wait, cause);
           try {
             Thread.sleep(wait);
           } catch (InterruptedException e) {
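
The shared writeClusterInfo() path publishes files atomically: the payload is first written under the temp directory, then renamed into place so readers never observe a partially written file, and failures are retried every wait ms while wait is positive. A caller sketch for the new suffix file (payload value hypothetical; note the master writes the raw "<clusterId>:<suffix>" bytes rather than the pb form):

    byte[] payload = "cluster-uuid:primary".getBytes(StandardCharsets.UTF_8);
    FSUtils.setActiveClusterSuffix(fs, rootdir, payload, 1000L); // retry every second on failure
    ActiveClusterSuffix acs = FSUtils.getActiveClusterSuffix(fs, rootdir);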
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestActiveClusterSuffix.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestActiveClusterSuffix.java
new file mode 100644
index 00000000000..df036f08f00
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestActiveClusterSuffix.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test Active Cluster Suffix file.
+ */
+@Category({ RegionServerTests.class, MediumTests.class })
+public class TestActiveClusterSuffix {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestActiveClusterSuffix.class);
+
+  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+  private JVMClusterUtil.RegionServerThread rst;
+
+  @Before
+  public void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean(ShutdownHook.RUN_SHUTDOWN_HOOK, false);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+    if (rst != null && rst.getRegionServer() != null) {
+      rst.getRegionServer().stop("end of test");
+      rst.join();
+    }
+  }
+
+  @Test
+  public void testActiveClusterSuffixCreated() throws Exception {
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniDFSCluster(1);
+    TEST_UTIL.startMiniHBaseCluster();
+
+    Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration());
+    FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
+    Path filePath = new Path(rootDir, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+
+    assertTrue(filePath + " should exist", fs.exists(filePath));
+    assertTrue(filePath + " should not be empty", fs.getFileStatus(filePath).getLen() > 0);
+
+    MasterFileSystem mfs = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem();
+    // Compute string using currently set suffix and the cluster id
+    String cluster_suffix1 =
+      new String(mfs.getSuffixFileDataToCompare(), StandardCharsets.US_ASCII);
+    // Compute string member variable
+    String cluster_suffix2 = mfs.getActiveClusterSuffix().toString();
+    assertEquals(cluster_suffix1, cluster_suffix2);
+  }
+
+  @Test
+  public void testSuffixFileOnRestart() throws Exception {
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniDFSCluster(1);
+    TEST_UTIL.createRootDir();
+    TEST_UTIL.getConfiguration().set(HConstants.HBASE_META_TABLE_SUFFIX, "Test");
+
+    String clusterId = HBaseCommonTestingUtil.getRandomUUID().toString();
+    String cluster_suffix = clusterId + ":" + TEST_UTIL.getConfiguration()
+      .get(HConstants.HBASE_META_TABLE_SUFFIX, HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE);
+
+    writeIdFile(clusterId, HConstants.CLUSTER_ID_FILE_NAME);
+    writeIdFile(cluster_suffix, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+
+    try {
+      TEST_UTIL.startMiniHBaseCluster();
+    } catch (IOException ioe) {
+      Assert.fail("Can't start mini hbase cluster.");
+    }
+
+    MasterFileSystem mfs = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem();
+    // Compute using file contents
+    String cluster_suffix1 =
+      new String(mfs.getSuffixFileDataToCompare(), StandardCharsets.US_ASCII);
+    // Compute using config
+    String cluster_suffix2 = mfs.getSuffixFromConfig();
+
+    assertEquals(cluster_suffix1, cluster_suffix2);
+    assertEquals(cluster_suffix, cluster_suffix1);
+  }
+
+  @Test
+  public void testVerifyErrorWhenSuffixNotMatched() throws Exception {
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniDFSCluster(1);
+    TEST_UTIL.createRootDir();
+    TEST_UTIL.getConfiguration().setInt("hbase.master.start.timeout.localHBaseCluster", 10000);
+    String cluster_suffix = "2df92f65-d801-46e6-b892-c2bae2df3c21:test";
+    writeIdFile(cluster_suffix, HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME);
+    // Exception expected: the suffix in the file and the one configured by the user do not match
+    boolean threwIOE = false;
+    try {
+      TEST_UTIL.startMiniHBaseCluster();
+    } catch (IOException ioe) {
+      threwIOE = true;
+    } finally {
+      assertTrue("The master should have thrown an exception", threwIOE);
+    }
+  }
+
+  private void writeIdFile(String id, String fileName) throws Exception {
+    Path rootDir = CommonFSUtils.getRootDir(TEST_UTIL.getConfiguration());
+    FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
+    Path filePath = new Path(rootDir, fileName);
+    try (FSDataOutputStream s = fs.create(filePath)) {
+      s.writeUTF(id);
+    }
+  }
+}
