This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-29081 by this push:
     new 365f16e3c2e HBASE-29611: With FILE based SFT, the list of HFiles we 
maintain in .filelist is n… (#7361)
365f16e3c2e is described below

commit 365f16e3c2e9b94f8ce12a66ba0fd7cd946de501
Author: Anuj Sharma <[email protected]>
AuthorDate: Mon Oct 27 21:39:36 2025 +0530

    HBASE-29611: With FILE based SFT, the list of HFiles we maintain in 
.filelist is n… (#7361)
    
    * HBASE-29611: With FILE based SFT, the list of HFiles we maintain in 
.filelist is not getting updated for read replica
    
    Link to JIRA: https://issues.apache.org/jira/browse/HBASE-29611
    
    Description:
    Steps to Repro (For detailed steps, check JIRA):
    - Create two clusters on the same storage location.
    - Create table on active, then refresh meta on the read replica to get the 
table meta data updated.
    - Add some rows and flush on the active cluster, do refresh_hfiles on the 
read replica and scan table.
    - If you now again add the rows in the table on active and do 
refresh_hfiles then the rows added are not visible in the read replica.
    
    Cause:
    The refresh store file is a two step process:
    1. Load the existing store file from the .filelist (choose the file with 
higher timestamp for loading)
    2. refresh store file internals (clean up old/compacted files, replace 
store file in .filelist)
    
    In the current scenario, the read-replica initially loads the list of HFiles 
    from the .filelist file created by the active cluster, but then writes a new 
    tracker file with a greater timestamp. Now there are two files in .filelist. 
    On a subsequent flush from the active cluster, the file created by the active 
    gets updated, but the file created by the read-replica does not. Since 
    refresh_hfiles loads the file with the higher timestamp, the stale file 
    written by the read-replica is picked, so the newly flushed HFiles from the 
    active cluster never become visible on the replica.
    
    Fix:
    As we just wanted the file from active to be loaded anytime we perform 
refresh store files, we must not create a new file in the .filelist from the 
read-replica, in this way we will stop the timestamp mismatch.
    
    NOTE:
    Also, we do not want to initialize the tracker file 
    (StoreFileListFile.java:load()) from the read-replica, since the replica 
    never writes it; hence a check for the read-only property has been added in 
    StoreFileTrackerBase.java:load()
    
    * Make read only cluster to behave like secondary replica
---
 .../storefiletracker/StoreFileTrackerBase.java     |  19 ++-
 .../DummyStoreFileTrackerForReadOnlyMode.java      |  79 +++++++++++
 .../TestStoreFileTrackerBaseReadOnlyMode.java      | 146 +++++++++++++++++++++
 3 files changed, 238 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
index 779a114af59..bc24ad57944 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -85,12 +86,12 @@ abstract class StoreFileTrackerBase implements 
StoreFileTracker {
 
   @Override
   public final List<StoreFileInfo> load() throws IOException {
-    return doLoadStoreFiles(!isPrimaryReplica);
+    return doLoadStoreFiles(!isPrimaryReplica || isReadOnlyEnabled());
   }
 
   @Override
   public final void add(Collection<StoreFileInfo> newFiles) throws IOException 
{
-    if (isPrimaryReplica) {
+    if (isPrimaryReplica && !isReadOnlyEnabled()) {
       doAddNewStoreFiles(newFiles);
     }
   }
@@ -98,14 +99,14 @@ abstract class StoreFileTrackerBase implements 
StoreFileTracker {
   @Override
   public final void replace(Collection<StoreFileInfo> compactedFiles,
     Collection<StoreFileInfo> newFiles) throws IOException {
-    if (isPrimaryReplica) {
+    if (isPrimaryReplica && !isReadOnlyEnabled()) {
       doAddCompactionResults(compactedFiles, newFiles);
     }
   }
 
   @Override
   public final void set(List<StoreFileInfo> files) throws IOException {
-    if (isPrimaryReplica) {
+    if (isPrimaryReplica && !isReadOnlyEnabled()) {
       doSetStoreFiles(files);
     }
   }
@@ -140,8 +141,9 @@ abstract class StoreFileTrackerBase implements 
StoreFileTracker {
 
   @Override
   public final StoreFileWriter createWriter(CreateStoreFileWriterParams 
params) throws IOException {
-    if (!isPrimaryReplica) {
-      throw new IllegalStateException("Should not call create writer on 
secondary replicas");
+    if (!isPrimaryReplica || isReadOnlyEnabled()) {
+      throw new IllegalStateException(
+        "Should not call create writer on secondary replicas or in read only 
mode");
     }
     // creating new cache config for each new writer
     final CacheConfig cacheConf = ctx.getCacheConf();
@@ -385,6 +387,11 @@ abstract class StoreFileTrackerBase implements 
StoreFileTracker {
       storeFiles);
   }
 
+  private boolean isReadOnlyEnabled() {
+    return conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
+      HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
+  }
+
   /**
    * For primary replica, we will call load once when opening a region, and 
the implementation could
    * choose to do some cleanup work. So here we use {@code readOnly} to 
indicate that whether you
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DummyStoreFileTrackerForReadOnlyMode.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DummyStoreFileTrackerForReadOnlyMode.java
new file mode 100644
index 00000000000..5f8268b3ac7
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DummyStoreFileTrackerForReadOnlyMode.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+
+public class DummyStoreFileTrackerForReadOnlyMode extends StoreFileTrackerBase 
{
+  private boolean readOnlyUsed = false;
+  private boolean compactionExecuted = false;
+  private boolean addExecuted = false;
+  private boolean setExecuted = false;
+
+  public DummyStoreFileTrackerForReadOnlyMode(Configuration conf, boolean 
isPrimaryReplica) {
+    super(conf, isPrimaryReplica, null);
+  }
+
+  @Override
+  protected void doAddCompactionResults(Collection<StoreFileInfo> 
compactedFiles,
+    Collection<StoreFileInfo> newFiles) {
+    compactionExecuted = true;
+  }
+
+  @Override
+  protected void doSetStoreFiles(Collection<StoreFileInfo> files) throws 
IOException {
+    setExecuted = true;
+  }
+
+  @Override
+  protected List<StoreFileInfo> doLoadStoreFiles(boolean readOnly) {
+    readOnlyUsed = readOnly;
+    return Collections.emptyList();
+  }
+
+  @Override
+  protected void doAddNewStoreFiles(Collection<StoreFileInfo> newFiles) throws 
IOException {
+    addExecuted = true;
+  }
+
+  boolean wasReadOnlyLoad() {
+    return readOnlyUsed;
+  }
+
+  boolean wasCompactionExecuted() {
+    return compactionExecuted;
+  }
+
+  boolean wasAddExecuted() {
+    return addExecuted;
+  }
+
+  boolean wasSetExecuted() {
+    return setExecuted;
+  }
+
+  @Override
+  public boolean requireWritingToTmpDirFirst() {
+    return false;
+  }
+}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerBaseReadOnlyMode.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerBaseReadOnlyMode.java
new file mode 100644
index 00000000000..8a1cee55832
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerBaseReadOnlyMode.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.storefiletracker;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TestRefreshHFilesBase;
+import 
org.apache.hadoop.hbase.master.procedure.TestRefreshHFilesProcedureWithReadOnlyConf;
+import org.apache.hadoop.hbase.regionserver.CreateStoreFileWriterParams;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestStoreFileTrackerBaseReadOnlyMode extends 
TestRefreshHFilesBase {
+  private DummyStoreFileTrackerForReadOnlyMode tracker;
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    
HBaseClassTestRule.forClass(TestRefreshHFilesProcedureWithReadOnlyConf.class);
+
+  @Before
+  public void setup() throws Exception {
+    // When true is passed only setup for readonly property is done.
+    // The initial ReadOnly property will be false for table creation
+    baseSetup(true);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    baseTearDown();
+  }
+
+  @Test
+  public void testLoadReadOnlyWhenGlobalReadOnlyEnabled() throws Exception {
+    try {
+      setReadOnlyMode(true);
+      tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+      tracker.load();
+      assertTrue("Tracker should be in read-only mode", 
tracker.wasReadOnlyLoad());
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    } finally {
+      setReadOnlyMode(false);
+    }
+  }
+
+  @Test
+  public void testReplaceSkippedWhenGlobalReadOnlyEnabled() throws Exception {
+    try {
+      setReadOnlyMode(true);
+      tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+      tracker.replace(Collections.emptyList(), Collections.emptyList());
+      assertFalse("Compaction should not be executed in readonly mode",
+        tracker.wasCompactionExecuted());
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    } finally {
+      setReadOnlyMode(false);
+    }
+  }
+
+  @Test
+  public void testReplaceExecutedWhenWritable() throws Exception {
+    tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+    tracker.replace(Collections.emptyList(), Collections.emptyList());
+    assertTrue("Compaction should run when not readonly", 
tracker.wasCompactionExecuted());
+  }
+
+  @Test
+  public void testAddSkippedWhenGlobalReadOnlyEnabled() throws Exception {
+    try {
+      setReadOnlyMode(true);
+      tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+      tracker.add(Collections.emptyList());
+      assertFalse("Add should not be executed in readonly mode", 
tracker.wasAddExecuted());
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    } finally {
+      setReadOnlyMode(false);
+    }
+  }
+
+  @Test
+  public void testAddExecutedWhenWritable() throws Exception {
+    tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+    tracker.add(Collections.emptyList());
+    assertTrue("Add should run when not readonly", tracker.wasAddExecuted());
+  }
+
+  @Test
+  public void testSetSkippedWhenGlobalReadOnlyEnabled() throws Exception {
+    try {
+      setReadOnlyMode(true);
+      tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+      tracker.set(Collections.emptyList());
+      assertFalse("Set should not be executed in readonly mode", 
tracker.wasSetExecuted());
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    } finally {
+      setReadOnlyMode(false);
+    }
+  }
+
+  @Test
+  public void testSetExecutedWhenWritable() throws Exception {
+    tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+    tracker.set(Collections.emptyList());
+    assertTrue("Set should run when not readonly", tracker.wasSetExecuted());
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testCreateWriterThrowExceptionWhenGlobalReadOnlyEnabled() throws 
Exception {
+    try {
+      setReadOnlyMode(true);
+      tracker = new DummyStoreFileTrackerForReadOnlyMode(conf, true);
+      CreateStoreFileWriterParams params = 
CreateStoreFileWriterParams.create().maxKeyCount(4)
+        
.isCompaction(false).includeMVCCReadpoint(true).includesTag(false).shouldDropBehind(false);
+      tracker.createWriter(params);
+    } finally {
+      setReadOnlyMode(false);
+    }
+  }
+}

Reply via email to