This is an automated email from the ASF dual-hosted git repository.
andor pushed a commit to branch HBASE-29081
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/HBASE-29081 by this push:
new 459db1d0c58 HBASE-29644: Refresh_meta triggering compaction on user table (#7385)
459db1d0c58 is described below
commit 459db1d0c5865911ac7be54478a5fb88f053c0bf
Author: Anuj Sharma <[email protected]>
AuthorDate: Thu Nov 13 20:32:27 2025 +0530
HBASE-29644: Refresh_meta triggering compaction on user table (#7385)
Link to JIRA: https://issues.apache.org/jira/browse/HBASE-29644
Description:
Consider a two-cluster setup with one active cluster and one read replica. The
active cluster creates a table with FILE based SFT. If you add a few rows
through the active cluster and flush to create a few HFiles, and then run
refresh_meta from the read replica, a minor compaction is triggered. This should
not happen via the read replica; it can create inconsistencies because the
active cluster is not aware of that event.
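For context, a minimal reproduction sketch of the scenario above, assuming a
client pointed at the active cluster. The table and family names are
illustrative, and the SFT property key used is the standard
StoreFileTrackerFactory setting, not something introduced by this commit:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RefreshMetaCompactionRepro {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("sft_repro");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = conn.getAdmin()) {
      // Create a table that uses FILE based store file tracking
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build());
      try (Table table = conn.getTable(tn)) {
        for (int i = 0; i < 3; i++) {
          table.put(new Put(Bytes.toBytes("row" + i))
            .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v" + i)));
          admin.flush(tn); // each flush writes a new HFile on the active cluster
        }
      }
    }
    // Running refresh_meta from the read replica at this point is what triggered
    // the unwanted minor compaction before this fix.
  }
}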
Cause:
Compaction events should be blocked in ReadOnlyController, but the read-only
guard was missing from the preCompactSelection() hook.
Fix:
Add the internalReadOnlyGuard() check to preCompactSelection() in ReadOnlyController.
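For reference, the shape of the added hook (restated from the diff below);
internalReadOnlyGuard() is the existing guard the other ReadOnlyController hooks
already call to block operations while global read-only mode is enabled:

  @Override
  public void preCompactSelection(ObserverContext<? extends RegionCoprocessorEnvironment> c,
    Store store, List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
    throws IOException {
    internalReadOnlyGuard();
  }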
---
.../hadoop/hbase/regionserver/CompactSplit.java | 19 +++++
.../storefiletracker/StoreFileTrackerBase.java | 2 +-
.../hbase/security/access/ReadOnlyController.java | 11 +++
.../regionserver/TestCompactSplitReadOnly.java | 87 ++++++++++++++++++++++
4 files changed, 118 insertions(+), 1 deletion(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index 4ac1d7c6396..4aa7ab619d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntSupplier;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
@@ -344,6 +345,12 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati
return;
}
+ // Should not allow compaction if cluster is in read-only mode
+ if (isReadOnlyEnabled()) {
+      LOG.info("Ignoring compaction request for " + region + ", because read-only mode is on.");
+ return;
+ }
+
if (
this.server.isStopped() || (region.getTableDescriptor() != null
&& !region.getTableDescriptor().isCompactionEnabled())
@@ -442,6 +449,13 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati
LOG.info(String.format("User has disabled compactions"));
return Optional.empty();
}
+
+ // Should not allow compaction if cluster is in read-only mode
+ if (isReadOnlyEnabled()) {
+      LOG.info(String.format("Compaction request skipped as read-only mode is on"));
+ return Optional.empty();
+ }
+
    Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
if (!compaction.isPresent() && region.getRegionInfo() != null) {
      String reason = "Not compacting " + region.getRegionInfo().getRegionNameAsString()
@@ -856,6 +870,11 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati
return compactionsEnabled;
}
+ private boolean isReadOnlyEnabled() {
+    return conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
+      HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
+ }
+
public void setCompactionsEnabled(boolean compactionsEnabled) {
this.compactionsEnabled = compactionsEnabled;
    this.conf.setBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, compactionsEnabled);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
index bc24ad57944..29d8e0bcb48 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/storefiletracker/StoreFileTrackerBase.java
@@ -143,7 +143,7 @@ abstract class StoreFileTrackerBase implements StoreFileTracker {
  public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
if (!isPrimaryReplica || isReadOnlyEnabled()) {
throw new IllegalStateException(
-        "Should not call create writer on secondary replicas or in read only mode");
+        "Should not call create writer on secondary replicas or in read-only mode");
}
// creating new cache config for each new writer
final CacheConfig cacheConf = ctx.getCacheConf();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
index 5b7ab67df0b..7bd16d10ef3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java
@@ -53,6 +53,9 @@ import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.yetus.audience.InterfaceAudience;
@@ -81,6 +84,7 @@ public class ReadOnlyController implements MasterCoprocessor, RegionCoprocessor,
@Override
public void start(CoprocessorEnvironment env) throws IOException {
+
this.globalReadOnlyEnabled =
env.getConfiguration().getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
@@ -131,6 +135,13 @@ public class ReadOnlyController implements MasterCoprocessor, RegionCoprocessor,
internalReadOnlyGuard();
}
+ @Override
+  public void preCompactSelection(ObserverContext<? extends RegionCoprocessorEnvironment> c,
+    Store store, List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
+ throws IOException {
+ internalReadOnlyGuard();
+ }
+
@Override
  public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
    byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitReadOnly.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitReadOnly.java
new file mode 100644
index 00000000000..065d42cd24c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitReadOnly.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestCompactSplitReadOnly {
+
+ private CompactSplit compactSplit;
+ private Configuration conf;
+
+ @Before
+ public void setUp() {
+ conf = new Configuration();
+ // enable read-only mode
+ conf.setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, true);
+ // create CompactSplit with conf-only constructor (available for tests)
+ compactSplit = new CompactSplit(conf);
+ }
+
+ @After
+ public void tearDown() {
+ // ensure thread pools are shutdown to avoid leakage
+ compactSplit.interruptIfNecessary();
+ }
+
+ @Test
+  public void testRequestSystemCompactionIgnoredWhenReadOnly() throws IOException {
+ // Mock HRegion and HStore minimal behavior
+ HRegion region = mock(HRegion.class);
+ HStore store = mock(HStore.class);
+
+ // Ensure compaction queues start empty
+ assertEquals(0, compactSplit.getCompactionQueueSize());
+
+ // Call requestSystemCompaction for a single store (selectNow = false)
+ compactSplit.requestSystemCompaction(region, store, "test-readonly");
+
+ // Because read-only mode is enabled, no compaction should be queued
+ assertEquals(0, compactSplit.getCompactionQueueSize());
+ }
+
+ @Test
+ public void testSelectCompactionIgnoredWhenReadOnly() throws IOException {
+ HRegion region = mock(HRegion.class);
+ HStore store = mock(HStore.class);
+
+ // Ensure compaction queues start empty
+ assertEquals(0, compactSplit.getCompactionQueueSize());
+
+    // Call requestCompaction which uses selectNow = true and should call selectCompaction
+ compactSplit.requestCompaction(region, store, "test-select-readonly", 0,
+ CompactionLifeCycleTracker.DUMMY, (User) null);
+
+    // Because read-only mode is enabled, selectCompaction should be skipped and no task queued
+ assertEquals(0, compactSplit.getCompactionQueueSize());
+ }
+}