This is an automated email from the ASF dual-hosted git repository.
wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new 9932b00c54f HBASE-29035: Amount of region cached in the region metrics not updated for a region immediately after it is flushed with cacheOnWrite turned on (#6549)
9932b00c54f is described below
commit 9932b00c54ff0465290efa22594b420f95406292
Author: Rahul Agarkar <[email protected]>
AuthorDate: Thu Dec 19 16:06:17 2024 +0530
HBASE-29035: Amount of region cached in the region metrics not updated for a region immediately after it is flushed with cacheOnWrite turned on (#6549)
Signed-off-by: Wellington Chevreuil <[email protected]>
---
.../hadoop/hbase/io/hfile/HFileWriterImpl.java | 11 ++++-
.../org/apache/hadoop/hbase/TestCacheEviction.java | 54 ++++++++++++++++++++--
2 files changed, 59 insertions(+), 6 deletions(-)
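For context, a minimal sketch of the key change (illustrative only, not copied verbatim from the patch): with cache-on-write, blocks written during a flush are cached under a BlockCacheKey. Building that key from the writer's Path when one is available, rather than from the file name string, lets the block cache tie the cached block back to its store file, so the region-level "region cached" metric can reflect the flush immediately, which is what the new tests below assert. The class name KeySketch and the example path are hypothetical; the two BlockCacheKey constructors are the ones used in the diff that follows.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
    import org.apache.hadoop.hbase.io.hfile.BlockType;

    public class KeySketch {
      public static void main(String[] args) {
        // Hypothetical store file path, for illustration only.
        Path hfilePath = new Path("/hbase/data/default/t1/r1/CF/hfile1");
        long offset = 0L;

        // Old behaviour: the key carries only the file name, so the cached block
        // cannot easily be attributed to its store file for per-region accounting.
        BlockCacheKey nameKey =
          new BlockCacheKey(hfilePath.getName(), offset, true, BlockType.DATA);

        // New behaviour (when the writer has a Path): the key is built from the full
        // path, which the region cached-size accounting can use right after the flush.
        BlockCacheKey pathKey = new BlockCacheKey(hfilePath, offset, true, BlockType.DATA);

        System.out.println(nameKey + " vs " + pathKey);
      }
    }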
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 0f54fafba95..c8c21e0625c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -556,9 +556,9 @@ public class HFileWriterImpl implements HFile.Writer {
private void doCacheOnWrite(long offset) {
cacheConf.getBlockCache().ifPresent(cache -> {
HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf);
+ BlockCacheKey key = buildCacheBlockKey(offset, cacheFormatBlock.getBlockType());
try {
- cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()),
- cacheFormatBlock, cacheConf.isInMemory(), true);
+ cache.cacheBlock(key, cacheFormatBlock, cacheConf.isInMemory(), true);
} finally {
// refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent
cacheFormatBlock.release();
@@ -566,6 +566,13 @@ public class HFileWriterImpl implements HFile.Writer {
});
}
+ private BlockCacheKey buildCacheBlockKey(long offset, BlockType blockType) {
+ if (path != null) {
+ return new BlockCacheKey(path, offset, true, blockType);
+ }
+ return new BlockCacheKey(name, offset, true, blockType);
+ }
+
/**
* Ready a new block for writing.
*/
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
index 8defda91555..82a06ade8e6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -67,6 +68,7 @@ public class TestCacheEviction {
UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true);
UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200);
+ UTIL.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
}
@Test
@@ -103,7 +105,7 @@ public class TestCacheEviction {
UTIL.startMiniCluster(1);
try {
TableName tableName = TableName.valueOf(table);
- createAndCacheTable(tableName);
+ createTable(tableName, true);
Collection<HStoreFile> files =
UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
checkCacheForBlocks(tableName, files, predicateBeforeSplit);
@@ -125,7 +127,7 @@ public class TestCacheEviction {
UTIL.startMiniCluster(1);
try {
TableName tableName = TableName.valueOf(table);
- createAndCacheTable(tableName);
+ createTable(tableName, true);
Collection<HStoreFile> files =
UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
checkCacheForBlocks(tableName, files, predicateBeforeClose);
@@ -139,7 +141,8 @@ public class TestCacheEviction {
}
}
- private void createAndCacheTable(TableName tableName) throws IOException, InterruptedException {
+ private void createTable(TableName tableName, boolean shouldFlushTable)
+ throws IOException, InterruptedException {
byte[] family = Bytes.toBytes("CF");
TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
@@ -153,7 +156,10 @@ public class TestCacheEviction {
puts.add(p);
}
tbl.put(puts);
- UTIL.getAdmin().flush(tableName);
+ if (shouldFlushTable) {
+ UTIL.getAdmin().flush(tableName);
+ Thread.sleep(5000);
+ }
}
private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> files,
@@ -167,4 +173,44 @@ public class TestCacheEviction {
});
});
}
+
+ @Test
+ public void testNoCacheWithoutFlush() throws Exception {
+ UTIL.startMiniCluster(1);
+ try {
+ TableName tableName = TableName.valueOf("tableNoCache");
+ createTable(tableName, false);
+ checkRegionCached(tableName, false);
+ } finally {
+ UTIL.shutdownMiniCluster();
+ }
+ }
+
+ @Test
+ public void testCacheWithFlush() throws Exception {
+ UTIL.startMiniCluster(1);
+ try {
+ TableName tableName = TableName.valueOf("tableWithFlush");
+ createTable(tableName, true);
+ checkRegionCached(tableName, true);
+ } finally {
+ UTIL.shutdownMiniCluster();
+ }
+ }
+
+ private void checkRegionCached(TableName tableName, boolean isCached) throws IOException {
+ UTIL.getMiniHBaseCluster().getRegions(tableName).forEach(r -> {
+ try {
+ UTIL.getMiniHBaseCluster().getClusterMetrics().getLiveServerMetrics().forEach((sn, sm) -> {
+ for (Map.Entry<byte[], RegionMetrics> rm : sm.getRegionMetrics().entrySet()) {
+ if (rm.getValue().getNameAsString().equals(r.getRegionInfo().getRegionNameAsString())) {
+ assertTrue(isCached == (rm.getValue().getCurrentRegionCachedRatio() > 0.0f));
+ }
+ }
+ });
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
}
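As a usage note, the metric exercised by the new tests is also visible to clients through the cluster metrics API. A minimal sketch, assuming a reachable cluster whose settings are picked up by HBaseConfiguration.create(); the class name CachedRatioSketch is hypothetical and not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CachedRatioSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          ClusterMetrics cm = admin.getClusterMetrics();
          // Walk every live region server and print the cached ratio per region; after a
          // flush with cache-on-write enabled this should now be non-zero right away.
          cm.getLiveServerMetrics().forEach((serverName, serverMetrics) ->
            serverMetrics.getRegionMetrics().forEach((regionName, regionMetrics) ->
              System.out.println(regionMetrics.getNameAsString() + " cached ratio: "
                + regionMetrics.getCurrentRegionCachedRatio())));
        }
      }
    }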