This is an automated email from the ASF dual-hosted git repository.
wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
     new 290ae1e9f5d HBASE-29707 Fix region cache % metrics miss calculation (#7451)
290ae1e9f5d is described below
commit 290ae1e9f5da06ba883b8f4d9069b646a533c81e
Author: Wellington Ramos Chevreuil <[email protected]>
AuthorDate: Fri Nov 14 11:22:06 2025 +0000
HBASE-29707 Fix region cache % metrics miss calculation (#7451)
Signed-off-by: Peter Somogyi <[email protected]>
Reviewed-by: Kevin Geiszler <[email protected]>
---
.../apache/hadoop/hbase/io/hfile/BlockCache.java | 8 ++
.../hadoop/hbase/io/hfile/BlockCacheKey.java | 6 +
.../hadoop/hbase/io/hfile/CombinedBlockCache.java | 5 +
.../hadoop/hbase/io/hfile/HFilePreadReader.java | 2 +-
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 61 ++++++----
.../apache/hadoop/hbase/util/HFileArchiveUtil.java | 12 ++
.../org/apache/hadoop/hbase/TestCacheEviction.java | 8 +-
.../hfile/bucket/TestPrefetchWithBucketCache.java | 131 ++++++++++++++++++++-
8 files changed, 205 insertions(+), 28 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 9297e7074a9..8bbf5e75e67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -100,6 +100,14 @@ public interface BlockCache extends Iterable<CachedBlock>, ConfigurationObserver
    */
   int evictBlocksByHfileName(String hfileName);
 
+  /**
+   * Evicts all blocks for the given HFile by path.
+   * @return the number of blocks evicted
+   */
+  default int evictBlocksByHfilePath(Path hfilePath) {
+    return evictBlocksByHfileName(hfilePath.getName());
+  }
+
   /**
    * Get the statistics for this block cache.
    */
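
The default method above preserves backward compatibility: any BlockCache implementation that does not override evictBlocksByHfilePath silently falls back to name-based eviction. A minimal standalone model of this dispatch pattern (not HBase code; the Cache interface below is illustrative only):

    import org.apache.hadoop.fs.Path;

    interface Cache {
      int evictBlocksByHfileName(String hfileName);
      // Same pattern as the new BlockCache default: the path variant falls back to name.
      default int evictBlocksByHfilePath(Path hfilePath) {
        return evictBlocksByHfileName(hfilePath.getName());
      }
    }

    public class DefaultEvictExample {
      public static void main(String[] args) {
        Cache nameOnly = hfileName -> 0; // overrides only the name-based variant
        Path p = new Path("/hbase/data/default/t1/0f3a1c2d/cf/d41d8cd98f00b204");
        System.out.println(nameOnly.evictBlocksByHfilePath(p)); // dispatches via the default
      }
    }
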
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index bcc1f58ba5e..f87b456c29b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -32,6 +32,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
   private final long offset;
   private BlockType blockType;
   private final boolean isPrimaryReplicaBlock;
+
   private Path filePath;
 
   /**
@@ -116,4 +117,9 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
   public Path getFilePath() {
     return filePath;
   }
+
+  public void setFilePath(Path filePath) {
+    this.filePath = filePath;
+  }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index e5d52858ab6..eb3e959b316 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -159,6 +159,11 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
     return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName);
   }
 
+  @Override
+  public int evictBlocksByHfilePath(Path hfilePath) {
+    return l1Cache.evictBlocksByHfilePath(hfilePath) + l2Cache.evictBlocksByHfilePath(hfilePath);
+  }
+
   @Override
   public CacheStats getStats() {
     return this.combinedCacheStats;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
index 147e2598ef9..39af3585112 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
@@ -185,7 +185,7 @@ public class HFilePreadReader extends HFileReaderImpl {
       // Deallocate data blocks
       cacheConf.getBlockCache().ifPresent(cache -> {
         if (evictOnClose) {
-          int numEvicted = cache.evictBlocksByHfileName(name);
+          int numEvicted = cache.evictBlocksByHfilePath(path);
           if (LOG.isTraceEnabled()) {
             LOG.trace("On close, file= {} evicted= {} block(s)", name, numEvicted);
           }
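
This call-site change is what feeds the full file path into the eviction chain on close. A hedged sketch of the difference, using the variable names from the surrounding method:

    // Before: only the file name reached the cache, so BucketCache could not tell
    // which region's cached-size metric to decrement when the reader closed.
    int evictedByName = cache.evictBlocksByHfileName(name);
    // After: the path still carries <table>/<region>/<cf>, so the path-aware
    // override in BucketCache can update regionCachedSize for each evicted block.
    int evictedByPath = cache.evictBlocksByHfilePath(path);
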
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5867fff0861..d779e32dc5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.IdReadWriteLock;
import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef;
import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;
@@ -722,7 +723,7 @@ public class BucketCache implements BlockCache, HeapSize {
       // the cache map state might differ from the actual cache. If we reach this block,
       // we should remove the cache key entry from the backing map
       backingMap.remove(key);
-      fullyCachedFiles.remove(key.getHfileName());
+      fileNotFullyCached(key, bucketEntry);
       LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
     } catch (IOException ioex) {
       LOG.error("Failed reading block " + key + " from bucket cache", ioex);
@@ -747,7 +748,7 @@ public class BucketCache implements BlockCache, HeapSize {
     if (decrementBlockNumber) {
       this.blockNumber.decrement();
       if (ioEngine.isPersistent()) {
-        fileNotFullyCached(cacheKey.getHfileName());
+        fileNotFullyCached(cacheKey, bucketEntry);
       }
     }
     if (evictedByEvictionProcess) {
@@ -758,23 +759,11 @@ public class BucketCache implements BlockCache, HeapSize {
     }
   }
 
-  private void fileNotFullyCached(String hfileName) {
-    // Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted
-    if (fullyCachedFiles.containsKey(hfileName)) {
-      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
-      String regionEncodedName = regionEntry.getFirst();
-      long filePrefetchSize = regionEntry.getSecond();
-      LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
-      regionCachedSize.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize);
-      // If all the blocks for a region are evicted from the cache, remove the entry for that region
-      if (
-        regionCachedSize.containsKey(regionEncodedName)
-          && regionCachedSize.get(regionEncodedName) == 0
-      ) {
-        regionCachedSize.remove(regionEncodedName);
-      }
-    }
-    fullyCachedFiles.remove(hfileName);
+  private void fileNotFullyCached(BlockCacheKey key, BucketEntry entry) {
+    // Call updateRegionCachedSize before removing the file from fullyCachedFiles.
+    // This computation should happen even if the file is not in the fullyCachedFiles map.
+    updateRegionCachedSize(key.getFilePath(), (entry.getLength() * -1));
+    fullyCachedFiles.remove(key.getHfileName());
   }
 
   public void fileCacheCompleted(Path filePath, long size) {
@@ -788,9 +777,19 @@ public class BucketCache implements BlockCache, HeapSize {
 
   private void updateRegionCachedSize(Path filePath, long cachedSize) {
     if (filePath != null) {
-      String regionName = filePath.getParent().getParent().getName();
-      regionCachedSize.merge(regionName, cachedSize,
-        (previousSize, newBlockSize) -> previousSize + newBlockSize);
+      if (HFileArchiveUtil.isHFileArchived(filePath)) {
+        LOG.trace("Skipping region cached size update for archived file: {}", filePath);
+      } else {
+        String regionName = filePath.getParent().getParent().getName();
+        regionCachedSize.merge(regionName, cachedSize,
+          (previousSize, newBlockSize) -> previousSize + newBlockSize);
+        LOG.trace("Updating region cached size for region: {}", regionName);
+        // If all the blocks for a region are evicted from the cache,
+        // remove the entry for that region from regionCachedSize map.
+        if (regionCachedSize.get(regionName) <= 0) {
+          regionCachedSize.remove(regionName);
+        }
+      }
     }
   }
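
The accounting above keys on the region's encoded name, which sits two path levels above the store file (<rootDir>/data/<namespace>/<table>/<region>/<cf>/<hfile>). A runnable sketch of the arithmetic, with a hypothetical path and block size:

    import org.apache.hadoop.fs.Path;

    public class RegionCachedSizeExample {
      public static void main(String[] args) {
        // Hypothetical store file: the region encoded name is the grandparent dir.
        Path hfile = new Path("/hbase/data/default/t1/0f3a1c2d/cf/d41d8cd98f00b204");
        String regionName = hfile.getParent().getParent().getName(); // "0f3a1c2d"
        // Caching merges a positive delta; eviction merges entry.getLength() * -1,
        // so a fully evicted region converges to 0 and its entry is removed.
        long cached = 0;
        cached += 65536L;      // one block cached
        cached += 65536L * -1; // the same block evicted
        System.out.println(regionName + " cached=" + cached); // 0 -> entry dropped
      }
    }
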
@@ -1698,7 +1697,7 @@ public class BucketCache implements BlockCache, HeapSize {
         } catch (IOException e1) {
           LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey());
           evictBlock(keyEntry.getKey());
-          fileNotFullyCached(keyEntry.getKey().getHfileName());
+          fileNotFullyCached(keyEntry.getKey(), keyEntry.getValue());
         }
       }
       backingMapValidated.set(true);
@@ -1928,13 +1927,20 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   @Override
-  public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) {
-    fileNotFullyCached(hfileName);
+  public int evictBlocksByHfilePath(Path hfilePath) {
+    return evictBlocksRangeByHfileName(hfilePath.getName(), hfilePath, 0, Long.MAX_VALUE);
+  }
+
+  public int evictBlocksRangeByHfileName(String hfileName, Path filePath, long initOffset,
+    long endOffset) {
     Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName, initOffset, endOffset);
     LOG.debug("found {} blocks for file {}, starting offset: {}, end offset: {}", keySet.size(),
       hfileName, initOffset, endOffset);
     int numEvicted = 0;
     for (BlockCacheKey key : keySet) {
+      if (filePath != null) {
+        key.setFilePath(filePath);
+      }
       if (evictBlock(key)) {
         ++numEvicted;
       }
@@ -1942,6 +1948,11 @@ public class BucketCache implements BlockCache, HeapSize {
     return numEvicted;
   }
 
+  @Override
+  public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) {
+    return evictBlocksRangeByHfileName(hfileName, null, initOffset, endOffset);
+  }
+
   private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long init, long end) {
     return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
       new BlockCacheKey(hfileName, end), true);
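
Keys recovered from blocksByHFile are built from file name and offset only, so they may carry no path (the range-eviction overload with a null filePath covers exactly that case). Stamping the path onto each key before evictBlock is what lets fileNotFullyCached hand a usable path to updateRegionCachedSize. A small illustration with a hypothetical path:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;

    public class KeyStampExample {
      public static void main(String[] args) {
        // A name-and-offset key knows nothing about its region until stamped.
        BlockCacheKey key = new BlockCacheKey("d41d8cd98f00b204", 0);
        key.setFilePath(new Path("/hbase/data/default/t1/0f3a1c2d/cf/d41d8cd98f00b204"));
        // Eviction-side accounting can now resolve the region encoded name.
        System.out.println(key.getFilePath().getParent().getParent().getName());
      }
    }
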
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 9f26eda12c1..0731fe654dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -200,4 +200,16 @@ public final class HFileArchiveUtil {
     if (p == null) return null;
     return TableName.valueOf(p.getName(), tbl);
   }
+
+  public static boolean isHFileArchived(Path path) {
+    Path currentDir = path;
+    for (int i = 0; i < 6; i++) {
+      currentDir = currentDir.getParent();
+      if (currentDir == null) {
+        return false;
+      }
+    }
+    return HConstants.HFILE_ARCHIVE_DIRECTORY.equals(currentDir.getName());
+  }
+
 }
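
Why six hops: archived store files live under <rootDir>/archive/data/<namespace>/<table>/<region>/<cf>/<hfile>, so walking six parents up from the file lands on the archive directory itself (HConstants.HFILE_ARCHIVE_DIRECTORY, i.e. "archive"). A quick check with hypothetical paths:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchiveCheckExample {
      public static void main(String[] args) {
        // cf <- 1 hop, region <- 2, table <- 3, namespace <- 4, data <- 5, archive <- 6
        Path archived = new Path("/hbase/archive/data/default/t1/0f3a1c2d/cf/d41d8cd98f00b204");
        Path live = new Path("/hbase/data/default/t1/0f3a1c2d/cf/d41d8cd98f00b204");
        System.out.println(HFileArchiveUtil.isHFileArchived(archived)); // true
        System.out.println(HFileArchiveUtil.isHFileArchived(live));     // false
      }
    }
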
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
index 8951b1b7241..802a0979d20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -66,11 +67,16 @@ public class TestCacheEviction {
     UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
     UTIL.getConfiguration().setBoolean(CACHE_BLOCKS_ON_WRITE_KEY, true);
     UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true);
-    UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
     UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200);
     UTIL.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
   }
 
+  @Before
+  public void testSetup() {
+    UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY,
+      "file:" + UTIL.getDataTestDir() + "/bucketcache");
+  }
+
   @Test
   public void testEvictOnSplit() throws Exception {
     doTestEvictOnSplit("testEvictOnSplit", true,
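
Switching the IOEngine from "offheap" to a file-backed one here is not incidental: the region cached-size bookkeeping in BucketCache (see the ioEngine.isPersistent() guard in the hunks above) only runs for persistent engines, and "offheap" is not persistent while "file:" engines are. A hedged configuration sketch with a hypothetical path:

    import org.apache.hadoop.conf.Configuration;

    // BUCKET_CACHE_IOENGINE_KEY / BUCKET_CACHE_SIZE_KEY resolve to these property names.
    Configuration conf = new Configuration();
    conf.set("hbase.bucketcache.ioengine", "file:/tmp/hbase-test/bucketcache");
    conf.setInt("hbase.bucketcache.size", 200);
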
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchWithBucketCache.java
index 34341ce983d..8e341979a59 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchWithBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestPrefetchWithBucketCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
@@ -71,12 +72,14 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -363,7 +366,7 @@ public class TestPrefetchWithBucketCache {
     BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
     MutableLong regionCachedSize = new MutableLong(0);
     // Our file should have 6 DATA blocks. We should wait for all of them to be cached
-    long waitedTime = Waiter.waitFor(conf, 300, () -> {
+    Waiter.waitFor(conf, 300, () -> {
       if (bc.getBackingMap().size() > 0) {
         long currentSize = bc.getRegionCachedInfo().get().get(regionName);
         assertTrue(regionCachedSize.getValue() <= currentSize);
@@ -374,6 +377,132 @@
     });
   }
 
+  @Test
+  public void testPrefetchMetricProgressForLinks() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+    final RegionInfo hri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    Path testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+    CommonFSUtils.setRootDir(testConf, testDir);
+    Path tableDir = CommonFSUtils.getTableDir(testDir, hri.getTable());
+    RegionInfo region =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    Path regionDir = new Path(tableDir, region.getEncodedName());
+    Path cfDir = new Path(regionDir, "cf");
+    HRegionFileSystem regionFS =
+      HRegionFileSystem.createRegionOnFileSystem(testConf, fs, tableDir, region);
+    Path storeFile = writeStoreFile(100, cfDir);
+    // Prefetches the file blocks
+    LOG.debug("First read should prefetch the blocks.");
+    readStoreFile(storeFile);
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+    long cachedSize = bc.getRegionCachedInfo().get().get(region.getEncodedName());
+
+    final RegionInfo dstHri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    HRegionFileSystem dstRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+      CommonFSUtils.getTableDir(testDir, dstHri.getTable()), dstHri);
+
+    Path dstPath = new Path(regionFS.getTableDir(), new Path(dstHri.getRegionNameAsString(), "cf"));
+
+    Path linkFilePath =
+      new Path(dstPath, HFileLink.createHFileLinkName(region, storeFile.getName()));
+
+    StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false,
+      StoreContext.getBuilder().withFamilyStoreDirectoryPath(dstPath)
+        .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
+        .withRegionFileSystem(dstRegionFs).build());
+    sft.createHFileLink(hri.getTable(), hri.getEncodedName(), storeFile.getName(), true);
+    StoreFileInfo sfi = sft.getStoreFileInfo(linkFilePath, true);
+
+    HStoreFile hsf = new HStoreFile(sfi, BloomType.NONE, cacheConf);
+    assertTrue(sfi.isLink());
+    hsf.initReader();
+    HFile.Reader reader = hsf.getReader().getHFileReader();
+    while (!reader.prefetchComplete()) {
+      // Sleep for a bit
+      Thread.sleep(1000);
+    }
+    // HFileLink uses the path of the target file to create a reader, so it should resolve to the
+    // already cached blocks and not insert new blocks in the cache.
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+
+    assertEquals(cachedSize, (long) bc.getRegionCachedInfo().get().get(region.getEncodedName()));
+  }
+
+  @Test
+  public void testPrefetchMetricProgressForLinksToArchived() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    Path testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+    CommonFSUtils.setRootDir(testConf, testDir);
+
+    final RegionInfo hri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    Path tableDir = CommonFSUtils.getTableDir(testDir, hri.getTable());
+    RegionInfo region =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    Path regionDir = new Path(tableDir, region.getEncodedName());
+    Path cfDir = new Path(regionDir, "cf");
+
+    Path storeFile = writeStoreFile(100, cfDir);
+    // Prefetches the file blocks
+    LOG.debug("First read should prefetch the blocks.");
+    readStoreFile(storeFile);
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+    long cachedSize = bc.getRegionCachedInfo().get().get(region.getEncodedName());
+
+    // create another file, but in the archive dir, hence it won't be cached
+    Path archiveRoot = new Path(testDir, "archive");
+    Path archiveTableDir = CommonFSUtils.getTableDir(archiveRoot, hri.getTable());
+    Path archiveRegionDir = new Path(archiveTableDir, region.getEncodedName());
+    Path archiveCfDir = new Path(archiveRegionDir, "cf");
+    Path archivedFile = writeStoreFile(100, archiveCfDir);
+
+    final RegionInfo testRegion =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    final HRegionFileSystem testRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+      CommonFSUtils.getTableDir(testDir, testRegion.getTable()), testRegion);
+    // Just create a link to the archived file
+    Path dstPath = new Path(tableDir, new Path(testRegion.getEncodedName(), "cf"));
+
+    Path linkFilePath =
+      new Path(dstPath, HFileLink.createHFileLinkName(region, archivedFile.getName()));
+
+    StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false,
+      StoreContext.getBuilder().withFamilyStoreDirectoryPath(dstPath)
+        .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
+        .withRegionFileSystem(testRegionFs).build());
+    sft.createHFileLink(hri.getTable(), hri.getEncodedName(), storeFile.getName(), true);
+    StoreFileInfo sfi = sft.getStoreFileInfo(linkFilePath, true);
+
+    HStoreFile hsf = new HStoreFile(sfi, BloomType.NONE, cacheConf);
+    assertTrue(sfi.isLink());
+    hsf.initReader();
+    HFile.Reader reader = hsf.getReader().getHFileReader();
+    while (!reader.prefetchComplete()) {
+      // Sleep for a bit
+      Thread.sleep(1000);
+    }
+    // HFileLink uses the path of the target file to create a reader, but the target file is in the
+    // archive, so it wasn't cached previously and should be cached when we open the link.
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 12);
+    // cached size for the region of the target file shouldn't change
+    assertEquals(cachedSize, (long) bc.getRegionCachedInfo().get().get(region.getEncodedName()));
+    // cached size for the region with link pointing to archive dir shouldn't be updated
+    assertNull(bc.getRegionCachedInfo().get().get(testRegion.getEncodedName()));
+  }
+
private void readStoreFile(Path storeFilePath) throws Exception {
readStoreFile(storeFilePath, (r, o) -> {
HFileBlock block = null;