This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
     new 32bd1ab4fdf HBASE-29707 Fix region cache % metrics miss calculation (#7467)
32bd1ab4fdf is described below

commit 32bd1ab4fdf2ce64b3d5f933d300e211a0e63710
Author: Wellington Ramos Chevreuil <[email protected]>
AuthorDate: Mon Nov 17 17:31:20 2025 +0000

    HBASE-29707 Fix region cache % metrics miss calculation (#7467)
    
    (cherry picked from commit 290ae1e9f5da06ba883b8f4d9069b646a533c81e)
    
    Signed-off-by: Peter Somogyi <[email protected]>
    Reviewed-by: Kevin Geiszler <[email protected]>
    Change-Id: Icc6fd32e3b9e56702a48bfa7ba53cb0a84764e0e
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |   8 ++
 .../hadoop/hbase/io/hfile/BlockCacheKey.java       |   6 +
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |   5 +
 .../hadoop/hbase/io/hfile/HFilePreadReader.java    |   2 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  61 ++++++----
 .../apache/hadoop/hbase/util/HFileArchiveUtil.java |  12 ++
 .../apache/hadoop/hbase/TestSplitWithCache.java    |   8 +-
 .../io/hfile/TestPrefetchWithBucketCache.java      | 131 ++++++++++++++++++++-
 8 files changed, 203 insertions(+), 30 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 313b4034fb8..d3b7eb2057e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -85,6 +85,14 @@ public interface BlockCache extends Iterable<CachedBlock>, ConfigurationObserver
    */
   int evictBlocksByHfileName(String hfileName);
 
+  /**
+   * Evicts all blocks for the given HFile by path.
+   * @return the number of blocks evicted
+   */
+  default int evictBlocksByHfilePath(Path hfilePath) {
+    return evictBlocksByHfileName(hfilePath.getName());
+  }
+
   /**
    * Get the statistics for this block cache.
    */
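
For context, the default method added above means any BlockCache implementation that only tracks blocks by file name keeps working when a caller hands it a full path: the default simply strips the path down to its last component. A minimal sketch of that fallback, using a hypothetical stand-in interface rather than the real BlockCache:

  import org.apache.hadoop.fs.Path;

  // Hypothetical stub mirroring the new default: path-based eviction falls
  // back to name-based eviction for caches that don't track file paths.
  public class EvictDefaultDemo {
    interface EvictableCache {
      int evictBlocksByHfileName(String hfileName);

      default int evictBlocksByHfilePath(Path hfilePath) {
        // Path#getName() is the last component, e.g. "abc123" below.
        return evictBlocksByHfileName(hfilePath.getName());
      }
    }

    public static void main(String[] args) {
      EvictableCache cache = name -> {
        System.out.println("evicting blocks of " + name);
        return 0;
      };
      cache.evictBlocksByHfilePath(new Path("/hbase/data/default/t1/r1/cf/abc123"));
    }
  }

BucketCache overrides this default (see below) because it also needs the path itself to keep the region cached size accounting correct.
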
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index bcc1f58ba5e..f87b456c29b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -32,6 +32,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
   private final long offset;
   private BlockType blockType;
   private final boolean isPrimaryReplicaBlock;
+
   private Path filePath;
 
   /**
@@ -116,4 +117,9 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
   public Path getFilePath() {
     return filePath;
   }
+
+  public void setFilePath(Path filePath) {
+    this.filePath = filePath;
+  }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 856b5da6d11..45301abe08c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -149,6 +149,11 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
     return l1Cache.evictBlocksByHfileName(hfileName) + l2Cache.evictBlocksByHfileName(hfileName);
   }
 
+  @Override
+  public int evictBlocksByHfilePath(Path hfilePath) {
+    return l1Cache.evictBlocksByHfilePath(hfilePath) + l2Cache.evictBlocksByHfilePath(hfilePath);
+  }
+
   @Override
   public CacheStats getStats() {
     return this.combinedCacheStats;
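
CombinedBlockCache fans the new call out to both tiers and sums the counts, matching the existing name-based variant right above it. A toy illustration of the pattern (stub tiers with made-up counts):

  import org.apache.hadoop.fs.Path;

  class TwoTierEvictionDemo {
    interface Tier {
      int evictBlocksByHfilePath(Path hfilePath);
    }

    // Mirrors the delegation above: evict from both tiers, report the total.
    static int evictFromBoth(Tier l1, Tier l2, Path hfilePath) {
      return l1.evictBlocksByHfilePath(hfilePath) + l2.evictBlocksByHfilePath(hfilePath);
    }

    public static void main(String[] args) {
      Tier l1 = p -> 2; // say two blocks were in the on-heap tier
      Tier l2 = p -> 4; // say four blocks were in the bucket cache
      System.out.println(evictFromBoth(l1, l2, new Path("/hbase/data/default/t1/r1/cf/abc123"))); // 6
    }
  }
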
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
index 3ef5f50db02..86dcdf97065 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
@@ -168,7 +168,7 @@ public class HFilePreadReader extends HFileReaderImpl {
     // Deallocate data blocks
     cacheConf.getBlockCache().ifPresent(cache -> {
       if (evictOnClose) {
-        int numEvicted = cache.evictBlocksByHfileName(name);
+        int numEvicted = cache.evictBlocksByHfilePath(path);
         if (LOG.isTraceEnabled()) {
           LOG.trace("On close, file= {} evicted= {} block(s)", name, 
numEvicted);
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 8b333bce0b5..9174be34b08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
 import org.apache.hadoop.hbase.util.Pair;
@@ -670,7 +671,7 @@ public class BucketCache implements BlockCache, HeapSize {
         // the cache map state might differ from the actual cache. If we reach this block,
         // we should remove the cache key entry from the backing map
         backingMap.remove(key);
-        fileNotFullyCached(key.getHfileName());
+        fileNotFullyCached(key, bucketEntry);
         LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
       } catch (IOException ioex) {
         LOG.error("Failed reading block " + key + " from bucket cache", ioex);
@@ -695,7 +696,7 @@ public class BucketCache implements BlockCache, HeapSize {
     if (decrementBlockNumber) {
       this.blockNumber.decrement();
       if (ioEngine.isPersistent()) {
-        fileNotFullyCached(cacheKey.getHfileName());
+        fileNotFullyCached(cacheKey, bucketEntry);
       }
     }
     if (evictedByEvictionProcess) {
@@ -706,23 +707,11 @@ public class BucketCache implements BlockCache, HeapSize {
     }
   }
 
-  private void fileNotFullyCached(String hfileName) {
-    // Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted
-    if (fullyCachedFiles.containsKey(hfileName)) {
-      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
-      String regionEncodedName = regionEntry.getFirst();
-      long filePrefetchSize = regionEntry.getSecond();
-      LOG.debug("Removing file {} for region {}", hfileName, 
regionEncodedName);
-      regionCachedSize.computeIfPresent(regionEncodedName, (rn, pf) -> pf - 
filePrefetchSize);
-      // If all the blocks for a region are evicted from the cache, remove the entry for that region
-      if (
-        regionCachedSize.containsKey(regionEncodedName)
-          && regionCachedSize.get(regionEncodedName) == 0
-      ) {
-        regionCachedSize.remove(regionEncodedName);
-      }
-    }
-    fullyCachedFiles.remove(hfileName);
+  private void fileNotFullyCached(BlockCacheKey key, BucketEntry entry) {
+    // Update the region cached size before removing the file from fullyCachedFiles.
+    // This computation should happen even if the file is not in the fullyCachedFiles map.
+    updateRegionCachedSize(key.getFilePath(), (entry.getLength() * -1));
+    fullyCachedFiles.remove(key.getHfileName());
   }
 
   public void fileCacheCompleted(Path filePath, long size) {
@@ -736,9 +725,19 @@ public class BucketCache implements BlockCache, HeapSize {
 
   private void updateRegionCachedSize(Path filePath, long cachedSize) {
     if (filePath != null) {
-      String regionName = filePath.getParent().getParent().getName();
-      regionCachedSize.merge(regionName, cachedSize,
-        (previousSize, newBlockSize) -> previousSize + newBlockSize);
+      if (HFileArchiveUtil.isHFileArchived(filePath)) {
+        LOG.trace("Skipping region cached size update for archived file: {}", 
filePath);
+      } else {
+        String regionName = filePath.getParent().getParent().getName();
+        regionCachedSize.merge(regionName, cachedSize,
+          (previousSize, newBlockSize) -> previousSize + newBlockSize);
+        LOG.trace("Updating region cached size for region: {}", regionName);
+        // If all the blocks for a region are evicted from the cache,
+        // remove the entry for that region from regionCachedSize map.
+        if (regionCachedSize.get(regionName) <= 0) {
+          regionCachedSize.remove(regionName);
+        }
+      }
     }
   }
 
@@ -1608,7 +1607,7 @@ public class BucketCache implements BlockCache, HeapSize {
           } catch (IOException e1) {
             LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey());
             evictBlock(keyEntry.getKey());
-            fileNotFullyCached(keyEntry.getKey().getHfileName());
+            fileNotFullyCached(keyEntry.getKey(), keyEntry.getValue());
           }
         }
         backingMapValidated.set(true);
@@ -1854,13 +1853,20 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   @Override
-  public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) {
-    fileNotFullyCached(hfileName);
+  public int evictBlocksByHfilePath(Path hfilePath) {
+    return evictBlocksRangeByHfileName(hfilePath.getName(), hfilePath, 0, Long.MAX_VALUE);
+  }
+
+  public int evictBlocksRangeByHfileName(String hfileName, Path filePath, long initOffset,
+    long endOffset) {
     Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName, initOffset, endOffset);
     LOG.debug("found {} blocks for file {}, starting offset: {}, end offset: 
{}", keySet.size(),
       hfileName, initOffset, endOffset);
     int numEvicted = 0;
     for (BlockCacheKey key : keySet) {
+      if (filePath != null) {
+        key.setFilePath(filePath);
+      }
       if (evictBlock(key)) {
         ++numEvicted;
       }
@@ -1868,6 +1874,11 @@ public class BucketCache implements BlockCache, HeapSize {
     return numEvicted;
   }
 
+  @Override
+  public int evictBlocksRangeByHfileName(String hfileName, long initOffset, long endOffset) {
+    return evictBlocksRangeByHfileName(hfileName, null, initOffset, endOffset);
+  }
+
   private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long init, long end) {
     return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
       new BlockCacheKey(hfileName, end), true);
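
The reworked accounting above is easier to see in isolation: caching a block adds its length under the owning region's encoded name, evicting one applies the same length negated (entry.getLength() * -1), and the region's entry is dropped once the total reaches zero, so the region cache % metric no longer drifts. A standalone sketch of that bookkeeping (region name and block sizes are illustrative):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  public class RegionCachedSizeDemo {
    static final ConcurrentMap<String, Long> regionCachedSize = new ConcurrentHashMap<>();

    // Same shape as updateRegionCachedSize: merge the delta, then drop the
    // entry once nothing from the region remains cached.
    static void update(String regionName, long delta) {
      regionCachedSize.merge(regionName, delta, Long::sum);
      if (regionCachedSize.get(regionName) <= 0) {
        regionCachedSize.remove(regionName);
      }
    }

    public static void main(String[] args) {
      update("regionA", 65536);   // block cached
      update("regionA", 65536);   // second block cached
      update("regionA", -65536);  // one block evicted
      System.out.println(regionCachedSize); // {regionA=65536}
      update("regionA", -65536);  // last block evicted -> entry removed
      System.out.println(regionCachedSize); // {}
    }
  }
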
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 9f26eda12c1..0731fe654dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -200,4 +200,16 @@ public final class HFileArchiveUtil {
     if (p == null) return null;
     return TableName.valueOf(p.getName(), tbl);
   }
+
+  public static boolean isHFileArchived(Path path) {
+    Path currentDir = path;
+    for (int i = 0; i < 6; i++) {
+      currentDir = currentDir.getParent();
+      if (currentDir == null) {
+        return false;
+      }
+    }
+    return HConstants.HFILE_ARCHIVE_DIRECTORY.equals(currentDir.getName());
+  }
+
 }
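
The depth in the loop above encodes the archive layout: archived store files live at <root>/archive/data/<namespace>/<table>/<region>/<cf>/<hfile>, so walking six parents up from an archived HFile lands exactly on the "archive" directory. A small standalone check of that walk (paths made up for illustration):

  import org.apache.hadoop.fs.Path;

  public class ArchivedPathDemo {
    // Walk up six levels, as isHFileArchived does, and return that directory's name.
    static String sixthParentName(Path path) {
      Path current = path;
      for (int i = 0; i < 6; i++) {
        current = current.getParent();
        if (current == null) {
          return null; // path too shallow to be an archived HFile
        }
      }
      return current.getName();
    }

    public static void main(String[] args) {
      Path archived = new Path("/hbase/archive/data/default/t1/e4b1f7/cf/hfile1");
      Path live = new Path("/hbase/data/default/t1/e4b1f7/cf/hfile1");
      System.out.println(sixthParentName(archived)); // "archive"
      System.out.println(sixthParentName(live));     // "hbase" -> not archived
    }
  }

A fixed-depth check like this is cheap and only matches "archive" at the expected position in the layout, rather than anywhere in the path.
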
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java
index 91e65610f81..74bd2f5a379 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSplitWithCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -63,10 +64,15 @@ public class TestSplitWithCache {
     UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
     UTIL.getConfiguration().setBoolean(CACHE_BLOCKS_ON_WRITE_KEY, true);
     UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true);
-    UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
     UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200);
   }
 
+  @Before
+  public void testSetup() {
+    UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY,
+      "file:" + UTIL.getDataTestDir() + "/bucketcache");
+  }
+
   @Test
   public void testEvictOnSplit() throws Exception {
     doTest("testEvictOnSplit", true,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
index 15fc42656ad..714c857963a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
@@ -25,7 +25,6 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-
 import java.io.File;
 import java.io.IOException;
 import java.util.Map;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry;
 import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -55,12 +55,14 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -70,7 +72,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 
 @Category({ IOTests.class, MediumTests.class })
@@ -268,7 +269,7 @@ public class TestPrefetchWithBucketCache {
     BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
     MutableLong regionCachedSize = new MutableLong(0);
     // Our file should have 6 DATA blocks. We should wait for all of them to be cached
-    long waitedTime = Waiter.waitFor(conf, 300, () -> {
+    Waiter.waitFor(conf, 300, () -> {
       if (bc.getBackingMap().size() > 0) {
         long currentSize = bc.getRegionCachedInfo().get().get(regionName);
         assertTrue(regionCachedSize.getValue() <= currentSize);
@@ -279,6 +280,130 @@ public class TestPrefetchWithBucketCache {
     });
   }
 
+  @Test
+  public void testPrefetchMetricProgressForLinks() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+    final RegionInfo hri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    Path testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+    CommonFSUtils.setRootDir(testConf, testDir);
+    Path tableDir = CommonFSUtils.getTableDir(testDir, hri.getTable());
+    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    Path regionDir = new Path(tableDir, region.getEncodedName());
+    Path cfDir = new Path(regionDir, "cf");
+    HRegionFileSystem regionFS =
+      HRegionFileSystem.createRegionOnFileSystem(testConf, fs, tableDir, region);
+    Path storeFile = writeStoreFile(100, cfDir);
+    // Prefetches the file blocks
+    LOG.debug("First read should prefetch the blocks.");
+    readStoreFile(storeFile);
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+    long cachedSize = bc.getRegionCachedInfo().get().get(region.getEncodedName());
+
+    final RegionInfo dstHri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    HRegionFileSystem dstRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+      CommonFSUtils.getTableDir(testDir, dstHri.getTable()), dstHri);
+
+    Path dstPath = new Path(regionFS.getTableDir(), new Path(dstHri.getRegionNameAsString(), "cf"));
+
+    Path linkFilePath =
+      new Path(dstPath, HFileLink.createHFileLinkName(region, storeFile.getName()));
+
+    StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false,
+      StoreContext.getBuilder().withFamilyStoreDirectoryPath(dstPath)
+        .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
+        .withRegionFileSystem(dstRegionFs).build());
+    StoreFileInfo sfi = sft.getStoreFileInfo(linkFilePath, true);
+
+    HStoreFile hsf = new HStoreFile(sfi, BloomType.NONE, cacheConf);
+    assertTrue(sfi.isLink());
+    hsf.initReader();
+    HFile.Reader reader = hsf.getReader().getHFileReader();
+    while (!reader.prefetchComplete()) {
+      // Sleep for a bit
+      Thread.sleep(1000);
+    }
+    // HFileLink uses the path of the target file to create a reader, so it should resolve to the
+    // already cached blocks and not insert new blocks in the cache.
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+
+    assertEquals(cachedSize, (long) bc.getRegionCachedInfo().get().get(region.getEncodedName()));
+  }
+
+  @Test
+  public void testPrefetchMetricProgressForLinksToArchived() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    Path testDir = TEST_UTIL.getDataTestDir(name.getMethodName());
+    CommonFSUtils.setRootDir(testConf, testDir);
+
+    final RegionInfo hri =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
+    Path tableDir = CommonFSUtils.getTableDir(testDir, hri.getTable());
+    RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    Path regionDir = new Path(tableDir, region.getEncodedName());
+    Path cfDir = new Path(regionDir, "cf");
+
+    Path storeFile = writeStoreFile(100, cfDir);
+    // Prefetches the file blocks
+    LOG.debug("First read should prefetch the blocks.");
+    readStoreFile(storeFile);
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 6);
+    long cachedSize = bc.getRegionCachedInfo().get().get(region.getEncodedName());
+
+    // create another file, but in the archive dir, hence it won't be cached
+    Path archiveRoot = new Path(testDir, "archive");
+    Path archiveTableDir = CommonFSUtils.getTableDir(archiveRoot, hri.getTable());
+    Path archiveRegionDir = new Path(archiveTableDir, region.getEncodedName());
+    Path archiveCfDir = new Path(archiveRegionDir, "cf");
+    Path archivedFile = writeStoreFile(100, archiveCfDir);
+
+    final RegionInfo testRegion =
+      RegionInfoBuilder.newBuilder(TableName.valueOf(tableDir.getName())).build();
+    final HRegionFileSystem testRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
+      CommonFSUtils.getTableDir(testDir, testRegion.getTable()), testRegion);
+    // Just create a link to the archived file
+    Path dstPath = new Path(tableDir, new Path(testRegion.getEncodedName(), "cf"));
+
+    Path linkFilePath =
+      new Path(dstPath, HFileLink.createHFileLinkName(region, archivedFile.getName()));
+
+    StoreFileTracker sft = StoreFileTrackerFactory.create(testConf, false,
+      StoreContext.getBuilder().withFamilyStoreDirectoryPath(dstPath)
+        .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.of("cf"))
+        .withRegionFileSystem(testRegionFs).build());
+    StoreFileInfo sfi = sft.getStoreFileInfo(linkFilePath, true);
+
+    HStoreFile hsf = new HStoreFile(sfi, BloomType.NONE, cacheConf);
+    assertTrue(sfi.isLink());
+    hsf.initReader();
+    HFile.Reader reader = hsf.getReader().getHFileReader();
+    while (!reader.prefetchComplete()) {
+      // Sleep for a bit
+      Thread.sleep(1000);
+    }
+    // HFileLink uses the path of the target file to create a reader, but the target file is in the
+    // archive, so it wasn't cached previously and should be cached when we open the link.
+    Waiter.waitFor(testConf, 300, () -> bc.getBackingMap().size() == 12);
+    // cached size for the region of target file shouldn't change
+    assertEquals(cachedSize, (long) bc.getRegionCachedInfo().get().get(region.getEncodedName()));
+    // cached size for the region with link pointing to archive dir shouldn't be updated
+    assertNull(bc.getRegionCachedInfo().get().get(testRegion.getEncodedName()));
+  }
+
   private void readStoreFile(Path storeFilePath) throws Exception {
     readStoreFile(storeFilePath, (r, o) -> {
       HFileBlock block = null;
