This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 697f72419fb75a817c27635c03612e983c5bc453
Author: jhungund <[email protected]>
AuthorDate: Thu May 2 13:54:33 2024 +0530

    HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)
    
    Signed-off-by: Wellington Chevreuil <[email protected]>
    Change-Id: Iace49e6f57b15ebe44ab12591ed72be1d20e0391
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 20 ++---
 .../hbase/regionserver/DataTieringManager.java     | 32 +++++---
 .../hadoop/hbase/regionserver/HRegionServer.java   |  4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 91 +++++++++++++++++++---
 4 files changed, 113 insertions(+), 34 deletions(-)
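
The switch introduced here is the region-server wide key "hbase.regionserver.datatiering.enable", which defaults to false. Below is a minimal sketch of turning the feature on programmatically, based only on the key and the new instantiate() signature in the diff that follows; the onlineRegions map stands in for whatever region map the caller already holds:

    Configuration conf = HBaseConfiguration.create();
    // Data tiering stays off unless the new key is explicitly set to true.
    conf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
    // instantiate() now returns true only when the feature is enabled and the
    // singleton has not been created yet; otherwise it returns false.
    boolean created = DataTieringManager.instantiate(conf, onlineRegions);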

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 72a595dd570..f5b29b4d747 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1143,11 +1143,10 @@ public class BucketCache implements BlockCache, HeapSize {
       long bytesFreed = 0;
      // Check the list of files to determine the cold files which can be readily evicted.
       Map<String, String> coldFiles = null;
-      try {
-        DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+
+      DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+      if (dataTieringManager != null) {
         coldFiles = dataTieringManager.getColdFilesList();
-      } catch (IllegalStateException e) {
-        LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
       }
       // Scan entire map putting bucket entry into appropriate bucket entry
       // group
@@ -2436,16 +2435,11 @@ public class BucketCache implements BlockCache, HeapSize {
   @Override
  public Optional<Boolean> shouldCacheFile(HFileInfo hFileInfo, Configuration conf) {
     String fileName = hFileInfo.getHFileContext().getHFileName();
-    try {
-      DataTieringManager dataTieringManager = DataTieringManager.getInstance();
-      if (!dataTieringManager.isHotData(hFileInfo, conf)) {
-        LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
-        return Optional.of(false);
-      }
-    } catch (IllegalStateException e) {
-      LOG.error("Error while getting DataTieringManager instance: {}", 
e.getMessage());
+    DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+    if (dataTieringManager != null && !dataTieringManager.isHotData(hFileInfo, conf)) {
+      LOG.debug("Data tiering is enabled for file: '{}' and it is not hot data", fileName);
+      return Optional.of(false);
     }
-
     // if we don't have the file in fullyCachedFiles, we should cache it
     return Optional.of(!fullyCachedFiles.containsKey(fileName));
   }
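
Since getInstance() no longer throws IllegalStateException (see the DataTieringManager change below), callers move from try/catch to a plain null check. A caller-side sketch of the new contract, not taken verbatim from the patch:

    DataTieringManager dataTieringManager = DataTieringManager.getInstance();
    if (dataTieringManager == null) {
      // Data tiering is disabled or not instantiated; fall back to the default behaviour.
    } else {
      // Safe to consult tiering decisions here, e.g. dataTieringManager.getColdFilesList().
    }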
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index a1bc2660380..d3bdbf330cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -45,6 +45,9 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class DataTieringManager {
  private static final Logger LOG = LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String GLOBAL_DATA_TIERING_ENABLED_KEY =
+    "hbase.regionserver.datatiering.enable";
+  public static final boolean DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false; // disabled by default
   public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
   public static final String DATATIERING_HOT_DATA_AGE_KEY =
     "hbase.hstore.datatiering.hot.age.millis";
@@ -58,28 +61,29 @@ public class DataTieringManager {
   }
 
   /**
-   * Initializes the DataTieringManager instance with the provided map of online regions.
+   * Initializes the DataTieringManager instance with the provided map of online regions, only if
+   * the configuration "hbase.regionserver.datatiering.enable" is enabled.
+   * @param conf          Configuration object.
    * @param onlineRegions A map containing online regions.
+   * @return True if the instance is instantiated successfully, false otherwise.
    */
-  public static synchronized void instantiate(Map<String, HRegion> onlineRegions) {
-    if (instance == null) {
+  public static synchronized boolean instantiate(Configuration conf,
+    Map<String, HRegion> onlineRegions) {
+    if (isDataTieringFeatureEnabled(conf) && instance == null) {
       instance = new DataTieringManager(onlineRegions);
       LOG.info("DataTieringManager instantiated successfully.");
+      return true;
     } else {
       LOG.warn("DataTieringManager is already instantiated.");
     }
+    return false;
   }
 
   /**
    * Retrieves the instance of DataTieringManager.
-   * @return The instance of DataTieringManager.
-   * @throws IllegalStateException if DataTieringManager has not been instantiated.
+   * @return The instance of DataTieringManager if instantiated, null otherwise.
    */
   public static synchronized DataTieringManager getInstance() {
-    if (instance == null) {
-      throw new IllegalStateException(
-        "DataTieringManager has not been instantiated. Call instantiate() 
first.");
-    }
     return instance;
   }
 
@@ -308,4 +312,14 @@ public class DataTieringManager {
     }
     return coldFiles;
   }
+
+  private static boolean isDataTieringFeatureEnabled(Configuration conf) {
+    return conf.getBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY,
+      DataTieringManager.DEFAULT_GLOBAL_DATA_TIERING_ENABLED);
+  }
+
+  // Resets the instance to null. To be used only for testing.
+  public static void resetForTestingOnly() {
+    instance = null;
+  }
 }
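
Put together, the lifecycle of the singleton under the new key looks roughly like the outline below; this is an illustrative sketch rather than code from the patch, with conf and onlineRegions assumed to exist:

    // Feature disabled (the default): instantiate() is a no-op and returns false.
    assertFalse(DataTieringManager.instantiate(conf, onlineRegions));
    assertNull(DataTieringManager.getInstance());

    // Enable the feature and retry: the singleton is created this time.
    conf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
    assertTrue(DataTieringManager.instantiate(conf, onlineRegions));

    // Tests can clear the singleton between cases.
    DataTieringManager.resetForTestingOnly();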
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f1615e5e1e9..8fa555cc088 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -700,7 +700,9 @@ public class HRegionServer extends Thread
      // no need to instantiate block cache and mob file cache when master not carry table
       if (!isMasterNotCarryTable) {
         blockCache = BlockCacheFactory.createBlockCache(conf);
-        DataTieringManager.instantiate(onlineRegions);
+        // The call below instantiates the DataTieringManager only when
+        // the configuration "hbase.regionserver.datatiering.enable" is set to true.
+        DataTieringManager.instantiate(conf, onlineRegions);
         mobFileCache = new MobFileCache(conf);
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
index 9c8073961b8..b74937bf94d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import java.io.IOException;
@@ -66,6 +68,8 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is used to test the functionality of the DataTieringManager.
@@ -94,6 +98,7 @@ public class TestDataTieringManager {
     HBaseClassTestRule.forClass(TestDataTieringManager.class);
 
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Logger LOG = LoggerFactory.getLogger(TestDataTieringManager.class);
   private static Configuration defaultConf;
   private static FileSystem fs;
   private static BlockCache blockCache;
@@ -118,10 +123,11 @@ public class TestDataTieringManager {
     defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
     defaultConf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
     defaultConf.setLong(BUCKET_CACHE_SIZE_KEY, 32);
+    defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
     fs = HFileSystem.get(defaultConf);
     blockCache = BlockCacheFactory.createBlockCache(defaultConf);
     cacheConf = new CacheConfig(defaultConf, blockCache);
-    DataTieringManager.instantiate(testOnlineRegions);
+    assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
     setupOnlineRegions();
     dataTieringManager = DataTieringManager.getInstance();
   }
@@ -283,9 +289,9 @@ public class TestDataTieringManager {
     int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
 
     // Setup: Create a bucket cache with lower capacity
-    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
-      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
-      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+    BucketCache bucketCache =
+      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, 
bucketSizes,
+        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, 
defaultConf);
 
     // Create three Cache keys with cold data files and a block with hot data.
    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
@@ -333,9 +339,9 @@ public class TestDataTieringManager {
     int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
 
     // Setup: Create a bucket cache with lower capacity
-    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
-      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
-      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+    BucketCache bucketCache =
+      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, 
bucketSizes,
+        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, 
defaultConf);
 
     // Create three Cache keys with three cold data blocks.
     // hStoreFiles.get(3) is a cold data file.
@@ -380,9 +386,9 @@ public class TestDataTieringManager {
     int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
 
     // Setup: Create a bucket cache with lower capacity
-    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
-      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
-      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+    BucketCache bucketCache =
+      new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, 
bucketSizes,
+        writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, 
defaultConf);
 
     // Create three Cache keys with two hot data blocks and one cold data block
    // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file.
@@ -417,11 +423,74 @@ public class TestDataTieringManager {
     validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
   }
 
+  @Test
+  public void testFeatureKeyDisabled() throws Exception {
+    DataTieringManager.resetForTestingOnly();
+    defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, false);
+    try {
+      assertFalse(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
+      // Verify that the DataTieringManager instance is not instantiated in the
+      // instantiate call above.
+      assertNull(DataTieringManager.getInstance());
+
+      // Also validate that data temperature is not honoured.
+      long capacitySize = 40 * 1024;
+      int writeThreads = 3;
+      int writerQLen = 64;
+      int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+      // Setup: Create a bucket cache with lower capacity
+      BucketCache bucketCache =
+        new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 
8192, bucketSizes,
+          writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, 
defaultConf);
+
+      // Create three Cache keys with two hot data blocks and one cold data block
+      // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file.
+      List<BlockCacheKey> cacheKeys = new ArrayList<>();
+      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA));
+      cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+
+      // Create dummy data to be cached and fill the cache completely.
+      CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+      int blocksIter = 0;
+      for (BlockCacheKey key : cacheKeys) {
+        LOG.info("Adding {}", key);
+        bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+        // Ensure that the block is persisted to the file.
+        Waiter.waitFor(defaultConf, 10000, 100,
+          () -> (bucketCache.getBackingMap().containsKey(key)));
+      }
+
+      // Verify that the bucket cache contains 3 blocks.
+      assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+      // Add an additional hot block, which triggers eviction.
+      BlockCacheKey newKey =
+        new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+      CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+      bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+      Waiter.waitFor(defaultConf, 10000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+      // Verify that the bucket still contains the only cold block and one newly added hot block.
+      // The older hot blocks are evicted and the data-tiering mechanism does not kick in to evict
+      // the cold block.
+      validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1);
+    } finally {
+      DataTieringManager.resetForTestingOnly();
+      defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
+      assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
+    }
+  }
+
  private void validateBlocks(Set<BlockCacheKey> keys, int expectedTotalKeys, int expectedHotBlocks,
     int expectedColdBlocks) {
     int numHotBlocks = 0, numColdBlocks = 0;
 
-    assertEquals(expectedTotalKeys, keys.size());
+    Waiter.waitFor(defaultConf, 10000, 100, () -> (expectedTotalKeys == keys.size()));
     int iter = 0;
     for (BlockCacheKey key : keys) {
       try {
