(hbase) branch branch-2.6 updated (a85cf651941 -> eaa8e3499bb)

2024-07-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


from a85cf651941 HBASE-28734 Improve HBase shell snapshot command Doc with 
TTL option (#6107)
 add eaa8e3499bb HBASE-28724 BucketCache.notifyFileCachingCompleted may 
throw IllegalMonitorStateException (#6074) (#6081)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  1 +
 .../hadoop/hbase/io/hfile/CacheTestUtils.java  | 25 ++
 .../hbase/io/hfile/bucket/TestBucketCache.java | 53 ++
 3 files changed, 71 insertions(+), 8 deletions(-)



(hbase) branch branch-2 updated: HBASE-28724 BucketCache.notifyFileCachingCompleted may throw IllegalMonitorStateException (#6074) (#6081)

2024-07-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 317d3aad115 HBASE-28724 BucketCache.notifyFileCachingCompleted may 
throw IllegalMonitorStateException (#6074) (#6081)
317d3aad115 is described below

commit 317d3aad115aae2e4a3c0261a932419c4a6a1bb9
Author: Wellington Ramos Chevreuil 
AuthorDate: Mon Jul 22 15:39:00 2024 +0100

HBASE-28724 BucketCache.notifyFileCachingCompleted may throw 
IllegalMonitorStateException (#6074) (#6081)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  1 +
 .../hadoop/hbase/io/hfile/CacheTestUtils.java  | 25 ++
 .../hbase/io/hfile/bucket/TestBucketCache.java | 53 ++
 3 files changed, 71 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 9446c662aff..2d360da0be7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2092,6 +2092,7 @@ public class BucketCache implements BlockCache, HeapSize {
   for (ReentrantReadWriteLock lock : locks) {
 lock.readLock().unlock();
   }
+  locks.clear();
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName.getName());
   Thread.sleep(100);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 40de5b31b10..728969c1275 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
@@ -275,6 +276,10 @@ public class CacheTestUtils {
   }
 
   public static HFileBlockPair[] generateHFileBlocks(int blockSize, int 
numBlocks) {
+return generateBlocksForPath(blockSize, numBlocks, null);
+  }
+
+  public static HFileBlockPair[] generateBlocksForPath(int blockSize, int 
numBlocks, Path path) {
 HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
 Random rand = ThreadLocalRandom.current();
 HashSet usedStrings = new HashSet<>();
@@ -299,16 +304,20 @@ public class CacheTestUtils {
   prevBlockOffset, ByteBuff.wrap(cachedBuffer), 
HFileBlock.DONT_FILL_HEADER, blockSize,
   onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, 
meta,
   ByteBuffAllocator.HEAP);
-
-  String strKey;
-  /* No conflicting keys */
-  strKey = Long.toString(rand.nextLong());
-  while (!usedStrings.add(strKey)) {
-strKey = Long.toString(rand.nextLong());
+  String key = null;
+  long offset = 0;
+  if (path != null) {
+key = path.getName();
+offset = i * blockSize;
+  } else {
+/* No conflicting keys */
+key = Long.toString(rand.nextLong());
+while (!usedStrings.add(key)) {
+  key = Long.toString(rand.nextLong());
+}
   }
-
   returnedBlocks[i] = new HFileBlockPair();
-  returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
+  returnedBlocks[i].blockName = new BlockCacheKey(key, offset);
   returnedBlocks[i].block = generated;
 }
 return returnedBlocks;
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 3dcb6d6abfb..fbe4843d152 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -890,4 +890,57 @@ public class TestBucketCache {
   HBASE_TESTING_UTILITY.cleanupTestDir();
 }
   }
+
+  @Test
+  public void testNotifyFileCachingCompletedSuccess() throws Exception {
+BucketCache bucketCache = null;
+try {
+  Path filePath =
+new Path(HBASE_TESTING_UTILITY.getDataTestDir(), 
"testNotifyFileCachingCompletedSuccess");
+  bucketCache = testNot

(hbase) branch branch-3 updated: HBASE-28724 BucketCache.notifyFileCachingCompleted may throw IllegalMonitorStateException (#6074)

2024-07-15 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 946bc39b4f8 HBASE-28724 BucketCache.notifyFileCachingCompleted may 
throw IllegalMonitorStateException (#6074)
946bc39b4f8 is described below

commit 946bc39b4f8a4d05a892d26bd6274841fb273381
Author: Wellington Ramos Chevreuil 
AuthorDate: Mon Jul 15 11:00:10 2024 +0100

HBASE-28724 BucketCache.notifyFileCachingCompleted may throw 
IllegalMonitorStateException (#6074)

Signed-off-by: Peter Somogyi 

(cherry picked from commit 2b673bd429a4bcbdacd7e44716ccb324398affe6)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  1 +
 .../hadoop/hbase/io/hfile/CacheTestUtils.java  | 25 ++
 .../hbase/io/hfile/bucket/TestBucketCache.java | 53 ++
 3 files changed, 71 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 8ee0b6b98ad..5816b8ff160 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2108,6 +2108,7 @@ public class BucketCache implements BlockCache, HeapSize {
   for (ReentrantReadWriteLock lock : locks) {
 lock.readLock().unlock();
   }
+  locks.clear();
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName);
   Thread.sleep(100);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 262408e91a8..848f33bb9c3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
@@ -275,6 +276,10 @@ public class CacheTestUtils {
   }
 
   public static HFileBlockPair[] generateHFileBlocks(int blockSize, int 
numBlocks) {
+return generateBlocksForPath(blockSize, numBlocks, null);
+  }
+
+  public static HFileBlockPair[] generateBlocksForPath(int blockSize, int 
numBlocks, Path path) {
 HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
 Random rand = ThreadLocalRandom.current();
 HashSet usedStrings = new HashSet<>();
@@ -299,16 +304,20 @@ public class CacheTestUtils {
   prevBlockOffset, ByteBuff.wrap(cachedBuffer), 
HFileBlock.DONT_FILL_HEADER, blockSize,
   onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, 
meta,
   ByteBuffAllocator.HEAP);
-
-  String strKey;
-  /* No conflicting keys */
-  strKey = Long.toString(rand.nextLong());
-  while (!usedStrings.add(strKey)) {
-strKey = Long.toString(rand.nextLong());
+  String key = null;
+  long offset = 0;
+  if (path != null) {
+key = path.getName();
+offset = i * blockSize;
+  } else {
+/* No conflicting keys */
+key = Long.toString(rand.nextLong());
+while (!usedStrings.add(key)) {
+  key = Long.toString(rand.nextLong());
+}
   }
-
   returnedBlocks[i] = new HFileBlockPair();
-  returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
+  returnedBlocks[i].blockName = new BlockCacheKey(key, offset);
   returnedBlocks[i].block = generated;
 }
 return returnedBlocks;
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 6a9b5bf382a..78a781994e8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -890,4 +890,57 @@ public class TestBucketCache {
   HBASE_TESTING_UTILITY.cleanupTestDir();
 }
   }
+
+  @Test
+  public void testNotifyFileCachingCompletedSuccess() throws Exception {
+BucketCache bucketCache = null;
+try {
+  Path filePath =
+new Path(HBASE_TESTING_UTILITY.getDataTestDir(), 
"testNotifyFileCachingComplet

(hbase) branch master updated: HBASE-28724 BucketCache.notifyFileCachingCompleted may throw IllegalMonitorStateException (#6074)

2024-07-15 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 2b673bd429a HBASE-28724 BucketCache.notifyFileCachingCompleted may 
throw IllegalMonitorStateException (#6074)
2b673bd429a is described below

commit 2b673bd429a4bcbdacd7e44716ccb324398affe6
Author: Wellington Ramos Chevreuil 
AuthorDate: Mon Jul 15 11:00:10 2024 +0100

HBASE-28724 BucketCache.notifyFileCachingCompleted may throw 
IllegalMonitorStateException (#6074)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  1 +
 .../hadoop/hbase/io/hfile/CacheTestUtils.java  | 25 ++
 .../hbase/io/hfile/bucket/TestBucketCache.java | 53 ++
 3 files changed, 71 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 8ee0b6b98ad..5816b8ff160 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2108,6 +2108,7 @@ public class BucketCache implements BlockCache, HeapSize {
   for (ReentrantReadWriteLock lock : locks) {
 lock.readLock().unlock();
   }
+  locks.clear();
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName);
   Thread.sleep(100);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 262408e91a8..848f33bb9c3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -32,6 +32,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
@@ -275,6 +276,10 @@ public class CacheTestUtils {
   }
 
   public static HFileBlockPair[] generateHFileBlocks(int blockSize, int 
numBlocks) {
+return generateBlocksForPath(blockSize, numBlocks, null);
+  }
+
+  public static HFileBlockPair[] generateBlocksForPath(int blockSize, int 
numBlocks, Path path) {
 HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
 Random rand = ThreadLocalRandom.current();
 HashSet usedStrings = new HashSet<>();
@@ -299,16 +304,20 @@ public class CacheTestUtils {
   prevBlockOffset, ByteBuff.wrap(cachedBuffer), 
HFileBlock.DONT_FILL_HEADER, blockSize,
   onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, 
meta,
   ByteBuffAllocator.HEAP);
-
-  String strKey;
-  /* No conflicting keys */
-  strKey = Long.toString(rand.nextLong());
-  while (!usedStrings.add(strKey)) {
-strKey = Long.toString(rand.nextLong());
+  String key = null;
+  long offset = 0;
+  if (path != null) {
+key = path.getName();
+offset = i * blockSize;
+  } else {
+/* No conflicting keys */
+key = Long.toString(rand.nextLong());
+while (!usedStrings.add(key)) {
+  key = Long.toString(rand.nextLong());
+}
   }
-
   returnedBlocks[i] = new HFileBlockPair();
-  returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
+  returnedBlocks[i].blockName = new BlockCacheKey(key, offset);
   returnedBlocks[i].block = generated;
 }
 return returnedBlocks;
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
index 6a9b5bf382a..78a781994e8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java
@@ -890,4 +890,57 @@ public class TestBucketCache {
   HBASE_TESTING_UTILITY.cleanupTestDir();
 }
   }
+
+  @Test
+  public void testNotifyFileCachingCompletedSuccess() throws Exception {
+BucketCache bucketCache = null;
+try {
+  Path filePath =
+new Path(HBASE_TESTING_UTILITY.getDataTestDir(), 
"testNotifyFileCachingCompletedSuccess");
+  bucketCache = testNotifyFileCachingComple

(hbase) branch branch-2.5 updated: [HBASE-28364] Warn: Cache key had block type null, but was found in L1 cache (#6068)

2024-07-10 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new c7d1d2e2ef5 [HBASE-28364] Warn: Cache key had block type null, but was 
found in L1 cache (#6068)
c7d1d2e2ef5 is described below

commit c7d1d2e2ef54deb5604a4e6558ca4524b02f59b3
Author: Nikita Pande <37657012+nikita...@users.noreply.github.com>
AuthorDate: Wed Jul 10 14:11:17 2024 +0530

[HBASE-28364] Warn: Cache key had block type null, but was found in L1 
cache (#6068)

Signed-off-by: Wellington Chevreuil 

(cherry picked from commit 8852e3a8354b4bc4d479a4904a32924d9bca1754)
---
 .../main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java  | 2 --
 1 file changed, 2 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d12f4ebe5c7..328506bdb9a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -103,8 +103,6 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
 }
   } else {
 if (existInL1) {
-  LOG.warn("Cache key {} had block type {}, but was found in L1 
cache.", cacheKey,
-cacheKey.getBlockType());
   updateBlockMetrics(block, cacheKey, l1Cache, caching);
 } else {
   updateBlockMetrics(block, cacheKey, l2Cache, caching);



(hbase) branch branch-2.6 updated: [HBASE-28364] Warn: Cache key had block type null, but was found in L1 cache (#6068)

2024-07-10 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 8852e3a8354 [HBASE-28364] Warn: Cache key had block type null, but was 
found in L1 cache (#6068)
8852e3a8354 is described below

commit 8852e3a8354b4bc4d479a4904a32924d9bca1754
Author: Nikita Pande <37657012+nikita...@users.noreply.github.com>
AuthorDate: Wed Jul 10 14:11:17 2024 +0530

[HBASE-28364] Warn: Cache key had block type null, but was found in L1 
cache (#6068)
---
 .../main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java  | 2 --
 1 file changed, 2 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 3a2d4ccc25d..00dc8e4a555 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -109,8 +109,6 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
 }
   } else {
 if (existInL1) {
-  LOG.warn("Cache key {} had block type {}, but was found in L1 
cache.", cacheKey,
-cacheKey.getBlockType());
   updateBlockMetrics(block, cacheKey, l1Cache, caching);
 } else {
   updateBlockMetrics(block, cacheKey, l2Cache, caching);



(hbase) branch branch-2 updated: HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

2024-06-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new b087fb95d52 HBASE-28596 Optimise BucketCache usage upon regions 
splits/merges. (#5906)
b087fb95d52 is described below

commit b087fb95d5275acba79bc3addab3b6d32be7aebe
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Jun 19 14:38:18 2024 +0100

HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

Signed-off-by: Tak Lon (Stephen) Wu 
Reviewed-by: Duo Zhang 
---
 .../hadoop/hbase/io/HalfStoreFileReader.java   |  42 +++
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  11 ++
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java  |  42 +++
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  |   3 +
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |   7 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  13 +--
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  42 ---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 116 --
 .../assignment/TransitRegionStateProcedure.java|   6 +-
 .../hadoop/hbase/regionserver/StoreFileReader.java |   2 +-
 .../handler/UnassignRegionHandler.java |   8 +-
 .../apache/hadoop/hbase/TestSplitWithCache.java| 130 +
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |  37 --
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   8 --
 .../io/hfile/TestPrefetchWithBucketCache.java  |  70 ++-
 .../io/hfile/bucket/TestBucketCachePersister.java  |   6 +
 17 files changed, 447 insertions(+), 98 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index b3de99ecdb6..f1f124e6a80 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.IntConsumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ExtendedCell;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.ReaderContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -64,6 +66,8 @@ public class HalfStoreFileReader extends StoreFileReader {
 
   private boolean firstKeySeeked = false;
 
+  private AtomicBoolean closed = new AtomicBoolean(false);
+
   /**
* Creates a half file reader for a hfile referred to by an hfilelink.
* @param context   Reader context info
@@ -354,4 +358,42 @@ public class HalfStoreFileReader extends StoreFileReader {
 // Estimate the number of entries as half the original file; this may be 
wildly inaccurate.
 return super.getFilterEntries() / 2;
   }
+
+  /**
+   * Overrides close method to handle cache evictions for the referred file. 
If evictOnClose is
+   * true, we will seek to the block containing the splitCell and evict all 
blocks from offset 0 up
+   * to that block offset if this is a bottom half reader, or from the 
split block offset up to
+   * the end of the file if this is a top half reader.
+   * @param evictOnClose true if it should evict the file blocks from the 
cache.
+   */
+  @Override
+  public void close(boolean evictOnClose) throws IOException {
+if (closed.compareAndSet(false, true)) {
+  if (evictOnClose) {
+final HFileReaderImpl.HFileScannerImpl s =
+  (HFileReaderImpl.HFileScannerImpl) super.getScanner(false, true, 
false);
+final String reference = 
this.reader.getHFileInfo().getHFileContext().getHFileName();
+final String referred = 
StoreFileInfo.getReferredToRegionAndFile(reference).getSecond();
+s.seekTo(splitCell);
+if (s.getCurBlock() != null) {
+  long offset = s.getCurBlock().getOffset();
+  LOG.trace("Seeking to split cell in reader: {} for file: {} top: {}, 
split offset: {}",
+this, reference, top, offset);
+  ((HFileReaderImpl) 
reader).getCacheConf().getBlockCache().ifPresent(cache -> {
+int numEvictedReferred = top
+  ? cache.evictBlocksRangeByHfileName(referred, offset, 
Long.MAX_VALUE)
+  : cache.evictBlocksRangeByHfileNam

(hbase) branch branch-3 updated: HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

2024-06-19 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 05e09b9aa44 HBASE-28596 Optimise BucketCache usage upon regions 
splits/merges. (#5906)
05e09b9aa44 is described below

commit 05e09b9aa445a1d56725b4208ada2779fffb6680
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Jun 19 14:38:18 2024 +0100

HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

Signed-off-by: Tak Lon (Stephen) Wu 
Reviewed-by: Duo Zhang 
---
 .../hadoop/hbase/io/HalfStoreFileReader.java   |  42 +++
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  11 ++
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java  |  42 +++
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  |   3 +
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |   7 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  13 +--
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  43 ---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 117 +--
 .../assignment/TransitRegionStateProcedure.java|  21 ++--
 .../hadoop/hbase/regionserver/StoreFileReader.java |   2 +-
 .../handler/UnassignRegionHandler.java |   8 +-
 .../apache/hadoop/hbase/TestSplitWithCache.java| 130 +
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |  37 --
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   8 --
 .../io/hfile/TestPrefetchWithBucketCache.java  |  70 ++-
 .../io/hfile/bucket/TestBucketCachePersister.java  |   6 +
 17 files changed, 457 insertions(+), 105 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index 0989f73df0a..862fbc69809 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.IntConsumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.ReaderContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -64,6 +66,8 @@ public class HalfStoreFileReader extends StoreFileReader {
 
   private boolean firstKeySeeked = false;
 
+  private AtomicBoolean closed = new AtomicBoolean(false);
+
   /**
* Creates a half file reader for a hfile referred to by an hfilelink.
* @param context   Reader context info
@@ -335,4 +339,42 @@ public class HalfStoreFileReader extends StoreFileReader {
 // Estimate the number of entries as half the original file; this may be 
wildly inaccurate.
 return super.getFilterEntries() / 2;
   }
+
+  /**
+   * Overrides close method to handle cache evictions for the referred file. 
If evictOnClose is
+   * true, we will seek to the block containing the splitCell and evict all 
blocks from offset 0 up
+   * to that block offset if this is a bottom half reader, or the from the 
split block offset up to
+   * the end of the file if this is a top half reader.
+   * @param evictOnClose true if it should evict the file blocks from the 
cache.
+   */
+  @Override
+  public void close(boolean evictOnClose) throws IOException {
+if (closed.compareAndSet(false, true)) {
+  if (evictOnClose) {
+final HFileReaderImpl.HFileScannerImpl s =
+  (HFileReaderImpl.HFileScannerImpl) super.getScanner(false, true, 
false);
+final String reference = 
this.reader.getHFileInfo().getHFileContext().getHFileName();
+final String referred = 
StoreFileInfo.getReferredToRegionAndFile(reference).getSecond();
+s.seekTo(splitCell);
+if (s.getCurBlock() != null) {
+  long offset = s.getCurBlock().getOffset();
+  LOG.trace("Seeking to split cell in reader: {} for file: {} top: {}, 
split offset: {}",
+this, reference, top, offset);
+  ((HFileReaderImpl) 
reader).getCacheConf().getBlockCache().ifPresent(cache -> {
+int numEvictedReferred = top
+  ? cache.evictBlocksRangeByHfileName(referred, offset, 
Long.MAX_VALUE)
+  : cache.evictBlocksRangeByHfileNam

(hbase) branch master updated: HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

2024-06-19 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new bd9053ca4a8 HBASE-28596 Optimise BucketCache usage upon regions 
splits/merges. (#5906)
bd9053ca4a8 is described below

commit bd9053ca4a8af1a1271b39602fc6fc57de36d76a
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Jun 19 14:38:18 2024 +0100

HBASE-28596 Optimise BucketCache usage upon regions splits/merges. (#5906)

Signed-off-by: Tak Lon (Stephen) Wu 
Reviewed-by: Duo Zhang 
---
 .../hadoop/hbase/io/HalfStoreFileReader.java   |  42 +++
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  11 ++
 .../hadoop/hbase/io/hfile/BlockCacheUtil.java  |  42 +++
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  |   3 +
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |   7 +-
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   |  13 +--
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  43 ---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 117 +--
 .../assignment/TransitRegionStateProcedure.java|  21 ++--
 .../hadoop/hbase/regionserver/StoreFileReader.java |   2 +-
 .../handler/UnassignRegionHandler.java |   8 +-
 .../apache/hadoop/hbase/TestSplitWithCache.java| 130 +
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |  37 --
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   8 --
 .../io/hfile/TestPrefetchWithBucketCache.java  |  70 ++-
 .../io/hfile/bucket/TestBucketCachePersister.java  |   6 +
 17 files changed, 457 insertions(+), 105 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index 0989f73df0a..862fbc69809 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.IntConsumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.io.hfile.ReaderContext;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -64,6 +66,8 @@ public class HalfStoreFileReader extends StoreFileReader {
 
   private boolean firstKeySeeked = false;
 
+  private AtomicBoolean closed = new AtomicBoolean(false);
+
   /**
* Creates a half file reader for a hfile referred to by an hfilelink.
* @param context   Reader context info
@@ -335,4 +339,42 @@ public class HalfStoreFileReader extends StoreFileReader {
 // Estimate the number of entries as half the original file; this may be 
wildly inaccurate.
 return super.getFilterEntries() / 2;
   }
+
+  /**
+   * Overrides close method to handle cache evictions for the referred file. 
If evictionOnClose is
+   * true, we will seek to the block containing the splitCell and evict all 
blocks from offset 0 up
+   * to that block offset if this is a bottom half reader, or the from the 
split block offset up to
+   * the end of the file if this is a top half reader.
+   * @param evictOnClose true if it should evict the file blocks from the 
cache.
+   */
+  @Override
+  public void close(boolean evictOnClose) throws IOException {
+if (closed.compareAndSet(false, true)) {
+  if (evictOnClose) {
+final HFileReaderImpl.HFileScannerImpl s =
+  (HFileReaderImpl.HFileScannerImpl) super.getScanner(false, true, 
false);
+final String reference = 
this.reader.getHFileInfo().getHFileContext().getHFileName();
+final String referred = 
StoreFileInfo.getReferredToRegionAndFile(reference).getSecond();
+s.seekTo(splitCell);
+if (s.getCurBlock() != null) {
+  long offset = s.getCurBlock().getOffset();
+  LOG.trace("Seeking to split cell in reader: {} for file: {} top: {}, 
split offset: {}",
+this, reference, top, offset);
+  ((HFileReaderImpl) 
reader).getCacheConf().getBlockCache().ifPresent(cache -> {
+int numEvictedReferred = top
+  ? cache.evictBlocksRangeByHfileName(referred, offset, 
Long.MAX_VALUE)
+  : cache.evictBlocksRangeByHfileName(referr

(hbase) branch branch-2.6 updated: HBASE-28657 Backport HBASE-28246 Expose region cached size over JMX metrics and report in the RS UI (#5565) (#5983)

2024-06-17 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 1602c531b24 HBASE-28657 Backport HBASE-28246 Expose region cached size 
over JMX metrics and report in the RS UI (#5565) (#5983)
1602c531b24 is described below

commit 1602c531b245b4d455b48161757cde2ec3d1930b
Author: szucsvillo <81696283+szucsvi...@users.noreply.github.com>
AuthorDate: Mon Jun 17 11:43:52 2024 +0200

HBASE-28657 Backport HBASE-28246 Expose region cached size over JMX metrics 
and report in the RS UI (#5565) (#5983)

Signed-off-by: Peter Somogyi 
---
 .../regionserver/MetricsRegionServerSource.java|  2 +
 .../hbase/regionserver/MetricsRegionWrapper.java   |  5 ++
 .../regionserver/MetricsRegionSourceImpl.java  |  4 +
 .../regionserver/TestMetricsRegionSourceImpl.java  |  5 ++
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |  3 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   | 23 +++--
 .../hadoop/hbase/io/hfile/BlockCacheKey.java   | 15 
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  | 11 ++-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  2 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 97 +++---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 25 +++---
 .../regionserver/MetricsRegionWrapperImpl.java | 17 +++-
 .../io/hfile/TestPrefetchWithBucketCache.java  | 27 +-
 .../regionserver/MetricsRegionWrapperStub.java |  5 ++
 14 files changed, 166 insertions(+), 75 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 5220f2d82b2..75269e57181 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -632,4 +632,6 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
   String SCANNER_LEASE_EXPIRED_COUNT_DESC =
 "Count of scanners which were expired due to scanner lease timeout";
+  String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+  String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching 
completed for this region.";
 }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 3115603aabf..4d8a028d89b 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
   long getStoreFileSize();
 
+  /**
+   * Gets the current cache % ratio for this region.
+   */
+  float getCurrentRegionCacheRatio();
+
   /**
* Get the total number of read requests that have been issued against this 
region
*/
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 0c20456e8cb..92ecaa58088 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
 this.regionWrapper.getNumReferenceFiles());
   mrb.addGauge(Interns.info(regionNamePrefix + 
MetricsRegionServerSource.STOREFILE_SIZE,
 MetricsRegionServerSource.STOREFILE_SIZE_DESC), 
this.regionWrapper.getStoreFileSize());
+  mrb.addGauge(
+Interns.info(regionNamePrefix + 
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+  MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+this.regionWrapper.getCurrentRegionCacheRatio());
   mrb.addCounter(
 Interns.info(regionNamePrefix + 
MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
   MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 3fe116a11a7..2c8205085d1 100644
--- 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver

(hbase) branch branch-2.4 updated: HBASE-28618: Fixed hadolint check in nightly build. (#5957)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 4923544a279 HBASE-28618: Fixed hadolint check in nightly build. (#5957)
4923544a279 is described below

commit 4923544a279953ca628a851cc71591d246a71b98
Author: Subrat Mishra 
AuthorDate: Thu Jun 6 18:01:37 2024 +0530

HBASE-28618: Fixed hadolint check in nightly build. (#5957)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Duo Zhang 
---
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
index 5399fa0e5af..fa88638a7ae 100644
--- a/dev-support/hbase_docker/m1/Dockerfile
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+FROM amd64/ubuntu:22.04 AS base_image
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \



(hbase) branch branch-2.5 updated: HBASE-28618: Fixed hadolint check in nightly build. (#5957)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new e9c267e6823 HBASE-28618: Fixed hadolint check in nightly build. (#5957)
e9c267e6823 is described below

commit e9c267e6823580ba69f3fddee7880c0cf01c6625
Author: Subrat Mishra 
AuthorDate: Thu Jun 6 18:01:37 2024 +0530

HBASE-28618: Fixed hadolint check in nightly build. (#5957)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Duo Zhang 
---
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
index 5399fa0e5af..fa88638a7ae 100644
--- a/dev-support/hbase_docker/m1/Dockerfile
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+FROM amd64/ubuntu:22.04 AS base_image
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \



(hbase) branch branch-2.6 updated: HBASE-28618: Fixed hadolint check in nightly build. (#5957)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new fad4a85ebab HBASE-28618: Fixed hadolint check in nightly build. (#5957)
fad4a85ebab is described below

commit fad4a85ebabc2a8520f3e9e4c60ed7b08705cc15
Author: Subrat Mishra 
AuthorDate: Thu Jun 6 18:01:37 2024 +0530

HBASE-28618: Fixed hadolint check in nightly build. (#5957)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Duo Zhang 
---
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
index 5399fa0e5af..fa88638a7ae 100644
--- a/dev-support/hbase_docker/m1/Dockerfile
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+FROM amd64/ubuntu:22.04 AS base_image
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \



(hbase) branch branch-2 updated: HBASE-28618: Fixed hadolint check in nightly build. (#5957)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new c2b5833139e HBASE-28618: Fixed hadolint check in nightly build. (#5957)
c2b5833139e is described below

commit c2b5833139ef081ed03130ec5be9b618455bc60c
Author: Subrat Mishra 
AuthorDate: Thu Jun 6 18:01:37 2024 +0530

HBASE-28618: Fixed hadolint check in nightly build. (#5957)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Duo Zhang 
---
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
index 5399fa0e5af..fa88638a7ae 100644
--- a/dev-support/hbase_docker/m1/Dockerfile
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+FROM amd64/ubuntu:22.04 AS base_image
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \



(hbase) branch branch-3 updated: HBASE-28618: Fixed hadolint check in nightly build. (#5957)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 395d8dac8bc HBASE-28618: Fixed hadolint check in nightly build. (#5957)
395d8dac8bc is described below

commit 395d8dac8bccbcd19e62130b512cb207f32e7592
Author: Subrat Mishra 
AuthorDate: Thu Jun 6 18:01:37 2024 +0530

HBASE-28618: Fixed hadolint check in nightly build. (#5957)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Duo Zhang 
---
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
index 5399fa0e5af..fa88638a7ae 100644
--- a/dev-support/hbase_docker/m1/Dockerfile
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+FROM amd64/ubuntu:22.04 AS base_image
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \



(hbase) branch master updated (04816d98a20 -> c865570d772)

2024-06-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


from 04816d98a20 HBASE-28562 Correct backup ancestor calculation (#5868)
 add c865570d772 HBASE-28618: Fixed hadolint check in nightly build. (#5957)

No new revisions were added by this update.

Summary of changes:
 dev-support/hbase_docker/m1/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



(hbase) branch HBASE-28463 updated: HBASE-28467: Add time-based priority caching checks for cacheOnRead code paths. (#5905)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new 3a701b933ed HBASE-28467: Add time-based priority caching checks for 
cacheOnRead code paths. (#5905)
3a701b933ed is described below

commit 3a701b933ed6e90fac934331684a26bad568dff9
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu May 23 00:16:49 2024 +0530

HBASE-28467: Add time-based priority caching checks for cacheOnRead code 
paths. (#5905)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  | 12 
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java | 10 ++--
 .../hbase/regionserver/TestDataTieringManager.java | 64 ++
 3 files changed, 82 insertions(+), 4 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 7fb1f1ec85b..40dc0aed494 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -275,6 +275,18 @@ public class CacheConfig implements ConfigurationObserver {
   || (prefetchOnOpen && (category != BlockCategory.META && category != 
BlockCategory.UNKNOWN));
   }
 
+  public boolean shouldCacheBlockOnRead(BlockCategory category, HFileInfo 
hFileInfo,
+Configuration conf) {
+Optional cacheFileBlock = Optional.of(true);
+if (getBlockCache().isPresent()) {
+  Optional result = 
getBlockCache().get().shouldCacheFile(hFileInfo, conf);
+  if (result.isPresent()) {
+cacheFileBlock = result;
+  }
+}
+return shouldCacheBlockOnRead(category) && cacheFileBlock.get();
+  }
+
   /** Returns true if blocks in this file should be flagged as in-memory */
   public boolean isInMemory() {
 return this.inMemory;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index e0585c6edaa..989af7eab88 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1193,7 +1193,8 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
   BlockCacheKey cacheKey =
 new BlockCacheKey(name, metaBlockOffset, 
this.isPrimaryReplicaReader(), BlockType.META);
 
-  cacheBlock &= 
cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
+  cacheBlock &=
+cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory(), 
getHFileInfo(), conf);
   HFileBlock cachedBlock =
 getCachedBlock(cacheKey, cacheBlock, false, true, BlockType.META, 
null);
   if (cachedBlock != null) {
@@ -1346,14 +1347,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 }
 BlockType.BlockCategory category = 
hfileBlock.getBlockType().getCategory();
 final boolean cacheCompressed = 
cacheConf.shouldCacheCompressed(category);
-final boolean cacheOnRead = cacheConf.shouldCacheBlockOnRead(category);
+final boolean cacheOnRead =
+  cacheConf.shouldCacheBlockOnRead(category, getHFileInfo(), conf);
 
 // Don't need the unpacked block back and we're storing the block in 
the cache compressed
 if (cacheOnly && cacheCompressed && cacheOnRead) {
   cacheConf.getBlockCache().ifPresent(cache -> {
 LOG.debug("Skipping decompression of block {} in prefetch", 
cacheKey);
 // Cache the block if necessary
-if (cacheable && cacheConf.shouldCacheBlockOnRead(category)) {
+if (cacheable && cacheOnRead) {
   cache.cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory(), 
cacheOnly);
 }
   });
@@ -1366,7 +1368,7 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
 // Cache the block if necessary
 cacheConf.getBlockCache().ifPresent(cache -> {
-  if (cacheable && cacheConf.shouldCacheBlockOnRead(category)) {
+  if (cacheable && cacheOnRead) {
 // Using the wait on cache during compaction and prefetching.
 cache.cacheBlock(cacheKey, cacheCompressed ? hfileBlock : unpacked,
   cacheConf.isInMemory(), cacheOnly);
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
 
b/hbase-server/src/test/java/

(hbase) branch branch-2.4 updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 6cfb809f60f HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
6cfb809f60f is described below

commit 6cfb809f60fcc28ee39eee55930703cecc8d2568
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
(cherry picked from commit e5b581686dd772a45885f34aaa90d7c4ce3474af)
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz;
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL30

(hbase) branch branch-2.5 updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new b61a73a9524 HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
b61a73a9524 is described below

commit b61a73a9524a02b3c411713ccb3b2cd69757509e
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
(cherry picked from commit e5b581686dd772a45885f34aaa90d7c4ce3474af)
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz;
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL30

(hbase) branch branch-2.6 updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new f8f8555ec4d HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
f8f8555ec4d is described below

commit f8f8555ec4dc8454d53f5623e9f28d16dabe5cd6
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
(cherry picked from commit e5b581686dd772a45885f34aaa90d7c4ce3474af)
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz"
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL3010

(hbase) branch branch-2 updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 1705d7be692 HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
1705d7be692 is described below

commit 1705d7be692e644af0dbfcf0421d22eb3a678e2b
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
(cherry picked from commit e5b581686dd772a45885f34aaa90d7c4ce3474af)
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz"
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL3010
+

(hbase) branch branch-3 updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 8fe72377650 HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
8fe72377650 is described below

commit 8fe723776501ac1b2fa728e710ce55863941eee1
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
(cherry picked from commit e5b581686dd772a45885f34aaa90d7c4ce3474af)
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz"
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL3010
+

(hbase) branch master updated: HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with mac m1 platform (#5286)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new e5b581686dd HBASE-27915 Update hbase_docker with an extra Dockerfile 
compatible with mac m1 platform (#5286)
e5b581686dd is described below

commit e5b581686dd772a45885f34aaa90d7c4ce3474af
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed May 22 16:28:33 2024 +0100

HBASE-27915 Update hbase_docker with an extra Dockerfile compatible with 
mac m1 platform (#5286)

Signed-off-by: Tak Lon (Stephen) Wu 
---
 dev-support/hbase_docker/README.md |  3 ++
 dev-support/hbase_docker/m1/Dockerfile | 92 ++
 2 files changed, 95 insertions(+)

diff --git a/dev-support/hbase_docker/README.md 
b/dev-support/hbase_docker/README.md
index d98f7ada98b..3d0641afaee 100644
--- a/dev-support/hbase_docker/README.md
+++ b/dev-support/hbase_docker/README.md
@@ -41,3 +41,6 @@ this image will start the HMaster and launch the HBase shell 
when run.
**hbase_docker** image. Alternatively, you can type `docker run -it 
hbase_docker
bash` to start a container without a running HMaster. Within this 
environment,
HBase is built in `/root/hbase-bin`.
+
+> NOTE: When running on mac m1 platforms, the docker file requires setting the 
platform flag explicitly.
+> You may use the same instructions above, running from the "./m1" sub-dir.
diff --git a/dev-support/hbase_docker/m1/Dockerfile 
b/dev-support/hbase_docker/m1/Dockerfile
new file mode 100644
index 000..5399fa0e5af
--- /dev/null
+++ b/dev-support/hbase_docker/m1/Dockerfile
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM --platform=linux/amd64 ubuntu:22.04 AS base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \
+  DEBIAN_FRONTEND=noninteractive apt-get -qq install --no-install-recommends 
-y \
+ca-certificates=20211016 \
+curl='7.81.0-*' \
+git='1:2.34.1-*' \
+locales='2.35-*' \
+&& \
+apt-get clean && \
+rm -rf /var/lib/apt/lists/* \
+&& \
+locale-gen en_US.UTF-8
+ENV LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+
+FROM base_image AS maven_download_image
+ENV MAVEN_VERSION='3.8.6'
+ENV MAVEN_URL 
"https://archive.apache.org/dist/maven/maven-3/${MAVEN_VERSION}/binaries/apache-maven-${MAVEN_VERSION}-bin.tar.gz"
+ENV MAVEN_SHA512 
'f790857f3b1f90ae8d16281f902c689e4f136ebe584aba45e4b1fa66c80cba826d3e0e52fdd04ed44b4c66f6d3fe3584a057c26dfcac544a60b301e6d0f91c26'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output /tmp/maven.tar.gz 
"${MAVEN_URL}" && \
+  echo "${MAVEN_SHA512} */tmp/maven.tar.gz" | sha512sum -c -
+
+FROM base_image AS openjdk8_download_image
+ENV OPENJDK8_URL 
'https://github.com/adoptium/temurin8-binaries/releases/download/jdk8u352-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u352b08.tar.gz'
+ENV OPENJDK8_SHA256 
'1633bd7590cb1cd72f5a1378ae8294451028b274d798e2a4ac672059a2f00fee'
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN curl --location --fail --silent --show-error --output 
/tmp/adoptopenjdk8.tar.gz "${OPENJDK8_URL}" && \
+  echo "${OPENJDK8_SHA256} */tmp/adoptopenjdk8.tar.gz" | sha256sum -c -
+
+FROM base_image
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# when updating java or maven versions here, consider also updating
+# `dev-support/docker/Dockerfile` as well.
+#
+
+# hadolint ignore=DL3010
+COPY --from=maven_download_image /tmp/maven.tar.gz /tmp/maven.tar.gz
+RUN tar xzf /tmp/maven.tar.gz -C /opt && \
+  ln -s "/opt/$(dirname "$(tar -tf /tmp/maven.tar.gz | head -n1)")" /opt/maven 
&& \
+  rm /tmp/maven.tar.gz
+
+# hadolint ignore=DL3010
+COPY --from=openjdk8_download_image /tmp/adoptopenjdk8.tar.gz 
/tmp/adoptopen

(hbase) branch HBASE-28463 updated: HBASE-28469: Integration of time-based priority caching into compaction paths (#5866)

2024-05-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new d3317caa5ff HBASE-28469: Integration of time-based priority caching 
into compaction paths (#5866)
d3317caa5ff is described below

commit d3317caa5ffe28324840a79661b23dd130d4daf9
Author: vinayak hegde 
AuthorDate: Wed May 22 18:59:49 2024 +0530

HBASE-28469: Integration of time-based priority caching into compaction 
paths (#5866)

Signed-off-by: Wellington Chevreuil 
Reviewed-by: Janardhan Hugund 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  17 ++
 .../hadoop/hbase/io/hfile/BlockCacheKey.java   |   1 -
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  20 ++-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|   5 +
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java |  53 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  13 ++
 .../hbase/regionserver/DataTieringManager.java |  50 +-
 .../hbase/regionserver/HRegionFileSystem.java  |  25 +++
 .../hadoop/hbase/regionserver/StoreFileWriter.java |  23 +--
 .../hbase/regionserver/TimeRangeTracker.java   |   4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 183 ++---
 11 files changed, 336 insertions(+), 58 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index ac83af1053a..922ac5dd144 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -207,6 +208,22 @@ public interface BlockCache extends Iterable {
 return Optional.empty();
   }
 
+  /**
+   * Checks whether the block represented by the given key should be cached or 
not. This method may
+   * not be overridden by all implementing classes. In such cases, the 
returned Optional will be
+   * empty. For subclasses implementing this logic, the returned Optional 
would contain the boolean
+   * value reflecting if the passed block should indeed be cached.
+   * @param key  The key representing the block to check if it 
should be cached.
+   * @param timeRangeTracker the time range tracker containing the timestamps
+   * @param conf The configuration object to use for determining 
caching behavior.
+   * @return An empty Optional if this method is not supported; otherwise, the 
returned Optional
+   * contains the boolean value indicating if the block should be 
cached.
+   */
+  default Optional shouldCacheBlock(BlockCacheKey key, 
TimeRangeTracker timeRangeTracker,
+Configuration conf) {
+return Optional.empty();
+  }
+
   /**
* Checks whether the block for the passed key is already cached. This 
method may not be
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index bf22d38e373..bcc1f58ba5e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -116,5 +116,4 @@ public class BlockCacheKey implements HeapSize, 
java.io.Serializable {
   public Path getFilePath() {
 return filePath;
   }
-
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index b12510cdccd..c29ed1ecf31 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -484,11 +485,22 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
 
   @Override
   public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
-Optional l1Result

(hbase) 05/05: HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 7527a74ef182ae3a9660ab90c06420fff28be4f2
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu May 2 13:54:33 2024 +0530

HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 20 ++---
 .../hbase/regionserver/DataTieringManager.java | 32 +---
 .../hadoop/hbase/regionserver/HRegionServer.java   |  5 +-
 .../hbase/regionserver/TestDataTieringManager.java | 91 +++---
 4 files changed, 114 insertions(+), 34 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5a9c7795a33..0b53d047990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -985,11 +985,10 @@ public class BucketCache implements BlockCache, HeapSize {
 
   // Check the list of files to determine the cold files which can be 
readily evicted.
   Map coldFiles = null;
-  try {
-DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+
+  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+  if (dataTieringManager != null) {
 coldFiles = dataTieringManager.getColdFilesList();
-  } catch (IllegalStateException e) {
-LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
   }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
@@ -2195,16 +2194,11 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 String fileName = hFileInfo.getHFileContext().getHFileName();
-try {
-  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
-  if (!dataTieringManager.isHotData(hFileInfo, conf)) {
-LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
-return Optional.of(false);
-  }
-} catch (IllegalStateException e) {
-  LOG.error("Error while getting DataTieringManager instance: {}", 
e.getMessage());
+DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+if (dataTieringManager != null && !dataTieringManager.isHotData(hFileInfo, 
conf)) {
+  LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
+  return Optional.of(false);
 }
-
 // if we don't have the file in fullyCachedFiles, we should cache it
 return Optional.of(!fullyCachedFiles.containsKey(fileName));
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index 6c699e77c2f..952b4d4938d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -45,6 +45,9 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class DataTieringManager {
   private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String GLOBAL_DATA_TIERING_ENABLED_KEY =
+"hbase.regionserver.datatiering.enable";
+  public static final boolean DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false; // 
disabled by default
   public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
   public static final String DATATIERING_HOT_DATA_AGE_KEY =
 "hbase.hstore.datatiering.hot.age.millis";
@@ -58,28 +61,29 @@ public class DataTieringManager {
   }
 
   /**
-   * Initializes the DataTieringManager instance with the provided map of 
online regions.
+   * Initializes the DataTieringManager instance with the provided map of 
online regions, only if
+   * the configuration "hbase.regionserver.datatiering.enable" is enabled.
+   * @param conf  Configuration object.
* @param onlineRegions A map containing online regions.
+   * @return True if the instance is instantiated successfully, false 
otherwise.
*/
-  public static synchronized void instantiate(Map 
onlineRegions) {
-if (instance == null) {
+  public static synchronized boolean instantiate(Configuration conf,
+Map onlineRegions) {
+if (isDataTieringFeatureEnabled(conf) && instance == null) {
   instance = new DataTieringManage

(hbase) branch HBASE-28463 updated (c3923459efc -> 7527a74ef18)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


omit c3923459efc HBASE-28535: Add a region-server wide key to enable 
data-tiering. (#5856)
omit c1be5f1 HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)
omit 84b3f1934cd HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
omit 99c412fe156 HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
omit a1fb5928de1 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 add 9f773d4db02 HBASE-28232 Add release manager for 2.6 in ref guide 
(#5921)
 add 8a5337b3e43 HBASE-28501 Support non-SPNEGO authentication methods and 
implement session handling in REST java client library (addendum: revert 
incompatible API change) (#5928)
 add d85574aa1f4  HBASE-28607 Bump requests from 2.31.0 to 2.32.0 in 
/dev-support/flaky-tests (#5929)
 new 76a995d038e HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 new 6ff8b067ae2 HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
 new b45f2b71ea4 HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
 new 7ddd9d7c2d1 HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)
 new 7527a74ef18 HBASE-28535: Add a region-server wide key to enable 
data-tiering. (#5856)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (c3923459efc)
\
 N -- N -- N   refs/heads/HBASE-28463 (7527a74ef18)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 dev-support/flaky-tests/python-requirements.txt|  2 +-
 .../apache/hadoop/hbase/rest/client/Client.java| 22 +++---
 src/main/asciidoc/_chapters/community.adoc |  5 +
 3 files changed, 17 insertions(+), 12 deletions(-)



(hbase) 01/05: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 76a995d038e22fc8490a9a24a7afd1dffd986450
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.hot.age.millis";
+  public static final DataTieringType DEFAULT_DATATIERING = 
DataTieringType.NONE;
+  public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 
* 1000; // 7 Days
+  private static Dat

(hbase) 02/05: HBASE-28505 Implement enforcement to require Date Tiered Compaction for Time Range Data Tiering (#5809)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 6ff8b067ae23d2f0775aa7bde79b9bdc0b1f6e9c
Author: vinayak hegde 
AuthorDate: Fri Apr 12 14:54:37 2024 +0530

HBASE-28505 Implement enforcement to require Date Tiered Compaction for 
Time Range Data Tiering (#5809)


Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DateTieredStoreEngine.java  |  3 ++
 .../hadoop/hbase/util/TableDescriptorChecker.java  | 36 +
 .../hbase/client/TestIllegalTableDescriptor.java   | 45 ++
 3 files changed, 84 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index ded6564bce5..26437ab1124 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine {
+
+  public static final String DATE_TIERED_STORE_ENGINE = 
DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List<HStoreFile> filesCompacting) {
 return compactionPolicy.needsCompaction(storeFileManager.getStoreFiles(), filesCompacting);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..471583b32b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static 
org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public final class TableDescriptorChecker {
 
   // check in-memory compaction
   warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+  checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
 }
   }
 
@@ -210,6 +217,35 @@ public final class TableDescriptorChecker {
 });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf,
+final TableDescriptor td) throws IOException {
+// Table level configurations
+checkDateTieredCompactionForTimeRangeDataTiering(conf);
+for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+  // Column family level configurations
+  Configuration cfdConf =
+new 
CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+  checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+}
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf)
+throws IOException {
+final String errorMessage =
+  "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+warnOrThrowExceptionForFailure(false, () -> {
+
+  // Determine whether Date Tiered Compaction will be enabled when Time 
Range Data Tiering is
+  // enabled after the configuration change.
+  if 
(DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY)))
 {
+if 
(!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY)))
 {
+  throw new IllegalArgumentException(errorMessage);
+}
+  }
+});
+  }
+
   private static void checkCompactionPolicy(final Configuration conf, final 
TableDescriptor td)
 throws IOException {
 warnOrThrowExceptionFor

(hbase) 04/05: HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 7ddd9d7c2d15b29b4eda88bf29fef329bcc13230
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu Apr 25 15:29:36 2024 +0530

HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  45 +-
 .../hbase/regionserver/DataTieringManager.java |  42 -
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 178 +
 4 files changed, 263 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 622a57f91c2..5a9c7795a33 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -974,6 +974,7 @@ public class BucketCache implements BlockCache, HeapSize {
   long bytesToFreeWithExtra =
 (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
 
+  long bytesFreed = 0;
   // Instantiate priority buckets
   BucketEntryGroup bucketSingle =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(singleFactor));
@@ -982,9 +983,36 @@ public class BucketCache implements BlockCache, HeapSize {
   BucketEntryGroup bucketMemory =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(memoryFactor));
 
+  // Check the list of files to determine the cold files which can be 
readily evicted.
+  Map coldFiles = null;
+  try {
+DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+coldFiles = dataTieringManager.getColdFilesList();
+  } catch (IllegalStateException e) {
+LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
+  }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
   for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
+if (
+  coldFiles != null && 
coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
+) {
+  int freedBlockSize = bucketEntryWithKey.getValue().getLength();
+  if (evictBlockIfNoRpcReferenced(bucketEntryWithKey.getKey())) {
+bytesFreed += freedBlockSize;
+  }
+  if (bytesFreed >= bytesToFreeWithExtra) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+"Bucket cache free space completed; required: {} freed: {} 
from cold data blocks.",
+bytesToFreeWithExtra, StringUtils.byteDesc(bytesFreed));
+}
+// Sufficient bytes have been freed.
+return;
+  }
+  continue;
+}
+
 switch (bucketEntryWithKey.getValue().getPriority()) {
   case SINGLE: {
 bucketSingle.add(bucketEntryWithKey);
@@ -1001,6 +1029,21 @@ public class BucketCache implements BlockCache, HeapSize 
{
 }
   }
 
+  // Check if the cold file eviction is sufficient to create enough space.
+  bytesToFreeWithExtra -= bytesFreed;
+  if (bytesToFreeWithExtra <= 0) {
+LOG.debug("Bucket cache free space completed; freed space : {} bytes 
of cold data blocks.",
+  StringUtils.byteDesc(bytesFreed));
+return;
+  }
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+  "Bucket cache free space completed; freed space : {} "
++ "bytes of cold data blocks. {} more bytes required to be freed.",
+  StringUtils.byteDesc(bytesFreed), bytesToFreeWithExtra);
+  }
+
   PriorityQueue<BucketEntryGroup> bucketQueue =
 new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow));
 
@@ -1009,8 +1052,6 @@ public class BucketCache implements BlockCache, HeapSize {
   bucketQueue.add(bucketMemory);
 
   int remainingBuckets = bucketQueue.size();
-  long bytesFreed = 0;
-
   BucketEntryGroup bucketGroup;
   while ((bucketGroup = bucketQueue.poll()) != null) {
 long overflow = bucketGroup.overflow();
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index dec96604774..6c699e77c2f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@

(hbase) 03/05: HBASE-28466 Integration of time-based priority logic of bucket cache in prefetch functionality of HBase (#5808)

2024-05-21 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b45f2b71ea4d1dd3369a4c2c08139355cbbd2d38
Author: vinayak hegde 
AuthorDate: Mon Apr 22 15:23:30 2024 +0530

HBASE-28466 Integration of time-based priority logic of bucket cache in 
prefetch functionality of HBase (#5808)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  6 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  7 +-
 .../apache/hadoop/hbase/io/hfile/HFileInfo.java|  6 ++
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 15 +++-
 .../hbase/regionserver/DataTieringManager.java | 91 +-
 .../hbase/regionserver/TestDataTieringManager.java | 58 ++
 7 files changed, 145 insertions(+), 44 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index bed0194b1fa..ac83af1053a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -197,11 +198,12 @@ public interface BlockCache extends Iterable 
{
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain 
the boolean value
* reflecting if the passed file should indeed be cached.
-   * @param fileName to check if it should be cached.
+   * @param hFileInfo Information about the file to check if it should be 
cached.
+   * @param conf  The configuration object to use for determining caching 
behavior.
* @return empty optional if this method is not supported, otherwise the 
returned optional
* contains the boolean value informing if the file should be cached.
*/
-  default Optional<Boolean> shouldCacheFile(String fileName) {
+  default Optional<Boolean> shouldCacheFile(HFileInfo hFileInfo, Configuration conf) {
 return Optional.empty();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d6692d2e2bf..b12510cdccd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.commons.lang3.mutable.Mutable;
 import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -482,9 +483,9 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public Optional<Boolean> shouldCacheFile(String fileName) {
-    Optional<Boolean> l1Result = l1Cache.shouldCacheFile(fileName);
-    Optional<Boolean> l2Result = l2Cache.shouldCacheFile(fileName);
+  public Optional<Boolean> shouldCacheFile(HFileInfo hFileInfo, Configuration conf) {
+    Optional<Boolean> l1Result = l1Cache.shouldCacheFile(hFileInfo, conf);
+    Optional<Boolean> l2Result = l2Cache.shouldCacheFile(hFileInfo, conf);
 final Mutable combinedResult = new MutableBoolean(true);
 l1Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
 l2Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 31e637a0099..e89f86e7c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -122,6 +122,7 @@ public class HFileInfo implements SortedMap 
{
 
   private FixedFileTrailer trailer;
   private HFileContext hfileContext;
+  private boolean initialized = false;
 
   public HFileInfo() {
 super();
@@ -363,6 +364,10 @@ public class HFileInfo implements SortedMap {
* should be called after initTrailerAndContext
*/
   public void initMetaAndIndex(HFile.Reader reader) throws IOException {
+if (initialized) {
+  return;
+}
+
 ReaderContext context = reader.

(hbase) 04/05: HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit c1be5f195252620da511b13b139345ea912e
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu Apr 25 15:29:36 2024 +0530

HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  45 +-
 .../hbase/regionserver/DataTieringManager.java |  42 -
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 178 +
 4 files changed, 263 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 622a57f91c2..5a9c7795a33 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -974,6 +974,7 @@ public class BucketCache implements BlockCache, HeapSize {
   long bytesToFreeWithExtra =
 (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
 
+  long bytesFreed = 0;
   // Instantiate priority buckets
   BucketEntryGroup bucketSingle =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(singleFactor));
@@ -982,9 +983,36 @@ public class BucketCache implements BlockCache, HeapSize {
   BucketEntryGroup bucketMemory =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(memoryFactor));
 
+  // Check the list of files to determine the cold files which can be 
readily evicted.
+  Map coldFiles = null;
+  try {
+DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+coldFiles = dataTieringManager.getColdFilesList();
+  } catch (IllegalStateException e) {
+LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
+  }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
   for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
+if (
+  coldFiles != null && 
coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
+) {
+  int freedBlockSize = bucketEntryWithKey.getValue().getLength();
+  if (evictBlockIfNoRpcReferenced(bucketEntryWithKey.getKey())) {
+bytesFreed += freedBlockSize;
+  }
+  if (bytesFreed >= bytesToFreeWithExtra) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+"Bucket cache free space completed; required: {} freed: {} 
from cold data blocks.",
+bytesToFreeWithExtra, StringUtils.byteDesc(bytesFreed));
+}
+// Sufficient bytes have been freed.
+return;
+  }
+  continue;
+}
+
 switch (bucketEntryWithKey.getValue().getPriority()) {
   case SINGLE: {
 bucketSingle.add(bucketEntryWithKey);
@@ -1001,6 +1029,21 @@ public class BucketCache implements BlockCache, HeapSize 
{
 }
   }
 
+  // Check if the cold file eviction is sufficient to create enough space.
+  bytesToFreeWithExtra -= bytesFreed;
+  if (bytesToFreeWithExtra <= 0) {
+LOG.debug("Bucket cache free space completed; freed space : {} bytes 
of cold data blocks.",
+  StringUtils.byteDesc(bytesFreed));
+return;
+  }
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+  "Bucket cache free space completed; freed space : {} "
++ "bytes of cold data blocks. {} more bytes required to be freed.",
+  StringUtils.byteDesc(bytesFreed), bytesToFreeWithExtra);
+  }
+
   PriorityQueue<BucketEntryGroup> bucketQueue =
 new PriorityQueue<>(3, Comparator.comparingLong(BucketEntryGroup::overflow));
 
@@ -1009,8 +1052,6 @@ public class BucketCache implements BlockCache, HeapSize {
   bucketQueue.add(bucketMemory);
 
   int remainingBuckets = bucketQueue.size();
-  long bytesFreed = 0;
-
   BucketEntryGroup bucketGroup;
   while ((bucketGroup = bucketQueue.poll()) != null) {
 long overflow = bucketGroup.overflow();
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index dec96604774..6c699e77c2f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@

(hbase) 03/05: HBASE-28466 Integration of time-based priority logic of bucket cache in prefetch functionality of HBase (#5808)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 84b3f1934cd0ac107b595545a0e5d2c5f704f56a
Author: vinayak hegde 
AuthorDate: Mon Apr 22 15:23:30 2024 +0530

HBASE-28466 Integration of time-based priority logic of bucket cache in 
prefetch functionality of HBase (#5808)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  6 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  7 +-
 .../apache/hadoop/hbase/io/hfile/HFileInfo.java|  6 ++
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 15 +++-
 .../hbase/regionserver/DataTieringManager.java | 91 +-
 .../hbase/regionserver/TestDataTieringManager.java | 58 ++
 7 files changed, 145 insertions(+), 44 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index bed0194b1fa..ac83af1053a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -197,11 +198,12 @@ public interface BlockCache extends Iterable 
{
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain 
the boolean value
* reflecting if the passed file should indeed be cached.
-   * @param fileName to check if it should be cached.
+   * @param hFileInfo Information about the file to check if it should be 
cached.
+   * @param conf  The configuration object to use for determining caching 
behavior.
* @return empty optional if this method is not supported, otherwise the 
returned optional
* contains the boolean value informing if the file should be cached.
*/
-  default Optional<Boolean> shouldCacheFile(String fileName) {
+  default Optional<Boolean> shouldCacheFile(HFileInfo hFileInfo, Configuration conf) {
 return Optional.empty();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d6692d2e2bf..b12510cdccd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.commons.lang3.mutable.Mutable;
 import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -482,9 +483,9 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public Optional<Boolean> shouldCacheFile(String fileName) {
-    Optional<Boolean> l1Result = l1Cache.shouldCacheFile(fileName);
-    Optional<Boolean> l2Result = l2Cache.shouldCacheFile(fileName);
+  public Optional<Boolean> shouldCacheFile(HFileInfo hFileInfo, Configuration conf) {
+    Optional<Boolean> l1Result = l1Cache.shouldCacheFile(hFileInfo, conf);
+    Optional<Boolean> l2Result = l2Cache.shouldCacheFile(hFileInfo, conf);
 final Mutable combinedResult = new MutableBoolean(true);
 l1Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
 l2Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 31e637a0099..e89f86e7c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -122,6 +122,7 @@ public class HFileInfo implements SortedMap 
{
 
   private FixedFileTrailer trailer;
   private HFileContext hfileContext;
+  private boolean initialized = false;
 
   public HFileInfo() {
 super();
@@ -363,6 +364,10 @@ public class HFileInfo implements SortedMap {
* should be called after initTrailerAndContext
*/
   public void initMetaAndIndex(HFile.Reader reader) throws IOException {
+if (initialized) {
+  return;
+}
+
 ReaderContext context = reader.

(hbase) 02/05: HBASE-28505 Implement enforcement to require Date Tiered Compaction for Time Range Data Tiering (#5809)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 99c412fe156158004f4fd09eb8c122cf7765b9ac
Author: vinayak hegde 
AuthorDate: Fri Apr 12 14:54:37 2024 +0530

HBASE-28505 Implement enforcement to require Date Tiered Compaction for 
Time Range Data Tiering (#5809)


Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DateTieredStoreEngine.java  |  3 ++
 .../hadoop/hbase/util/TableDescriptorChecker.java  | 36 +
 .../hbase/client/TestIllegalTableDescriptor.java   | 45 ++
 3 files changed, 84 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index ded6564bce5..26437ab1124 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine {
+
+  public static final String DATE_TIERED_STORE_ENGINE = 
DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List<HStoreFile> filesCompacting) {
 return compactionPolicy.needsCompaction(storeFileManager.getStoreFiles(), filesCompacting);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..471583b32b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static 
org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public final class TableDescriptorChecker {
 
   // check in-memory compaction
   warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+  checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
 }
   }
 
@@ -210,6 +217,35 @@ public final class TableDescriptorChecker {
 });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf,
+final TableDescriptor td) throws IOException {
+// Table level configurations
+checkDateTieredCompactionForTimeRangeDataTiering(conf);
+for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+  // Column family level configurations
+  Configuration cfdConf =
+new 
CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+  checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+}
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf)
+throws IOException {
+final String errorMessage =
+  "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+warnOrThrowExceptionForFailure(false, () -> {
+
+  // Determine whether Date Tiered Compaction will be enabled when Time 
Range Data Tiering is
+  // enabled after the configuration change.
+  if 
(DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY)))
 {
+if 
(!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY)))
 {
+  throw new IllegalArgumentException(errorMessage);
+}
+  }
+});
+  }
+
   private static void checkCompactionPolicy(final Configuration conf, final 
TableDescriptor td)
 throws IOException {
 warnOrThrowExceptionFor

(hbase) 01/05: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a1fb5928de141fce2a6573a20f319fc42c62b435
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.hot.age.millis";
+  public static final DataTieringType DEFAULT_DATATIERING = 
DataTieringType.NONE;
+  public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 
* 1000; // 7 Days
+  private static Dat

(hbase) 05/05: HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit c3923459efc8d631f9f3e42f12ac3b8653827c9b
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu May 2 13:54:33 2024 +0530

HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 20 ++---
 .../hbase/regionserver/DataTieringManager.java | 32 +---
 .../hadoop/hbase/regionserver/HRegionServer.java   |  5 +-
 .../hbase/regionserver/TestDataTieringManager.java | 91 +++---
 4 files changed, 114 insertions(+), 34 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5a9c7795a33..0b53d047990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -985,11 +985,10 @@ public class BucketCache implements BlockCache, HeapSize {
 
   // Check the list of files to determine the cold files which can be 
readily evicted.
   Map coldFiles = null;
-  try {
-DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+
+  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+  if (dataTieringManager != null) {
 coldFiles = dataTieringManager.getColdFilesList();
-  } catch (IllegalStateException e) {
-LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
   }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
@@ -2195,16 +2194,11 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 String fileName = hFileInfo.getHFileContext().getHFileName();
-try {
-  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
-  if (!dataTieringManager.isHotData(hFileInfo, conf)) {
-LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
-return Optional.of(false);
-  }
-} catch (IllegalStateException e) {
-  LOG.error("Error while getting DataTieringManager instance: {}", 
e.getMessage());
+DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+if (dataTieringManager != null && !dataTieringManager.isHotData(hFileInfo, 
conf)) {
+  LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
+  return Optional.of(false);
 }
-
 // if we don't have the file in fullyCachedFiles, we should cache it
 return Optional.of(!fullyCachedFiles.containsKey(fileName));
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index 6c699e77c2f..952b4d4938d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -45,6 +45,9 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class DataTieringManager {
   private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String GLOBAL_DATA_TIERING_ENABLED_KEY =
+"hbase.regionserver.datatiering.enable";
+  public static final boolean DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false; // 
disabled by default
   public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
   public static final String DATATIERING_HOT_DATA_AGE_KEY =
 "hbase.hstore.datatiering.hot.age.millis";
@@ -58,28 +61,29 @@ public class DataTieringManager {
   }
 
   /**
-   * Initializes the DataTieringManager instance with the provided map of 
online regions.
+   * Initializes the DataTieringManager instance with the provided map of 
online regions, only if
+   * the configuration "hbase.regionserver.datatiering.enable" is enabled.
+   * @param conf  Configuration object.
* @param onlineRegions A map containing online regions.
+   * @return True if the instance is instantiated successfully, false 
otherwise.
*/
-  public static synchronized void instantiate(Map 
onlineRegions) {
-if (instance == null) {
+  public static synchronized boolean instantiate(Configuration conf,
+Map onlineRegions) {
+if (isDataTieringFeatureEnabled(conf) && instance == null) {
   instance = new DataTieringManage

(hbase) branch HBASE-28463 updated (4dee5324aeb -> c3923459efc)

2024-05-20 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


omit 4dee5324aeb HBASE-28535: Add a region-server wide key to enable 
data-tiering. (#5856)
omit f890281bf90 HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)
omit 84fc9acc224 HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
omit 9572f7d8d87 HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
omit faada98341f HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 add 4a9f6f2fe7b HBASE-28518 Allow specifying a filter for the REST 
multiget endpoint (addendum: add back SCAN_FILTER constant) (#5852)
 add 4b49e53a91b HBASE-28436 Addendum fix naming issue (#5855)
 add 8828e45b610 HBASE-28512 Update error prone to 2.26.1 (#5838)
 add 52e0a8cdaf6 HBASE-28552 Bump up bouncycastle dependency from 1.76 to 
1.78 (#5854)
 add ba099131ca1 HBASE-28405 Fix failed procedure rollback when region was 
not closed and is still in state merging (#5799)
 add d493e2c1827 HBASE-28482 Reverse scan with tags throws 
ArrayIndexOutOfBoundsException with DBE in setCurrentBlock flow (#5792)
 add 4230c42b402 HBASE-28554 TestZooKeeperScanPolicyObserver and 
TestAdminShell fail 100% of times on flaky dashboard (#5859)
 add 0a4daab5393 HBASE-28523 Use a single get call in REST multiget 
endpoint (#5862)
 add 8a2f3ef7930 HBASE-28533 On split procedure rollback revert parent 
region state back to OPEN (#5863)
 add e9ced397269 HBASE-28558 Fix constructors for sub classes of Connection 
(#5861)
 add 3d66866f416 HBASE-28521 Use standard ConnectionRegistry and Client API 
to get region server list in in replication (#5825)
 add 339d7adfcf8 HBASE-28479 Change the deprecation cycle for 
HasMasterServices and HasRegionServerServices (#5872)
 add 708882c6512 HBASE-28480 Remove deprecated methods in 
RegionCoprocessorHost for 3.0.0 (#5873)
 add ce113dd3f4e HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
 add 917f2f1ec0c HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5851)
 add bcd6205f9d0 HBASE-28566 Remove ZKDataMigrator (#5875)
 add 156e430dc56 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
 add c2ea9a1c8d1 HBASE-28574 Bump jinja2 from 3.1.3 to 3.1.4 in 
/dev-support/flaky-tests (#5879)
 add c4f01ede674 HBASE-28571 Remove deprecated methods map reduce utils 
(#5878)
 add f750de2b112 HBASE-28570 Remove deprecated fields in HBTU (#5877)
 add 2a7aa0d439d HBASE-28575 Always printing error log when snapshot table 
(#5880)
 add 23fa363d360 HBASE-28563 Closing ZooKeeper in ZKMainServer (#5869)
 add 328df6abf3e HBASE-28576 Remove FirstKeyValueMatchingQualifiersFilter 
(#5891)
 add ca340100535 HBASE-28581 Remove deprecated methods in 
TableDescriotorBuilder (#5892)
 add d1fc87eb1c3 HBASE-28502 Cleanup old backup manifest logic (#5871)
 add ad88ed3aaca HBASE-27938 PE load any custom implementation of tests at 
runtime (#5307)
 add 716adf50e90 HBASE-28501 Support non-SPNEGO authentication methods and 
implement session handling in REST java client library (#5881)
 add 00f078a05ef HBASE-25972 Dual File Compaction (#5545)
 add 3a3dd66e21d HBASE-28568 Incremental backup set does not correctly 
shrink (#5876)
 add 0db26eccdfc HBASE-28572 Remove deprecated methods in thrift module 
(#5882)
 add 2dbbcdf8493 HBASE-28578 Remove deprecated methods in HFileScanner 
(#5885)
 add b260199882b HBASE-28579 Hide HFileScanner related methods in 
StoreFileReader (#5889)
 add 6c84d3960ff HBASE-26048 [JDK17] Replace the usage of deprecated API 
ThreadGroup.destroy() (#5913)
 add b4c271253a9 HBASE-28568 Incremental backup set does not correctly 
shrink (addendum) (#5917)
 add d4b0e18a5e8 HBASE-28236 Add 2.6.0 to downloads page (#5919)
 add a3ff01d890b HBASE-28595: check seq id of scan RPCs for closed scanners 
(#5910)
 add 6b7aaed6009 HBASE-28604 Fix the error message in ReservoirSample's 
constructor (#5920)
 add 6b3f5ae1fc1 HBASE-28536 Fix `Disable Stripe Compaction` run error in 
document (#5836)
 add dba7dccb65d HBASE-28599 RowTooBigException is thrown when duplicate 
increment RPC call is attempted (#5927)
 add 2dc7e1523b0 HBASE-28547 Support specifying connection configuration 
through queries of the connection uri (#5853)
 new a1fb5928de1 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 new 99c412fe156 HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
 new 84b3f1934cd HBASE-28466 Integration of time

(hbase) branch HBASE-28463 updated: HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

2024-05-02 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new 4dee5324aeb HBASE-28535: Add a region-server wide key to enable 
data-tiering. (#5856)
4dee5324aeb is described below

commit 4dee5324aebf9dcc1d9d1648b15e1d4ef6964778
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu May 2 13:54:33 2024 +0530

HBASE-28535: Add a region-server wide key to enable data-tiering. (#5856)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 20 ++---
 .../hbase/regionserver/DataTieringManager.java | 32 +---
 .../hadoop/hbase/regionserver/HRegionServer.java   |  5 +-
 .../hbase/regionserver/TestDataTieringManager.java | 91 +++---
 4 files changed, 114 insertions(+), 34 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 5a9c7795a33..0b53d047990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -985,11 +985,10 @@ public class BucketCache implements BlockCache, HeapSize {
 
   // Check the list of files to determine the cold files which can be 
readily evicted.
   Map coldFiles = null;
-  try {
-DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+
+  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+  if (dataTieringManager != null) {
 coldFiles = dataTieringManager.getColdFilesList();
-  } catch (IllegalStateException e) {
-LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
   }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
@@ -2195,16 +2194,11 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 String fileName = hFileInfo.getHFileContext().getHFileName();
-try {
-  DataTieringManager dataTieringManager = DataTieringManager.getInstance();
-  if (!dataTieringManager.isHotData(hFileInfo, conf)) {
-LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
-return Optional.of(false);
-  }
-} catch (IllegalStateException e) {
-  LOG.error("Error while getting DataTieringManager instance: {}", 
e.getMessage());
+DataTieringManager dataTieringManager = DataTieringManager.getInstance();
+if (dataTieringManager != null && !dataTieringManager.isHotData(hFileInfo, 
conf)) {
+  LOG.debug("Data tiering is enabled for file: '{}' and it is not hot 
data", fileName);
+  return Optional.of(false);
 }
-
 // if we don't have the file in fullyCachedFiles, we should cache it
 return Optional.of(!fullyCachedFiles.containsKey(fileName));
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index 6c699e77c2f..952b4d4938d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -45,6 +45,9 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Private
 public class DataTieringManager {
   private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String GLOBAL_DATA_TIERING_ENABLED_KEY =
+"hbase.regionserver.datatiering.enable";
+  public static final boolean DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false; // 
disabled by default
   public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
   public static final String DATATIERING_HOT_DATA_AGE_KEY =
 "hbase.hstore.datatiering.hot.age.millis";
@@ -58,28 +61,29 @@ public class DataTieringManager {
   }
 
   /**
-   * Initializes the DataTieringManager instance with the provided map of 
online regions.
+   * Initializes the DataTieringManager instance with the provided map of 
online regions, only if
+   * the configuration "hbase.regionserver.datatiering.enable" is enabled.
+   * @param conf  Configuration object.
* @param onlineRegions A map containing online regions.
+   * @return True if the instance is instantiated successfully, false 
otherwise.
*/
-  public static synchronized void instantiate(Map 
onlineRegions) {
-if (instance == null) 

(hbase) branch HBASE-28463 updated (bfef12af56d -> f890281bf90)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


 discard bfef12af56d HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)
omit a2321ce9d82 HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
omit c1ea6520f3c HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
omit 481a77da447 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 add e3761baec11 HBASE-28436 Use connection url to specify the connection 
registry information (#5770)
 add 6c6e776eea6 HBASE-28529 Use ZKClientConfig instead of system 
properties when setting zookeeper configurations (#5835)
 add 7122da5978a HBASE-28517 Make properties dynamically configured (#5823)
 new faada98341f HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 new 9572f7d8d87 HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
 new 84fc9acc224 HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
 new f890281bf90 HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (bfef12af56d)
\
 N -- N -- N   refs/heads/HBASE-28463 (f890281bf90)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 hbase-client/pom.xml   |   5 +
 .../hadoop/hbase/client/ConnectionFactory.java | 313 +
 .../hbase/client/ConnectionRegistryFactory.java|  64 -
 .../hbase/client/ConnectionRegistryURIFactory.java |  26 +-
 .../client/RpcConnectionRegistryCreator.java}  |  40 +--
 .../hbase/client/ZKConnectionRegistryCreator.java} |  43 ++-
 .../hadoop/hbase/zookeeper/ReadOnlyZKClient.java   |  13 +-
 ...adoop.hbase.client.ConnectionRegistryURIFactory |   3 +-
 .../TestConnectionRegistryCreatorUriParsing.java   | 157 +++
 .../apache/hadoop/hbase/zookeeper/ZKConfig.java|  29 +-
 .../hadoop/hbase/zookeeper/TestZKConfig.java   |  47 +---
 .../hbase/client/ClusterConnectionFactory.java |   2 +-
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  |  21 +-
 .../apache/hadoop/hbase/regionserver/HStore.java   |   5 +-
 .../hbase/client/AbstractTestRegionLocator.java|   2 +-
 .../client/TestAsyncAdminWithRegionReplicas.java   |   2 +-
 .../hbase/client/TestAsyncMetaRegionLocator.java   |   3 +-
 .../client/TestAsyncNonMetaRegionLocator.java  |   2 +-
 ...stAsyncNonMetaRegionLocatorConcurrenyLimit.java |   2 +-
 .../hbase/client/TestAsyncRegionLocator.java   |   2 +-
 .../TestAsyncSingleRequestRpcRetryingCaller.java   |   2 +-
 .../client/TestAsyncTableUseMetaReplicas.java  |   3 +-
 ...ReadWriteWithDifferentConnectionRegistries.java | 177 
 ...estCatalogReplicaLoadBalanceSimpleSelector.java |   3 +-
 .../hbase/client/TestMetaRegionLocationCache.java  |   3 +-
 .../hadoop/hbase/regionserver/TestHStore.java  |  20 ++
 .../hbase/zookeeper/RecoverableZooKeeper.java  |  75 +++--
 .../apache/hadoop/hbase/zookeeper/ZKWatcher.java   |   4 +-
 .../hbase/zookeeper/TestRecoverableZooKeeper.java  |   2 +-
 29 files changed, 820 insertions(+), 250 deletions(-)
 copy 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoRegionSplitRestriction.java
 => 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryURIFactory.java
 (63%)
 copy 
hbase-client/src/{test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
 => main/java/org/apache/hadoop/hbase/client/RpcConnectionRegistryCreator.java} 
(55%)
 copy 
hbase-client/src/{test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
 => main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistryCreator.java} 
(52%)
 copy 
hbase-it/src/test/resources/META-INF/services/javax.ws.rs.client.ClientBuilder 
=>

(hbase) 01/04: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit faada98341f0004aa6bc7f1f56b5e968df8d95c0
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.hot.age.millis";
+  public static final DataTieringType DEFAULT_DATATIERING = 
DataTieringType.NONE;
+  public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 
* 1000; // 7 Days
+  private static Dat

(hbase) 02/04: HBASE-28505 Implement enforcement to require Date Tiered Compaction for Time Range Data Tiering (#5809)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 9572f7d8d87fde9b61ac2c7b54d75a0b3328bf4c
Author: vinayak hegde 
AuthorDate: Fri Apr 12 14:54:37 2024 +0530

HBASE-28505 Implement enforcement to require Date Tiered Compaction for 
Time Range Data Tiering (#5809)


Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DateTieredStoreEngine.java  |  3 ++
 .../hadoop/hbase/util/TableDescriptorChecker.java  | 36 +
 .../hbase/client/TestIllegalTableDescriptor.java   | 45 ++
 3 files changed, 84 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index d15a6c92ef0..8fdbb6035ae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine {
+
+  public static final String DATE_TIERED_STORE_ENGINE = 
DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List filesCompacting) {
 return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(), 
filesCompacting);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..471583b32b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static 
org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public final class TableDescriptorChecker {
 
   // check in-memory compaction
   warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+  checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
 }
   }
 
@@ -210,6 +217,35 @@ public final class TableDescriptorChecker {
 });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf,
+final TableDescriptor td) throws IOException {
+// Table level configurations
+checkDateTieredCompactionForTimeRangeDataTiering(conf);
+for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+  // Column family level configurations
+  Configuration cfdConf =
+new 
CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+  checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+}
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf)
+throws IOException {
+final String errorMessage =
+  "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+warnOrThrowExceptionForFailure(false, () -> {
+
+  // Determine whether Date Tiered Compaction will be enabled when Time 
Range Data Tiering is
+  // enabled after the configuration change.
+  if 
(DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY)))
 {
+if 
(!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY)))
 {
+  throw new IllegalArgumentException(errorMessage);
+}
+  }
+});
+  }
+
   private static void checkCompactionPolicy(final Configuration conf, final 
TableDescriptor td)
 throws IOException {
 warnOrThrowExceptionFor

(hbase) 04/04: HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit f890281bf9096b10ef87ad73a6206f51507b1fdc
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu Apr 25 15:29:36 2024 +0530

HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  45 +-
 .../hbase/regionserver/DataTieringManager.java |  42 -
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 178 +
 4 files changed, 263 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 622a57f91c2..5a9c7795a33 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -974,6 +974,7 @@ public class BucketCache implements BlockCache, HeapSize {
   long bytesToFreeWithExtra =
 (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
 
+  long bytesFreed = 0;
   // Instantiate priority buckets
   BucketEntryGroup bucketSingle =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(singleFactor));
@@ -982,9 +983,36 @@ public class BucketCache implements BlockCache, HeapSize {
   BucketEntryGroup bucketMemory =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(memoryFactor));
 
+  // Check the list of files to determine the cold files which can be 
readily evicted.
+  Map coldFiles = null;
+  try {
+DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+coldFiles = dataTieringManager.getColdFilesList();
+  } catch (IllegalStateException e) {
+LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
+  }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
   for (Map.Entry bucketEntryWithKey : 
backingMap.entrySet()) {
+if (
+  coldFiles != null && 
coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
+) {
+  int freedBlockSize = bucketEntryWithKey.getValue().getLength();
+  if (evictBlockIfNoRpcReferenced(bucketEntryWithKey.getKey())) {
+bytesFreed += freedBlockSize;
+  }
+  if (bytesFreed >= bytesToFreeWithExtra) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+"Bucket cache free space completed; required: {} freed: {} 
from cold data blocks.",
+bytesToFreeWithExtra, StringUtils.byteDesc(bytesFreed));
+}
+// Sufficient bytes have been freed.
+return;
+  }
+  continue;
+}
+
 switch (bucketEntryWithKey.getValue().getPriority()) {
   case SINGLE: {
 bucketSingle.add(bucketEntryWithKey);
@@ -1001,6 +1029,21 @@ public class BucketCache implements BlockCache, HeapSize 
{
 }
   }
 
+  // Check if the cold file eviction is sufficient to create enough space.
+  bytesToFreeWithExtra -= bytesFreed;
+  if (bytesToFreeWithExtra <= 0) {
+LOG.debug("Bucket cache free space completed; freed space : {} bytes 
of cold data blocks.",
+  StringUtils.byteDesc(bytesFreed));
+return;
+  }
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+  "Bucket cache free space completed; freed space : {} "
++ "bytes of cold data blocks. {} more bytes required to be freed.",
+  StringUtils.byteDesc(bytesFreed), bytesToFreeWithExtra);
+  }
+
   PriorityQueue bucketQueue =
 new PriorityQueue<>(3, 
Comparator.comparingLong(BucketEntryGroup::overflow));
 
@@ -1009,8 +1052,6 @@ public class BucketCache implements BlockCache, HeapSize {
   bucketQueue.add(bucketMemory);
 
   int remainingBuckets = bucketQueue.size();
-  long bytesFreed = 0;
-
   BucketEntryGroup bucketGroup;
   while ((bucketGroup = bucketQueue.poll()) != null) {
 long overflow = bucketGroup.overflow();
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index dec96604774..6c699e77c2f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@

(hbase) 03/04: HBASE-28466 Integration of time-based priority logic of bucket cache in prefetch functionality of HBase (#5808)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 84fc9acc2248d000bee6bada3925fa5800a00184
Author: vinayak hegde 
AuthorDate: Mon Apr 22 15:23:30 2024 +0530

HBASE-28466 Integration of time-based priority logic of bucket cache in 
prefetch functionality of HBase (#5808)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  6 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  7 +-
 .../apache/hadoop/hbase/io/hfile/HFileInfo.java|  6 ++
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 15 +++-
 .../hbase/regionserver/DataTieringManager.java | 91 +-
 .../hbase/regionserver/TestDataTieringManager.java | 58 ++
 7 files changed, 145 insertions(+), 44 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index bed0194b1fa..ac83af1053a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -197,11 +198,12 @@ public interface BlockCache extends Iterable 
{
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain 
the boolean value
* reflecting if the passed file should indeed be cached.
-   * @param fileName to check if it should be cached.
+   * @param hFileInfo Information about the file to check if it should be 
cached.
+   * @param conf  The configuration object to use for determining caching 
behavior.
* @return empty optional if this method is not supported, otherwise the 
returned optional
* contains the boolean value informing if the file should be cached.
*/
-  default Optional shouldCacheFile(String fileName) {
+  default Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 return Optional.empty();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d6692d2e2bf..b12510cdccd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.commons.lang3.mutable.Mutable;
 import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -482,9 +483,9 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public Optional shouldCacheFile(String fileName) {
-Optional l1Result = l1Cache.shouldCacheFile(fileName);
-Optional l2Result = l2Cache.shouldCacheFile(fileName);
+  public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
+Optional l1Result = l1Cache.shouldCacheFile(hFileInfo, conf);
+Optional l2Result = l2Cache.shouldCacheFile(hFileInfo, conf);
 final Mutable combinedResult = new MutableBoolean(true);
 l1Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
 l2Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 31e637a0099..e89f86e7c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -122,6 +122,7 @@ public class HFileInfo implements SortedMap 
{
 
   private FixedFileTrailer trailer;
   private HFileContext hfileContext;
+  private boolean initialized = false;
 
   public HFileInfo() {
 super();
@@ -363,6 +364,10 @@ public class HFileInfo implements SortedMap {
* should be called after initTrailerAndContext
*/
   public void initMetaAndIndex(HFile.Reader reader) throws IOException {
+if (initialized) {
+  return;
+}
+
 ReaderContext context = reader.

(hbase) branch HBASE-28463 updated: HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

2024-04-25 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new bfef12af56d HBASE-28468: Integrate the data-tiering logic into cache 
evictions. (#5829)
bfef12af56d is described below

commit bfef12af56d6a5091dad776c5c3b4daa73172b2a
Author: jhungund <106576553+jhung...@users.noreply.github.com>
AuthorDate: Thu Apr 25 15:29:36 2024 +0530

HBASE-28468: Integrate the data-tiering logic into cache evictions. (#5829)

Signed-off-by: Wellington Chevreuil 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  45 +-
 .../hbase/regionserver/DataTieringManager.java |  42 -
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |   4 +-
 .../hbase/regionserver/TestDataTieringManager.java | 178 +
 4 files changed, 263 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 622a57f91c2..5a9c7795a33 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -974,6 +974,7 @@ public class BucketCache implements BlockCache, HeapSize {
   long bytesToFreeWithExtra =
 (long) Math.floor(bytesToFreeWithoutExtra * (1 + extraFreeFactor));
 
+  long bytesFreed = 0;
   // Instantiate priority buckets
   BucketEntryGroup bucketSingle =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(singleFactor));
@@ -982,9 +983,36 @@ public class BucketCache implements BlockCache, HeapSize {
   BucketEntryGroup bucketMemory =
 new BucketEntryGroup(bytesToFreeWithExtra, blockSize, 
getPartitionSize(memoryFactor));
 
+  // Check the list of files to determine the cold files which can be 
readily evicted.
+  Map coldFiles = null;
+  try {
+DataTieringManager dataTieringManager = 
DataTieringManager.getInstance();
+coldFiles = dataTieringManager.getColdFilesList();
+  } catch (IllegalStateException e) {
+LOG.warn("Data Tiering Manager is not set. Ignore time-based block 
evictions.");
+  }
   // Scan entire map putting bucket entry into appropriate bucket entry
   // group
   for (Map.Entry bucketEntryWithKey : 
backingMap.entrySet()) {
+if (
+  coldFiles != null && 
coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
+) {
+  int freedBlockSize = bucketEntryWithKey.getValue().getLength();
+  if (evictBlockIfNoRpcReferenced(bucketEntryWithKey.getKey())) {
+bytesFreed += freedBlockSize;
+  }
+  if (bytesFreed >= bytesToFreeWithExtra) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+"Bucket cache free space completed; required: {} freed: {} 
from cold data blocks.",
+bytesToFreeWithExtra, StringUtils.byteDesc(bytesFreed));
+}
+// Sufficient bytes have been freed.
+return;
+  }
+  continue;
+}
+
 switch (bucketEntryWithKey.getValue().getPriority()) {
   case SINGLE: {
 bucketSingle.add(bucketEntryWithKey);
@@ -1001,6 +1029,21 @@ public class BucketCache implements BlockCache, HeapSize 
{
 }
   }
 
+  // Check if the cold file eviction is sufficient to create enough space.
+  bytesToFreeWithExtra -= bytesFreed;
+  if (bytesToFreeWithExtra <= 0) {
+LOG.debug("Bucket cache free space completed; freed space : {} bytes 
of cold data blocks.",
+  StringUtils.byteDesc(bytesFreed));
+return;
+  }
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+  "Bucket cache free space completed; freed space : {} "
++ "bytes of cold data blocks. {} more bytes required to be freed.",
+  StringUtils.byteDesc(bytesFreed), bytesToFreeWithExtra);
+  }
+
   PriorityQueue bucketQueue =
 new PriorityQueue<>(3, 
Comparator.comparingLong(BucketEntryGroup::overflow));
 
@@ -1009,8 +1052,6 @@ public class BucketCache implements BlockCache, HeapSize {
   bucketQueue.add(bucketMemory);
 
   int remainingBuckets = bucketQueue.size();
-  long bytesFreed = 0;
-
   BucketEntryGroup bucketGroup;
   while ((bucketGroup = bucketQueue.poll()) != null) {
 long overflow = bucketGroup.overflow();
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index dec96604774..6c699e77c2f 1

(hbase) 02/03: HBASE-28505 Implement enforcement to require Date Tiered Compaction for Time Range Data Tiering (#5809)

2024-04-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit c1ea6520f3cc63ab78b25031998d53f5eed3d676
Author: vinayak hegde 
AuthorDate: Fri Apr 12 14:54:37 2024 +0530

HBASE-28505 Implement enforcement to require Date Tiered Compaction for 
Time Range Data Tiering (#5809)


Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DateTieredStoreEngine.java  |  3 ++
 .../hadoop/hbase/util/TableDescriptorChecker.java  | 36 +
 .../hbase/client/TestIllegalTableDescriptor.java   | 45 ++
 3 files changed, 84 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index d15a6c92ef0..8fdbb6035ae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine {
+
+  public static final String DATE_TIERED_STORE_ENGINE = 
DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List filesCompacting) {
 return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(), 
filesCompacting);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..471583b32b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static 
org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public final class TableDescriptorChecker {
 
   // check in-memory compaction
   warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+  checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
 }
   }
 
@@ -210,6 +217,35 @@ public final class TableDescriptorChecker {
 });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf,
+final TableDescriptor td) throws IOException {
+// Table level configurations
+checkDateTieredCompactionForTimeRangeDataTiering(conf);
+for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+  // Column family level configurations
+  Configuration cfdConf =
+new 
CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+  checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+}
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf)
+throws IOException {
+final String errorMessage =
+  "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+warnOrThrowExceptionForFailure(false, () -> {
+
+  // Determine whether Date Tiered Compaction will be enabled when Time 
Range Data Tiering is
+  // enabled after the configuration change.
+  if 
(DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY)))
 {
+if 
(!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY)))
 {
+  throw new IllegalArgumentException(errorMessage);
+}
+  }
+});
+  }
+
   private static void checkCompactionPolicy(final Configuration conf, final 
TableDescriptor td)
 throws IOException {
 warnOrThrowExceptionFor

(hbase) branch HBASE-28463 updated (25f13b6f45d -> a2321ce9d82)

2024-04-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


 discard 25f13b6f45d HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
omit b7bb8b9d27a HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
omit fe285bcc2f6 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 add 5a8ae130552 HBASE-28516 Bump idna from 2.8 to 3.7 in 
/dev-support/git-jira-release-audit (#5821)
 add 4b7a39a229d HBASE-28504 Implement eviction logic for scanners in Rest 
APIs to prevent scanner leakage (#5802)
 add 4ed17c7117a HBASE-28518 Allow specifying a filter for the REST 
multiget endpoint
 add 4c29c5d86bd HBASE-28500 Rest Java client library assumes stateless 
servers (#5804)
 add 16e9affca37 HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
 add d7f9ba65487 HBASE-28500 Rest Java client library assumes stateless 
servers (Addendum:fix spotbugs warning) (#5831)
 add a34b4bc5171 HBASE-28470 Fix typo in Java method comment (#5786)
 add bc37ce87b03 HBASE-28509 ScanResumer.resume would perform unnecessary 
scan when cl… (#5817)
 add 3539581268f HBASE-28511 Update hbase-thirdparty to 4.1.7 (#5818)
 add aaeef2db6ac HBASE-28215 CreateTableProcedure and DeleteTableProcedure 
should sleep a while before retrying (#5502)
 add 8b5ccda02f8 HASE-28414 create-release should spotless:apply after 
making any file changes (#5824)
 add c73f8b51cba HBASE-28497 Missing fields in Get.toJSON (#5800)
 add 8936a19b933 Revert "HBASE-28215 CreateTableProcedure and 
DeleteTableProcedure should sleep a while before retrying (#5502)"
 add 5a404c49504 HBASE-28150 CreateTableProcedure and DeleteTableProcedure 
should sleep a while before retrying (#5502)
 new 481a77da447 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 new c1ea6520f3c HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
 new a2321ce9d82 HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (25f13b6f45d)
\
 N -- N -- N   refs/heads/HBASE-28463 (a2321ce9d82)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 dev-support/create-release/release-build.sh|  6 +-
 dev-support/create-release/release-util.sh | 11 +++
 .../git-jira-release-audit/requirements.txt|  2 +-
 .../java/org/apache/hadoop/hbase/client/Admin.java |  2 +-
 .../AsyncScanSingleRegionRpcRetryingCaller.java| 37 +++--
 .../hbase/client/AsyncTableResultScanner.java  | 24 +-
 .../java/org/apache/hadoop/hbase/client/Get.java   | 21 +
 .../apache/hadoop/hbase/filter/ParseFilter.java|  2 +-
 .../hadoop/hbase/security/EncryptionUtil.java  |  4 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  2 +-
 .../apache/hadoop/hbase/client/TestOperation.java  | 72 +
 hbase-examples/pom.xml |  2 +-
 hbase-protocol-shaded/pom.xml  |  2 +-
 hbase-rest/pom.xml |  4 +
 .../org/apache/hadoop/hbase/rest/Constants.java|  9 ++-
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 31 +++-
 .../apache/hadoop/hbase/rest/ScannerResource.java  | 38 +++--
 .../apache/hadoop/hbase/rest/TableResource.java| 29 +--
 .../apache/hadoop/hbase/rest/client/Client.java| 49 ++--
 .../hadoop/hbase/rest/TestMultiRowResource.java| 81 +---
 .../apache/hadoop/hbase/rest/TestTableScan.java| 41 --
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java|  2 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|  2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  9 +++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 89 --
 .../master/procedure/Cre

(hbase) 03/03: HBASE-28466 Integration of time-based priority logic of bucket cache in prefetch functionality of HBase (#5808)

2024-04-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a2321ce9d824e76ad053cfe915880f83300936a0
Author: vinayak hegde 
AuthorDate: Mon Apr 22 15:23:30 2024 +0530

HBASE-28466 Integration of time-based priority logic of bucket cache in 
prefetch functionality of HBase (#5808)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  6 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  7 +-
 .../apache/hadoop/hbase/io/hfile/HFileInfo.java|  6 ++
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 15 +++-
 .../hbase/regionserver/DataTieringManager.java | 91 +-
 .../hbase/regionserver/TestDataTieringManager.java | 58 ++
 7 files changed, 145 insertions(+), 44 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index bed0194b1fa..ac83af1053a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -197,11 +198,12 @@ public interface BlockCache extends Iterable 
{
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain 
the boolean value
* reflecting if the passed file should indeed be cached.
-   * @param fileName to check if it should be cached.
+   * @param hFileInfo Information about the file to check if it should be 
cached.
+   * @param conf  The configuration object to use for determining caching 
behavior.
* @return empty optional if this method is not supported, otherwise the 
returned optional
* contains the boolean value informing if the file should be cached.
*/
-  default Optional shouldCacheFile(String fileName) {
+  default Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 return Optional.empty();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d6692d2e2bf..b12510cdccd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.commons.lang3.mutable.Mutable;
 import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -482,9 +483,9 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public Optional shouldCacheFile(String fileName) {
-Optional l1Result = l1Cache.shouldCacheFile(fileName);
-Optional l2Result = l2Cache.shouldCacheFile(fileName);
+  public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
+Optional l1Result = l1Cache.shouldCacheFile(hFileInfo, conf);
+Optional l2Result = l2Cache.shouldCacheFile(hFileInfo, conf);
 final Mutable combinedResult = new MutableBoolean(true);
 l1Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
 l2Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 31e637a0099..e89f86e7c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -122,6 +122,7 @@ public class HFileInfo implements SortedMap 
{
 
   private FixedFileTrailer trailer;
   private HFileContext hfileContext;
+  private boolean initialized = false;
 
   public HFileInfo() {
 super();
@@ -363,6 +364,10 @@ public class HFileInfo implements SortedMap {
* should be called after initTrailerAndContext
*/
   public void initMetaAndIndex(HFile.Reader reader) throws IOException {
+if (initialized) {
+  return;
+}
+
 ReaderContext context = reader.

(hbase) 01/03: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-04-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 481a77da447cdb13859d7e2279fe3eb519663c2d
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.hot.age.millis";
+  public static final DataTieringType DEFAULT_DATATIERING = 
DataTieringType.NONE;
+  public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 
* 1000; // 7 Days
+  private static Dat

(hbase) branch HBASE-28463 updated: HBASE-28466 Integration of time-based priority logic of bucket cache in prefetch functionality of HBase (#5808)

2024-04-22 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new 25f13b6f45d HBASE-28466 Integration of time-based priority logic of 
bucket cache in prefetch functionality of HBase (#5808)
25f13b6f45d is described below

commit 25f13b6f45d0cf8081152e25e264eb447cb9522d
Author: vinayak hegde 
AuthorDate: Mon Apr 22 15:23:30 2024 +0530

HBASE-28466 Integration of time-based priority logic of bucket cache in 
prefetch functionality of HBase (#5808)

Signed-off-by: Wellington Chevreuil 
---
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  6 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  7 +-
 .../apache/hadoop/hbase/io/hfile/HFileInfo.java|  6 ++
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|  6 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 15 +++-
 .../hbase/regionserver/DataTieringManager.java | 91 +-
 .../hbase/regionserver/TestDataTieringManager.java | 58 ++
 7 files changed, 145 insertions(+), 44 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index bed0194b1fa..ac83af1053a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -197,11 +198,12 @@ public interface BlockCache extends Iterable 
{
* overridden by all implementing classes. In such cases, the returned 
Optional will be empty. For
* subclasses implementing this logic, the returned Optional would contain 
the boolean value
* reflecting if the passed file should indeed be cached.
-   * @param fileName to check if it should be cached.
+   * @param hFileInfo Information about the file to check if it should be 
cached.
+   * @param conf  The configuration object to use for determining caching 
behavior.
* @return empty optional if this method is not supported, otherwise the 
returned optional
* contains the boolean value informing if the file should be cached.
*/
-  default Optional shouldCacheFile(String fileName) {
+  default Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
 return Optional.empty();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index d6692d2e2bf..b12510cdccd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.commons.lang3.mutable.Mutable;
 import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -482,9 +483,9 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
-  public Optional shouldCacheFile(String fileName) {
-Optional l1Result = l1Cache.shouldCacheFile(fileName);
-Optional l2Result = l2Cache.shouldCacheFile(fileName);
+  public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration 
conf) {
+Optional l1Result = l1Cache.shouldCacheFile(hFileInfo, conf);
+Optional l2Result = l2Cache.shouldCacheFile(hFileInfo, conf);
 final Mutable combinedResult = new MutableBoolean(true);
 l1Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
 l2Result.ifPresent(b -> combinedResult.setValue(b && 
combinedResult.getValue()));
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
index 31e637a0099..e89f86e7c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileInfo.java
@@ -122,6 +122,7 @@ public class HFileInfo implements SortedMap 
{
 
   private FixedFileTrailer trailer;
   private HFileContext hfileContext;
+  private boolean initialized = false;
 
   public HFileInfo() {
 super();
@@ -363,6 +364,10 @@ public class HFileInfo implements So

(hbase) branch branch-2.4 updated: HBASE-28292 Make Delay prefetch property to be dynamically configured (#5605)

2024-04-16 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new cbc45c08e5a HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
cbc45c08e5a is described below

commit cbc45c08e5aec9da3f87cfe8f400e94d8054766b
Author: Abhishek Kothalikar <99398985+kabhish...@users.noreply.github.com>
AuthorDate: Tue Apr 16 18:03:06 2024 +0530

HBASE-28292 Make Delay prefetch property to be dynamically configured 
(#5605)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Peter Somogyi 

(cherry picked from commit 16e9affca37f0027e1bc66e873cb291097aa75dd)
---
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|   2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |   9 ++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 104 ++---
 .../hadoop/hbase/regionserver/HRegionServer.java   |   7 ++
 .../regionserver/PrefetchExecutorNotifier.java |  75 +++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java |  57 ++-
 6 files changed, 239 insertions(+), 15 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index d18194e95c4..c805e84dd32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -455,6 +455,8 @@ public final class HFile {
 
 boolean prefetchComplete();
 
+boolean prefetchStarted();
+
 /**
  * To close the stream's socket. Note: This can be concurrently called 
from multiple threads and
  * implementation should take care of thread safety.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index c044a1179e0..794655ef8a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1618,6 +1618,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 return PrefetchExecutor.isCompleted(path);
   }
 
+  /**
+   * Returns true if block prefetching was started after waiting for specified 
delay, false
+   * otherwise
+   */
+  @Override
+  public boolean prefetchStarted() {
+return PrefetchExecutor.isPrefetchStarted();
+  }
+
   /**
* Create a Scanner on this file. No seeks or reads are done on creation. 
Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is 
nothing to clean up
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index bf27cbcfbc8..707515fd8af 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -17,20 +17,24 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,23 +43,30 @@ import org.slf4j.LoggerFactory;
 public final class PrefetchExecutor {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(PrefetchExecutor.class);
+  /** Wait time in milliseconds before executing prefetch */
+  public static final String PREFETCH_DELAY = "hbase.hfile.prefetch.delay";
+  public static final String PREFETCH_DELAY_VARIATION = 
"hbase.hfile.prefetch.delay.variation";
+  public static final float PREFETCH_DELAY_VARIATION_DEFAULT_VALUE = 0.2f;
 
   /** Futures for tracking block prefetch activity */
   private static final Map> prefetchFutures = new 
ConcurrentSkipListMap<>();
+  /** Runnables for resetting the prefetch acti

(hbase) branch branch-2.5 updated: HBASE-28292 Make Delay prefetch property to be dynamically configured (#5605)

2024-04-16 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new 582d1c892e8 HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
582d1c892e8 is described below

commit 582d1c892e8304cec8095edfcef2917a8f1a3658
Author: Abhishek Kothalikar <99398985+kabhish...@users.noreply.github.com>
AuthorDate: Tue Apr 16 18:03:06 2024 +0530

HBASE-28292 Make Delay prefetch property to be dynamically configured 
(#5605)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Peter Somogyi 

(cherry picked from commit 16e9affca37f0027e1bc66e873cb291097aa75dd)
---
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|  2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  9 +++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 86 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  7 ++
 .../regionserver/PrefetchExecutorNotifier.java | 75 +++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 57 +-
 6 files changed, 231 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 207c9986651..cb7b96ecb41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -452,6 +452,8 @@ public final class HFile {
 
 boolean prefetchComplete();
 
+boolean prefetchStarted();
+
 /**
  * To close the stream's socket. Note: This can be concurrently called 
from multiple threads and
  * implementation should take care of thread safety.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 2ff301e9e70..e50637bf22b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1640,6 +1640,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 return PrefetchExecutor.isCompleted(path);
   }
 
+  /**
+   * Returns true if block prefetching was started after waiting for specified 
delay, false
+   * otherwise
+   */
+  @Override
+  public boolean prefetchStarted() {
+return PrefetchExecutor.isPrefetchStarted();
+  }
+
   /**
* Create a Scanner on this file. No seeks or reads are done on creation. 
Call
 * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is 
nothing to clean up
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 912241fbb95..c90d803f4ba 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -41,23 +44,30 @@ import org.slf4j.LoggerFactory;
 public final class PrefetchExecutor {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(PrefetchExecutor.class);
+  /** Wait time in milliseconds before executing prefetch */
+  public static final String PREFETCH_DELAY = "hbase.hfile.prefetch.delay";
+  public static final String PREFETCH_DELAY_VARIATION = 
"hbase.hfile.prefetch.delay.variation";
+  public static final float PREFETCH_DELAY_VARIATION_DEFAULT_VALUE = 0.2f;
 
   /** Futures for tracking block prefetch activity */
   private static final Map> prefetchFutures = new 
ConcurrentSkipListMap<>();
+  /** Runnables for resetting the prefetch activity */
+  private static final Map prefetchRunnable = new 
ConcurrentSkipListMap<>();
   /** Executor pool shared among all HFiles for block prefetch */
   private static final ScheduledExecutorService prefetchExecutorPool;
   /** Delay before beginnin

(hbase) branch branch-2.6 updated: HBASE-28292 Make Delay prefetch property to be dynamically configured (#5605)

2024-04-16 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 0e6bb2865d7 HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
0e6bb2865d7 is described below

commit 0e6bb2865d7639a38360744bd3d953a3b1f8b3e7
Author: Abhishek Kothalikar <99398985+kabhish...@users.noreply.github.com>
AuthorDate: Tue Apr 16 18:03:06 2024 +0530

HBASE-28292 Make Delay prefetch property to be dynamically configured 
(#5605)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Peter Somogyi 

(cherry picked from commit 16e9affca37f0027e1bc66e873cb291097aa75dd)
---
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|  2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  9 +++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 89 --
 .../hadoop/hbase/regionserver/HRegionServer.java   |  7 ++
 .../regionserver/PrefetchExecutorNotifier.java | 75 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 56 +-
 6 files changed, 230 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 84fe9387d6e..ae79ad85724 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -457,6 +457,8 @@ public final class HFile {
 
 boolean prefetchComplete();
 
+boolean prefetchStarted();
+
 /**
  * To close the stream's socket. Note: This can be concurrently called 
from multiple threads and
  * implementation should take care of thread safety.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 3d020d0629e..caf875a89d6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1658,6 +1658,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 return PrefetchExecutor.isCompleted(path);
   }
 
+  /**
+   * Returns true if block prefetching was started after waiting for specified 
delay, false
+   * otherwise
+   */
+  @Override
+  public boolean prefetchStarted() {
+return PrefetchExecutor.isPrefetchStarted();
+  }
+
   /**
* Create a Scanner on this file. No seeks or reads are done on creation. 
Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is 
nothing to clean up
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 4ae19193c8a..d23c2e3ecf3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -41,23 +44,30 @@ import org.slf4j.LoggerFactory;
 public final class PrefetchExecutor {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(PrefetchExecutor.class);
+  /** Wait time in milliseconds before executing prefetch */
+  public static final String PREFETCH_DELAY = "hbase.hfile.prefetch.delay";
+  public static final String PREFETCH_DELAY_VARIATION = 
"hbase.hfile.prefetch.delay.variation";
+  public static final float PREFETCH_DELAY_VARIATION_DEFAULT_VALUE = 0.2f;
 
   /** Futures for tracking block prefetch activity */
   private static final Map> prefetchFutures = new 
ConcurrentSkipListMap<>();
+  /** Runnables for resetting the prefetch activity */
+  private static final Map prefetchRunnable = new 
ConcurrentSkipListMap<>();
   /** Executor pool shared among all HFiles for block prefetch */
   private static final ScheduledExecutorService prefetchExecutorPool;
   /** Delay before beginnin

(hbase) branch branch-2 updated: HBASE-28292 Make Delay prefetch property to be dynamically configured (#5605)

2024-04-16 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new e8c5772cb6c HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
e8c5772cb6c is described below

commit e8c5772cb6cf6092b62f1e1c08392c881e685000
Author: Abhishek Kothalikar <99398985+kabhish...@users.noreply.github.com>
AuthorDate: Tue Apr 16 18:03:06 2024 +0530

HBASE-28292 Make Delay prefetch property to be dynamically configured 
(#5605)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Peter Somogyi 

(cherry picked from commit 16e9affca37f0027e1bc66e873cb291097aa75dd)
---
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|  2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  9 +++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 89 --
 .../hadoop/hbase/regionserver/HRegionServer.java   |  7 ++
 .../regionserver/PrefetchExecutorNotifier.java | 75 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 56 +-
 6 files changed, 230 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 84fe9387d6e..ae79ad85724 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -457,6 +457,8 @@ public final class HFile {
 
 boolean prefetchComplete();
 
+boolean prefetchStarted();
+
 /**
  * To close the stream's socket. Note: This can be concurrently called 
from multiple threads and
  * implementation should take care of thread safety.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index ba961faf953..ace662414f4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1658,6 +1658,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 return PrefetchExecutor.isCompleted(path);
   }
 
+  /**
+   * Returns true if block prefetching was started after waiting for specified 
delay, false
+   * otherwise
+   */
+  @Override
+  public boolean prefetchStarted() {
+return PrefetchExecutor.isPrefetchStarted();
+  }
+
   /**
* Create a Scanner on this file. No seeks or reads are done on creation. 
Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is 
nothing to clean up
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 4ae19193c8a..d23c2e3ecf3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -41,23 +44,30 @@ import org.slf4j.LoggerFactory;
 public final class PrefetchExecutor {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(PrefetchExecutor.class);
+  /** Wait time in milliseconds before executing prefetch */
+  public static final String PREFETCH_DELAY = "hbase.hfile.prefetch.delay";
+  public static final String PREFETCH_DELAY_VARIATION = 
"hbase.hfile.prefetch.delay.variation";
+  public static final float PREFETCH_DELAY_VARIATION_DEFAULT_VALUE = 0.2f;
 
   /** Futures for tracking block prefetch activity */
   private static final Map> prefetchFutures = new 
ConcurrentSkipListMap<>();
+  /** Runnables for resetting the prefetch activity */
+  private static final Map prefetchRunnable = new 
ConcurrentSkipListMap<>();
   /** Executor pool shared among all HFiles for block prefetch */
   private static final ScheduledExecutorService prefetchExecutorPool;
   /** Delay before beginnin

(hbase) branch branch-3 updated: HBASE-28292 Make Delay prefetch property to be dynamically configured (#5605)

2024-04-16 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 5b9b18fafa8 HBASE-28292 Make Delay prefetch property to be dynamically 
configured (#5605)
5b9b18fafa8 is described below

commit 5b9b18fafa875e1cb77cbf857d403650f23b200e
Author: Abhishek Kothalikar <99398985+kabhish...@users.noreply.github.com>
AuthorDate: Tue Apr 16 18:03:06 2024 +0530

HBASE-28292 Make Delay prefetch property to be dynamically configured 
(#5605)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Peter Somogyi 

(cherry picked from commit 16e9affca37f0027e1bc66e873cb291097aa75dd)
---
 .../org/apache/hadoop/hbase/io/hfile/HFile.java|  2 +
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  9 +++
 .../hadoop/hbase/io/hfile/PrefetchExecutor.java| 89 --
 .../hadoop/hbase/regionserver/HRegionServer.java   |  7 ++
 .../regionserver/PrefetchExecutorNotifier.java | 75 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 56 +-
 6 files changed, 230 insertions(+), 8 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 84fe9387d6e..ae79ad85724 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -457,6 +457,8 @@ public final class HFile {
 
 boolean prefetchComplete();
 
+boolean prefetchStarted();
+
 /**
  * To close the stream's socket. Note: This can be concurrently called 
from multiple threads and
  * implementation should take care of thread safety.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index e0f27af7145..9c9b38c4906 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1658,6 +1658,15 @@ public abstract class HFileReaderImpl implements 
HFile.Reader, Configurable {
 return PrefetchExecutor.isCompleted(path);
   }
 
+  /**
+   * Returns true if block prefetching was started after waiting for specified 
delay, false
+   * otherwise
+   */
+  @Override
+  public boolean prefetchStarted() {
+return PrefetchExecutor.isPrefetchStarted();
+  }
+
   /**
* Create a Scanner on this file. No seeks or reads are done on creation. 
Call
* {@link HFileScanner#seekTo(Cell)} to position an start the read. There is 
nothing to clean up
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
index 4ae19193c8a..d23c2e3ecf3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.java
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import com.google.errorprone.annotations.RestrictedApi;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -41,23 +44,30 @@ import org.slf4j.LoggerFactory;
 public final class PrefetchExecutor {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(PrefetchExecutor.class);
+  /** Wait time in milliseconds before executing prefetch */
+  public static final String PREFETCH_DELAY = "hbase.hfile.prefetch.delay";
+  public static final String PREFETCH_DELAY_VARIATION = 
"hbase.hfile.prefetch.delay.variation";
+  public static final float PREFETCH_DELAY_VARIATION_DEFAULT_VALUE = 0.2f;
 
   /** Futures for tracking block prefetch activity */
   private static final Map> prefetchFutures = new 
ConcurrentSkipListMap<>();
+  /** Runnables for resetting the prefetch activity */
+  private static final Map prefetchRunnable = new 
ConcurrentSkipListMap<>();
   /** Executor pool shared among all HFiles for block prefetch */
   private static final ScheduledExecutorService prefetchExecutorPool;
   /** Delay before beginnin

(hbase) branch HBASE-28463 updated: HBASE-28505 Implement enforcement to require Date Tiered Compaction for Time Range Data Tiering (#5809)

2024-04-12 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new b7bb8b9d27a HBASE-28505 Implement enforcement to require Date Tiered 
Compaction for Time Range Data Tiering (#5809)
b7bb8b9d27a is described below

commit b7bb8b9d27a2e0595b3c06a8763c1d01b6651c8f
Author: vinayak hegde 
AuthorDate: Fri Apr 12 14:54:37 2024 +0530

HBASE-28505 Implement enforcement to require Date Tiered Compaction for 
Time Range Data Tiering (#5809)


Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DateTieredStoreEngine.java  |  3 ++
 .../hadoop/hbase/util/TableDescriptorChecker.java  | 36 +
 .../hbase/client/TestIllegalTableDescriptor.java   | 45 ++
 3 files changed, 84 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index d15a6c92ef0..8fdbb6035ae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine {
+
+  public static final String DATE_TIERED_STORE_ENGINE = 
DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List filesCompacting) {
 return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(), 
filesCompacting);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa0..471583b32b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static 
org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public final class TableDescriptorChecker {
 
   // check in-memory compaction
   warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+  checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
 }
   }
 
@@ -210,6 +217,35 @@ public final class TableDescriptorChecker {
 });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf,
+final TableDescriptor td) throws IOException {
+// Table level configurations
+checkDateTieredCompactionForTimeRangeDataTiering(conf);
+for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+  // Column family level configurations
+  Configuration cfdConf =
+new 
CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+  checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+}
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final 
Configuration conf)
+throws IOException {
+final String errorMessage =
+  "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+warnOrThrowExceptionForFailure(false, () -> {
+
+  // Determine whether Date Tiered Compaction will be enabled when Time 
Range Data Tiering is
+  // enabled after the configuration change.
+  if 
(DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY)))
 {
+if 
(!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY)))
 {
+  

(hbase) 01/01: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-04-12 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit fe285bcc2f649e1a51d62a3044dc2116ec637377
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.hot.age.millis";
+  public static final DataTieringType DEFAULT_DATATIERING = 
DataTieringType.NONE;
+  public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 
* 1000; // 7 Days
+  private static Dat

(hbase) branch HBASE-28463 updated (a9d31709219 -> fe285bcc2f6)

2024-04-12 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


omit a9d31709219 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
 add 6101bad5a3a HBASE-28366 Mis-order of SCP and regionServerReport 
results into region inconsistencies (#5774)
 add aea7e7c85cd [ADDENDUM] HBASE-28458 
BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully 
cached (#5777) (#5791)
 add 558b151c5b2 HBASE-28483 backup merge fails on bulkloaded hfiles (#5795)
 add c1012a9ebec HBASE-28457 Introduce a version field in file based 
tracker record (#5784)
 add e5d59cadc5d HBASE-28481 Prompting table already exists after failing 
to create table with many region replications (#5789)
 add eeebbdfa723 HBASE-28478 Remove the hbase1 compatible code in 
FixedFileTrailer (#5788)
 add 3340d8dd07e HBASE-28183 It's impossible to re-enable the quota table 
if it gets disabled (#5691)
 add bf836a98073 HBASE-26192 Master UI hbck should provide a JSON formatted 
output option (#5772)
 add 1a089cd3935 HBASE-28485 Re-use ZstdDecompressCtx/ZstdCompressCtx for 
performance (#5797)
 add adc79a0a9c2 HBASE-28448 CompressionTest hangs when run over a Ozone 
ofs path (#5771)
 add 5d694dae5e4 HBASE-28506 Remove hbase-compression-xz (#5811)
 new fe285bcc2f6 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (a9d31709219)
\
 N -- N -- N   refs/heads/HBASE-28463 (fe285bcc2f6)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 hbase-assembly/pom.xml |   4 -
 ...=> TestIncrementalBackupMergeWithBulkLoad.java} |  46 +--
 ...xistException.java => HbckEmptyRegionInfo.java} |  16 +-
 ...Exception.java => HbckInconsistentRegions.java} |  40 +-
 ...tsException.java => HbckOrphanRegionsOnFS.java} |  27 +-
 ...tsException.java => HbckOrphanRegionsOnRS.java} |  27 +-
 ...ationException.java => HbckOverlapRegions.java} |  32 +-
 .../org/apache/hadoop/hbase/HbckRegionDetails.java |  32 +-
 ...perationException.java => HbckRegionHoles.java} |  32 +-
 .../org/apache/hadoop/hbase/HbckServerName.java|  28 +-
 ...ationException.java => HbckUnknownServers.java} |  32 +-
 .../hadoop/hbase/client/MutableRegionInfo.java |   2 +-
 hbase-compression/hbase-compression-xz/pom.xml | 166 
 .../hadoop/hbase/io/compress/xz/LzmaCodec.java | 126 --
 .../hbase/io/compress/xz/LzmaCompressor.java   | 223 ---
 .../hbase/io/compress/xz/LzmaDecompressor.java | 151 
 .../io/compress/xz/TestHFileCompressionLzma.java   |  76 
 .../hadoop/hbase/io/compress/xz/TestLzmaCodec.java |  58 ---
 .../io/compress/xz/TestWALCompressionLzma.java |  56 ---
 .../hbase/io/compress/zstd/ZstdCompressor.java |  20 +-
 .../hbase/io/compress/zstd/ZstdDecompressor.java   |  19 +-
 hbase-compression/pom.xml  |   1 -
 .../hadoop/hbase/mapreduce/HFileInputFormat.java   |  25 +-
 .../protobuf/server/region/StoreFileTracker.proto  |   1 +
 hbase-replication/pom.xml  |   5 -
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java|  42 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  46 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java|  48 ++-
 .../apache/hadoop/hbase/master/MasterServices.java |   4 +
 .../apache/hadoop/hbase/master/ServerManager.java  |  20 +-
 .../HbckConfigFactory.java}|  15 +-
 .../hbase/master/http/hbck/model/HbckMetrics.java  |  98 +
 .../http/hbck/resource/HbckMetricsResource.java| 140 +++
 .../master/procedure/CreateTableProcedure.java |  14 +-
 .../storefiletracker/StoreFileListFile.java|  62 ++-
 .../apache/hadoop/hbase/util/CompressionTest.java  |  21 +-
 .../hbase/io/hfile/TestFixedFileTrailer.java   |  76 ++--
 .../io/hfile/bucket/TestPrefetchPersistence.java   |  35 +-
 .../hbase/master/MockNoopMasterServices.java   |

(hbase) branch HBASE-28463 updated: HBASE-28465 Implementation of framework for time-based priority bucket-cache (#5793)

2024-04-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/HBASE-28463 by this push:
 new a9d31709219 HBASE-28465 Implementation of framework for time-based 
priority bucket-cache (#5793)
a9d31709219 is described below

commit a9d31709219ef8e5d4ad69cd1b7bce770b678a5b
Author: vinayak hegde 
AuthorDate: Mon Apr 8 20:54:19 2024 +0530

HBASE-28465 Implementation of framework for time-based priority 
bucket-cache (#5793)

Signed-off-by: Wellington Chevreuil 
---
 .../hbase/regionserver/DataTieringException.java   |  27 ++
 .../hbase/regionserver/DataTieringManager.java | 222 
 .../hadoop/hbase/regionserver/DataTieringType.java |  26 ++
 .../hadoop/hbase/regionserver/HRegionServer.java   |   1 +
 .../hbase/regionserver/TestDataTieringManager.java | 389 +
 5 files changed, 665 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000..8d356422f6e
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+super(reason);
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000..0bc04ddc428
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data 
based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is 
disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link 
DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, 
all data is considered
+ * as hot.
+ */
+@InterfaceAudience.Private
+public class DataTieringManager {
+  private static final Logger LOG = 
LoggerFactory.getLogger(DataTieringManager.class);
+  public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type";
+  public static final String DATATIERING_HOT_DATA_AGE_KEY =
+"hbase.hstore.datatiering.

(hbase) 02/02: [ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) (#5791)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a56126c27664be095c03120a237957f91db7be3a
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Apr 5 10:56:06 2024 +0100

[ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777) (#5791)

Signed-off-by: Peter Somogyi 
(cherry picked from commit d7566abd5de915e8f55a4f1f1939f6be38891657)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 48 ++
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 +++-
 2 files changed, 37 insertions(+), 46 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 55743e861af..c8111522c65 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -700,7 +700,8 @@ public class BucketCache implements BlockCache, HeapSize {
 } else {
   return bucketEntryToUse.withWriteLock(offsetLock, () -> {
 if (backingMap.remove(cacheKey, bucketEntryToUse)) {
-  LOG.debug("removed key {} from back map in the evict process", 
cacheKey);
+  LOG.debug("removed key {} from back map with offset lock {} in the 
evict process",
+cacheKey, bucketEntryToUse.offset());
   blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, 
evictedByEvictionProcess);
   return true;
 }
@@ -1605,19 +1606,21 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public int evictBlocksByHfileName(String hfileName) {
 removeFileFromPrefetch(hfileName);
-Set keySet = blocksByHFile.subSet(new 
BlockCacheKey(hfileName, Long.MIN_VALUE),
-  true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
-
+Set keySet = getAllCacheKeysForFile(hfileName);
 int numEvicted = 0;
 for (BlockCacheKey key : keySet) {
   if (evictBlock(key)) {
 ++numEvicted;
   }
 }
-
 return numEvicted;
   }
 
+  private Set getAllCacheKeysForFile(String hfileName) {
+return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), 
true,
+  new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
+  }
+
   /**
* Used to group bucket entries into priority buckets. There will be a 
BucketEntryGroup for each
* priority (single, multi, memory). Once bucketed, the eviction algorithm 
takes the appropriate
@@ -2030,26 +2033,33 @@ public class BucketCache implements BlockCache, 
HeapSize {
   entry.getKey().getHfileName().equals(fileName.getName())
 && entry.getKey().getBlockType().equals(BlockType.DATA)
 ) {
-  LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
-fileName.getName(), entry.getKey().getOffset());
-  ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
+  long offsetToLock = entry.getValue().offset();
+  LOG.debug("found block {} in the backing map. Acquiring read lock 
for offset {}",
+entry.getKey(), offsetToLock);
+  ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock);
   lock.readLock().lock();
   locks.add(lock);
   // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
+  } else {
+lock.readLock().unlock();
+locks.remove(lock);
+LOG.debug("found block {}, but when locked and tried to count, it 
was gone.");
   }
 }
   });
+  int metaCount = totalBlockCount - dataBlockCount;
   // BucketCache would only have data blocks
   if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
-  "Prefetch executor completed for {}, but only {} blocks were cached. 
"
-+ "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
-  fileName.getName(), count.getValue(), dataBlockCount);
+  "Prefetch executor completed for {}, but only {} data blocks were 
cached. "
++ "Total data blocks for file: {}. "
++ "Checking for blocks pending cache in cache writer queue.",
+  fileName, count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
 

(hbase) 01/02: HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit ee4b44fa504943c08722393c5847afbbb5877cc1
Author: Wellington Ramos Chevreuil 
AuthorDate: Tue Apr 2 11:58:53 2024 +0100

HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider 
a file fully cached (#5777)

Signed-off-by: Duo Zhang 

(cherry picked from commit c4ac2df041aa4795f91024b1e5dc8d4f5b6c048e)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 26 +-
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 +++--
 3 files changed, 34 insertions(+), 26 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 965b9776023..55743e861af 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2020,26 +2020,30 @@ public class BucketCache implements BlockCache, 
HeapSize {
 // so we need to count all blocks for this file in the backing map under
 // a read lock for the block offset
 final List locks = new ArrayList<>();
-LOG.debug("Notifying caching completed for file {}, with total blocks {}", 
fileName,
-  dataBlockCount);
+LOG.debug("Notifying caching completed for file {}, with total blocks {}, 
and data blocks {}",
+  fileName, totalBlockCount, dataBlockCount);
 try {
   final MutableInt count = new MutableInt();
   LOG.debug("iterating over {} entries in the backing map", 
backingMap.size());
   backingMap.entrySet().stream().forEach(entry -> {
-if (entry.getKey().getHfileName().equals(fileName.getName())) {
+if (
+  entry.getKey().getHfileName().equals(fileName.getName())
+&& entry.getKey().getBlockType().equals(BlockType.DATA)
+) {
   LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
 fileName.getName(), entry.getKey().getOffset());
   ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
   lock.readLock().lock();
   locks.add(lock);
+  // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
   }
 }
   });
-  // We may either place only data blocks on the BucketCache or all type 
of blocks
-  if (dataBlockCount == count.getValue() || totalBlockCount == 
count.getValue()) {
-LOG.debug("File {} has now been fully cached.", fileName.getName());
+  // BucketCache would only have data blocks
+  if (dataBlockCount == count.getValue()) {
+LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
@@ -2047,15 +2051,17 @@ public class BucketCache implements BlockCache, 
HeapSize {
 + "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
   fileName.getName(), count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
+  for (ReentrantReadWriteLock lock : locks) {
+lock.readLock().unlock();
+  }
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName.getName());
   Thread.sleep(100);
   notifyFileCachingCompleted(fileName, totalBlockCount, 
dataBlockCount, size);
 } else {
-  LOG.info(
-"We found only {} blocks cached from a total of {} for file {}, "
-  + "but no blocks pending caching. Maybe cache is full?",
-count, dataBlockCount, fileName.getName());
+  LOG.info("We found only {} blocks cached from a total of {} for file 
{}, "
++ "but no blocks pending caching. Maybe cache is full or evictions 
"
++ "happened concurrently to cache prefetch.", count, 
totalBlockCount, fileName);
 }
   }
 } catch (InterruptedException e) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
index 8cd80e755cd..88b0b51131e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvi

(hbase) branch branch-2.6 updated (c690b821ad1 -> a56126c2766)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


from c690b821ad1 HBASE-28366 Mis-order of SCP and regionServerReport 
results into region inconsistencies (#5774)
 new ee4b44fa504 HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777)
 new a56126c2766 [ADDENDUM] HBASE-28458 
BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully 
cached (#5777) (#5791)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 68 ++
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 +--
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 ++-
 4 files changed, 68 insertions(+), 69 deletions(-)



(hbase) 02/02: [ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) (#5791)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 50eb7dafbcb5b6b8a5c9f83ac3bfe0b53bfc16f0
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Apr 5 10:56:06 2024 +0100

[ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777) (#5791)

Signed-off-by: Peter Somogyi 
(cherry picked from commit d7566abd5de915e8f55a4f1f1939f6be38891657)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 48 ++
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 +++-
 2 files changed, 37 insertions(+), 46 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 19b158b9fc5..643f3d8d93d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -736,7 +736,8 @@ public class BucketCache implements BlockCache, HeapSize {
 } else {
   return bucketEntryToUse.withWriteLock(offsetLock, () -> {
 if (backingMap.remove(cacheKey, bucketEntryToUse)) {
-  LOG.debug("removed key {} from back map in the evict process", 
cacheKey);
+  LOG.debug("removed key {} from back map with offset lock {} in the 
evict process",
+cacheKey, bucketEntryToUse.offset());
   blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, 
evictedByEvictionProcess);
   return true;
 }
@@ -1643,19 +1644,21 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public int evictBlocksByHfileName(String hfileName) {
 fileNotFullyCached(hfileName);
-Set keySet = blocksByHFile.subSet(new 
BlockCacheKey(hfileName, Long.MIN_VALUE),
-  true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
-
+Set keySet = getAllCacheKeysForFile(hfileName);
 int numEvicted = 0;
 for (BlockCacheKey key : keySet) {
   if (evictBlock(key)) {
 ++numEvicted;
   }
 }
-
 return numEvicted;
   }
 
+  private Set getAllCacheKeysForFile(String hfileName) {
+return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), 
true,
+  new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
+  }
+
   /**
* Used to group bucket entries into priority buckets. There will be a 
BucketEntryGroup for each
* priority (single, multi, memory). Once bucketed, the eviction algorithm 
takes the appropriate
@@ -2068,26 +2071,33 @@ public class BucketCache implements BlockCache, 
HeapSize {
   entry.getKey().getHfileName().equals(fileName.getName())
 && entry.getKey().getBlockType().equals(BlockType.DATA)
 ) {
-  LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
-fileName.getName(), entry.getKey().getOffset());
-  ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
+  long offsetToLock = entry.getValue().offset();
+  LOG.debug("found block {} in the backing map. Acquiring read lock 
for offset {}",
+entry.getKey(), offsetToLock);
+  ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock);
   lock.readLock().lock();
   locks.add(lock);
   // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
+  } else {
+lock.readLock().unlock();
+locks.remove(lock);
+LOG.debug("found block {}, but when locked and tried to count, it 
was gone.");
   }
 }
   });
+  int metaCount = totalBlockCount - dataBlockCount;
   // BucketCache would only have data blocks
   if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
-  "Prefetch executor completed for {}, but only {} blocks were cached. 
"
-+ "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
-  fileName.getName(), count.getValue(), dataBlockCount);
+  "Prefetch executor completed for {}, but only {} data blocks were 
cached. "
++ "Total data blocks for file: {}. "
++ "Checking for blocks pending cache in cache writer queue.",
+  fileName, count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
   for (Reent

(hbase) 01/02: HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 113f04ea2cfe3583eec0ad31ee1bbdbf330888da
Author: Wellington Ramos Chevreuil 
AuthorDate: Tue Apr 2 11:58:53 2024 +0100

HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider 
a file fully cached (#5777)

Signed-off-by: Duo Zhang 

(cherry picked from commit c4ac2df041aa4795f91024b1e5dc8d4f5b6c048e)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 26 +-
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 +++--
 3 files changed, 34 insertions(+), 26 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 728946c1c18..19b158b9fc5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2058,26 +2058,30 @@ public class BucketCache implements BlockCache, 
HeapSize {
 // so we need to count all blocks for this file in the backing map under
 // a read lock for the block offset
 final List locks = new ArrayList<>();
-LOG.debug("Notifying caching completed for file {}, with total blocks {}", 
fileName,
-  dataBlockCount);
+LOG.debug("Notifying caching completed for file {}, with total blocks {}, 
and data blocks {}",
+  fileName, totalBlockCount, dataBlockCount);
 try {
   final MutableInt count = new MutableInt();
   LOG.debug("iterating over {} entries in the backing map", 
backingMap.size());
   backingMap.entrySet().stream().forEach(entry -> {
-if (entry.getKey().getHfileName().equals(fileName.getName())) {
+if (
+  entry.getKey().getHfileName().equals(fileName.getName())
+&& entry.getKey().getBlockType().equals(BlockType.DATA)
+) {
   LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
 fileName.getName(), entry.getKey().getOffset());
   ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
   lock.readLock().lock();
   locks.add(lock);
+  // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
   }
 }
   });
-  // We may either place only data blocks on the BucketCache or all type 
of blocks
-  if (dataBlockCount == count.getValue() || totalBlockCount == 
count.getValue()) {
-LOG.debug("File {} has now been fully cached.", fileName.getName());
+  // BucketCache would only have data blocks
+  if (dataBlockCount == count.getValue()) {
+LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
@@ -2085,15 +2089,17 @@ public class BucketCache implements BlockCache, 
HeapSize {
 + "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
   fileName.getName(), count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
+  for (ReentrantReadWriteLock lock : locks) {
+lock.readLock().unlock();
+  }
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName.getName());
   Thread.sleep(100);
   notifyFileCachingCompleted(fileName, totalBlockCount, 
dataBlockCount, size);
 } else {
-  LOG.info(
-"We found only {} blocks cached from a total of {} for file {}, "
-  + "but no blocks pending caching. Maybe cache is full?",
-count, dataBlockCount, fileName.getName());
+  LOG.info("We found only {} blocks cached from a total of {} for file 
{}, "
++ "but no blocks pending caching. Maybe cache is full or evictions 
"
++ "happened concurrently to cache prefetch.", count, 
totalBlockCount, fileName);
 }
   }
 } catch (InterruptedException e) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
index 8cd80e755cd..88b0b51131e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvi

(hbase) branch branch-2 updated (91519e8befd -> 50eb7dafbcb)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


from 91519e8befd HBASE-28366 Mis-order of SCP and regionServerReport 
results into region inconsistencies (#5774)
 new 113f04ea2cf HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777)
 new 50eb7dafbcb [ADDENDUM] HBASE-28458 
BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully 
cached (#5777) (#5791)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 68 ++
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 +--
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 ++-
 4 files changed, 68 insertions(+), 69 deletions(-)



(hbase) branch branch-3 updated: [ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) (#5791)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new d7566abd5de [ADDENDUM] HBASE-28458 
BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully 
cached (#5777) (#5791)
d7566abd5de is described below

commit d7566abd5de915e8f55a4f1f1939f6be38891657
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Apr 5 10:56:06 2024 +0100

[ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777) (#5791)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 46 +++---
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 +++-
 2 files changed, 36 insertions(+), 45 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 9541939db94..71bfc757e51 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -747,7 +747,8 @@ public class BucketCache implements BlockCache, HeapSize {
 } else {
   return bucketEntryToUse.withWriteLock(offsetLock, () -> {
 if (backingMap.remove(cacheKey, bucketEntryToUse)) {
-  LOG.debug("removed key {} from back map in the evict process", 
cacheKey);
+  LOG.debug("removed key {} from back map with offset lock {} in the 
evict process",
+cacheKey, bucketEntryToUse.offset());
   blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, 
evictedByEvictionProcess);
   return true;
 }
@@ -1658,19 +1659,21 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public int evictBlocksByHfileName(String hfileName) {
 fileNotFullyCached(hfileName);
-Set keySet = blocksByHFile.subSet(new 
BlockCacheKey(hfileName, Long.MIN_VALUE),
-  true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
-
+Set keySet = getAllCacheKeysForFile(hfileName);
 int numEvicted = 0;
 for (BlockCacheKey key : keySet) {
   if (evictBlock(key)) {
 ++numEvicted;
   }
 }
-
 return numEvicted;
   }
 
+  private Set getAllCacheKeysForFile(String hfileName) {
+return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), 
true,
+  new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
+  }
+
   /**
* Used to group bucket entries into priority buckets. There will be a 
BucketEntryGroup for each
* priority (single, multi, memory). Once bucketed, the eviction algorithm 
takes the appropriate
@@ -2083,25 +2086,32 @@ public class BucketCache implements BlockCache, 
HeapSize {
   entry.getKey().getHfileName().equals(fileName.getName())
 && entry.getKey().getBlockType().equals(BlockType.DATA)
 ) {
-  LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
-fileName, entry.getKey().getOffset());
-  ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
+  long offsetToLock = entry.getValue().offset();
+  LOG.debug("found block {} in the backing map. Acquiring read lock 
for offset {}",
+entry.getKey(), offsetToLock);
+  ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock);
   lock.readLock().lock();
   locks.add(lock);
   // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
+  } else {
+lock.readLock().unlock();
+locks.remove(lock);
+LOG.debug("found block {}, but when locked and tried to count, it 
was gone.");
   }
 }
   });
+  int metaCount = totalBlockCount - dataBlockCount;
   // BucketCache would only have data blocks
   if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
-  "Prefetch executor completed for {}, but only {} blocks were cached. 
"
-+ "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
+  "Prefetch executor completed for {}, but only {} data blocks were 
cached. "
++ "Total data blocks for file: {}. "
++ "Checking for blocks pending cache in cache writer queue.",
   fileName, count.getValue(), d

(hbase) branch master updated: [ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777) (#5791)

2024-04-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new aea7e7c85cd [ADDENDUM] HBASE-28458 
BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully 
cached (#5777) (#5791)
aea7e7c85cd is described below

commit aea7e7c85cdb8628fb03ead0f94d8e07ad49f067
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Apr 5 10:56:06 2024 +0100

[ADDENDUM] HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777) (#5791)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 46 +++---
 .../io/hfile/bucket/TestPrefetchPersistence.java   | 35 +++-
 2 files changed, 36 insertions(+), 45 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 9541939db94..71bfc757e51 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -747,7 +747,8 @@ public class BucketCache implements BlockCache, HeapSize {
 } else {
   return bucketEntryToUse.withWriteLock(offsetLock, () -> {
 if (backingMap.remove(cacheKey, bucketEntryToUse)) {
-  LOG.debug("removed key {} from back map in the evict process", 
cacheKey);
+  LOG.debug("removed key {} from back map with offset lock {} in the 
evict process",
+cacheKey, bucketEntryToUse.offset());
   blockEvicted(cacheKey, bucketEntryToUse, !existedInRamCache, 
evictedByEvictionProcess);
   return true;
 }
@@ -1658,19 +1659,21 @@ public class BucketCache implements BlockCache, 
HeapSize {
   @Override
   public int evictBlocksByHfileName(String hfileName) {
 fileNotFullyCached(hfileName);
-Set keySet = blocksByHFile.subSet(new 
BlockCacheKey(hfileName, Long.MIN_VALUE),
-  true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
-
+Set keySet = getAllCacheKeysForFile(hfileName);
 int numEvicted = 0;
 for (BlockCacheKey key : keySet) {
   if (evictBlock(key)) {
 ++numEvicted;
   }
 }
-
 return numEvicted;
   }
 
+  private Set getAllCacheKeysForFile(String hfileName) {
+return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), 
true,
+  new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
+  }
+
   /**
* Used to group bucket entries into priority buckets. There will be a 
BucketEntryGroup for each
* priority (single, multi, memory). Once bucketed, the eviction algorithm 
takes the appropriate
@@ -2083,25 +2086,32 @@ public class BucketCache implements BlockCache, 
HeapSize {
   entry.getKey().getHfileName().equals(fileName.getName())
 && entry.getKey().getBlockType().equals(BlockType.DATA)
 ) {
-  LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
-fileName, entry.getKey().getOffset());
-  ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
+  long offsetToLock = entry.getValue().offset();
+  LOG.debug("found block {} in the backing map. Acquiring read lock 
for offset {}",
+entry.getKey(), offsetToLock);
+  ReentrantReadWriteLock lock = offsetLock.getLock(offsetToLock);
   lock.readLock().lock();
   locks.add(lock);
   // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
+  } else {
+lock.readLock().unlock();
+locks.remove(lock);
+LOG.debug("found block {}, but when locked and tried to count, it 
was gone.");
   }
 }
   });
+  int metaCount = totalBlockCount - dataBlockCount;
   // BucketCache would only have data blocks
   if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
 LOG.debug(
-  "Prefetch executor completed for {}, but only {} blocks were cached. 
"
-+ "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
+  "Prefetch executor completed for {}, but only {} data blocks were 
cached. "
++ "Total data blocks for file: {}. "
++ "Checking for blocks pending cache in cache writer queue.",
   fileName, count.getValue(), d

(hbase) branch HBASE-28463 created (now 28c1e3b2a6b)

2024-04-03 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch HBASE-28463
in repository https://gitbox.apache.org/repos/asf/hbase.git


  at 28c1e3b2a6b Add stoty to the developer list (#5790)

No new revisions were added by this update.



(hbase) branch branch-3 updated: HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777)

2024-04-02 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new c4ac2df041a HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777)
c4ac2df041a is described below

commit c4ac2df041aa4795f91024b1e5dc8d4f5b6c048e
Author: Wellington Ramos Chevreuil 
AuthorDate: Tue Apr 2 11:58:53 2024 +0100

HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider 
a file fully cached (#5777)

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 24 +
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 +++---
 3 files changed, 33 insertions(+), 25 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 855f183b98f..9541939db94 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2073,25 +2073,29 @@ public class BucketCache implements BlockCache, 
HeapSize {
 // so we need to count all blocks for this file in the backing map under
 // a read lock for the block offset
 final List locks = new ArrayList<>();
-LOG.debug("Notifying caching completed for file {}, with total blocks {}", 
fileName,
-  dataBlockCount);
+LOG.debug("Notifying caching completed for file {}, with total blocks {}, 
and data blocks {}",
+  fileName, totalBlockCount, dataBlockCount);
 try {
   final MutableInt count = new MutableInt();
   LOG.debug("iterating over {} entries in the backing map", 
backingMap.size());
   backingMap.entrySet().stream().forEach(entry -> {
-if (entry.getKey().getHfileName().equals(fileName.getName())) {
+if (
+  entry.getKey().getHfileName().equals(fileName.getName())
+&& entry.getKey().getBlockType().equals(BlockType.DATA)
+) {
   LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
 fileName, entry.getKey().getOffset());
   ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
   lock.readLock().lock();
   locks.add(lock);
+  // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
   }
 }
   });
-  // We may either place only data blocks on the BucketCache or all type 
of blocks
-  if (dataBlockCount == count.getValue() || totalBlockCount == 
count.getValue()) {
+  // BucketCache would only have data blocks
+  if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
@@ -2100,15 +2104,17 @@ public class BucketCache implements BlockCache, 
HeapSize {
 + "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
   fileName, count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
+  for (ReentrantReadWriteLock lock : locks) {
+lock.readLock().unlock();
+  }
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName);
   Thread.sleep(100);
   notifyFileCachingCompleted(fileName, totalBlockCount, 
dataBlockCount, size);
 } else {
-  LOG.info(
-"We found only {} blocks cached from a total of {} for file {}, "
-  + "but no blocks pending caching. Maybe cache is full?",
-count, dataBlockCount, fileName);
+  LOG.info("We found only {} blocks cached from a total of {} for file 
{}, "
++ "but no blocks pending caching. Maybe cache is full or evictions 
"
++ "happened concurrently to cache prefetch.", count, 
totalBlockCount, fileName);
 }
   }
 } catch (InterruptedException e) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
index eb3e3cc61f4..7303cf53a55 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnReg

(hbase) branch master updated: HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider a file fully cached (#5777)

2024-04-02 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 6f236c0d18c HBASE-28458 BucketCache.notifyFileCachingCompleted may 
incorrectly consider a file fully cached (#5777)
6f236c0d18c is described below

commit 6f236c0d18c98c68f4ac43049963e1ff3d4c81f4
Author: Wellington Ramos Chevreuil 
AuthorDate: Tue Apr 2 11:58:53 2024 +0100

HBASE-28458 BucketCache.notifyFileCachingCompleted may incorrectly consider 
a file fully cached (#5777)

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 24 +
 .../hfile/TestBlockEvictionOnRegionMovement.java   |  9 
 .../io/hfile/bucket/TestBucketCachePersister.java  | 25 +++---
 3 files changed, 33 insertions(+), 25 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 855f183b98f..9541939db94 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -2073,25 +2073,29 @@ public class BucketCache implements BlockCache, 
HeapSize {
 // so we need to count all blocks for this file in the backing map under
 // a read lock for the block offset
 final List locks = new ArrayList<>();
-LOG.debug("Notifying caching completed for file {}, with total blocks {}", 
fileName,
-  dataBlockCount);
+LOG.debug("Notifying caching completed for file {}, with total blocks {}, 
and data blocks {}",
+  fileName, totalBlockCount, dataBlockCount);
 try {
   final MutableInt count = new MutableInt();
   LOG.debug("iterating over {} entries in the backing map", 
backingMap.size());
   backingMap.entrySet().stream().forEach(entry -> {
-if (entry.getKey().getHfileName().equals(fileName.getName())) {
+if (
+  entry.getKey().getHfileName().equals(fileName.getName())
+&& entry.getKey().getBlockType().equals(BlockType.DATA)
+) {
   LOG.debug("found block for file {} in the backing map. Acquiring 
read lock for offset {}",
 fileName, entry.getKey().getOffset());
   ReentrantReadWriteLock lock = 
offsetLock.getLock(entry.getKey().getOffset());
   lock.readLock().lock();
   locks.add(lock);
+  // rechecks the given key is still there (no eviction happened 
before the lock acquired)
   if (backingMap.containsKey(entry.getKey())) {
 count.increment();
   }
 }
   });
-  // We may either place only data blocks on the BucketCache or all type 
of blocks
-  if (dataBlockCount == count.getValue() || totalBlockCount == 
count.getValue()) {
+  // BucketCache would only have data blocks
+  if (dataBlockCount == count.getValue()) {
 LOG.debug("File {} has now been fully cached.", fileName);
 fileCacheCompleted(fileName, size);
   } else {
@@ -2100,15 +2104,17 @@ public class BucketCache implements BlockCache, 
HeapSize {
 + "Total blocks for file: {}. Checking for blocks pending cache in 
cache writer queue.",
   fileName, count.getValue(), dataBlockCount);
 if (ramCache.hasBlocksForFile(fileName.getName())) {
+  for (ReentrantReadWriteLock lock : locks) {
+lock.readLock().unlock();
+  }
   LOG.debug("There are still blocks pending caching for file {}. Will 
sleep 100ms "
 + "and try the verification again.", fileName);
   Thread.sleep(100);
   notifyFileCachingCompleted(fileName, totalBlockCount, 
dataBlockCount, size);
 } else {
-  LOG.info(
-"We found only {} blocks cached from a total of {} for file {}, "
-  + "but no blocks pending caching. Maybe cache is full?",
-count, dataBlockCount, fileName);
+  LOG.info("We found only {} blocks cached from a total of {} for file 
{}, "
++ "but no blocks pending caching. Maybe cache is full or evictions 
"
++ "happened concurrently to cache prefetch.", count, 
totalBlockCount, fileName);
 }
   }
 } catch (InterruptedException e) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnRegionMovement.java
index eb3e3cc61f4..7303cf53a55 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockEvictionOnReg

(hbase) branch branch-2.6 updated: HBASE-28450 BucketCache.evictBlocksByHfileName won't work after a cache recovery from file (#5769)

2024-03-27 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 8ec5fb04573 HBASE-28450 BuckeCache.evictBlocksByHfileName won't work 
after a cache recovery from file (#5769)
8ec5fb04573 is described below

commit 8ec5fb04573aa38a368386429f1f9a2326632f96
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Mar 27 10:04:14 2024 +

HBASE-28450 BucketCache.evictBlocksByHfileName won't work after a cache 
recovery from file (#5769)

Signed-off-by: Peter Somogyi 
Signed-off-by: Ankit Singhal 

(cherry picked from commit 298c550c804305f2c57029a563039eefcbb4af40)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 16 +--
 .../hbase/io/hfile/bucket/BucketProtoUtils.java| 14 ++---
 .../bucket/TestRecoveryPersistentBucketCache.java  | 33 ++
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index d98ad1e75c1..0f22fb5ce7f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -218,13 +218,8 @@ public class BucketCache implements BlockCache, HeapSize {
*/
   transient final IdReadWriteLock offsetLock = new 
IdReadWriteLock<>(ReferenceType.SOFT);
 
-  final NavigableSet blocksByHFile = new 
ConcurrentSkipListSet<>((a, b) -> {
-int nameComparison = a.getHfileName().compareTo(b.getHfileName());
-if (nameComparison != 0) {
-  return nameComparison;
-}
-return Long.compare(a.getOffset(), b.getOffset());
-  });
+  NavigableSet blocksByHFile = new ConcurrentSkipListSet<>(
+
Comparator.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 
   /** Statistics thread schedule pool (for heavy debugging, could remove) */
   private transient final ScheduledExecutorService scheduleThreadPool =
@@ -1418,8 +1413,11 @@ public class BucketCache implements BlockCache, HeapSize 
{
   }
 
   private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws 
IOException {
-backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
-  this::createRecycler);
+Pair, 
NavigableSet> pair =
+  BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
+this::createRecycler);
+backingMap = pair.getFirst();
+blocksByHFile = pair.getSecond();
 fullyCachedFiles.clear();
 
fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
 if (proto.hasChecksum()) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 7cc5050506e..4b42414fb9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.function.Function;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -121,10 +124,12 @@ final class BucketProtoUtils {
 }
   }
 
-  static ConcurrentHashMap fromPB(Map deserializers,
-BucketCacheProtos.BackingMap backingMap, Function 
createRecycler)
-throws IOException {
+  static Pair, 
NavigableSet> fromPB(
+Map deserializers, BucketCacheProtos.BackingMap 
backingMap,
+Function createRecycler) throws IOException {
 ConcurrentHashMap result = new 
ConcurrentHashMap<>();
+NavigableSet resultSet = new 
ConcurrentSkipListSet<>(Comparator
+  
.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) {
   BucketCacheProtos.BlockCacheKey protoKey = entry.getKey();
   BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), 
protoKey.getOffset(),
@@ -153,8 +158,9 @@ final class BucketProtoUtils {
 throw new IOException("Unknown deserializer class found: " + 
deserializerClass);
   }
   result.put(key, value);
+  resultSet.add(key);
 }
-return result;
+return new Pair<>(result, resultSet);
   }
 
   priv

(hbase) branch branch-2 updated: HBASE-28450 Addendum: Fix compile error in the UT

2024-03-27 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new c2f80473b2b HBASE-28450 Addendum: Fix compile error in the UT
c2f80473b2b is described below

commit c2f80473b2b99df42de5abb7c1f45e376b355e5a
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Mar 27 12:03:36 2024 +

HBASE-28450 Addendum: Fix compile error in the UT

Change-Id: I0c7f5213fd4e027e1ba1bf85bdafe94a412c84b2
---
 .../hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java
index 5856583fd11..8815c21be4c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java
@@ -110,7 +110,7 @@ public class TestRecoveryPersistentBucketCache {
 
   @Test
   public void testBucketCacheEvictByHFileAfterRecovery() throws Exception {
-HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 Path testDir = TEST_UTIL.getDataTestDir();
 TEST_UTIL.getTestFileSystem().mkdirs(testDir);
 Configuration conf = HBaseConfiguration.create();



(hbase) branch branch-2 updated: HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache recovery from file (#5769)

2024-03-27 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 61a8e766143 HBASE-28450 BuckeCache.evictBlocksByHfileName won't work 
after a cache recovery from file (#5769)
61a8e766143 is described below

commit 61a8e766143c8912925347482a48328a3d1d146c
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Mar 27 10:04:14 2024 +

HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache 
recovery from file (#5769)

Signed-off-by: Peter Somogyi 
Signed-off-by: Ankit Singhal 

(cherry picked from commit 298c550c804305f2c57029a563039eefcbb4af40)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 16 +--
 .../hbase/io/hfile/bucket/BucketProtoUtils.java| 14 ++---
 .../bucket/TestRecoveryPersistentBucketCache.java  | 33 ++
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index b42ddebfe23..42ed74f78bb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -218,13 +218,8 @@ public class BucketCache implements BlockCache, HeapSize {
*/
   transient final IdReadWriteLock offsetLock = new 
IdReadWriteLock<>(ReferenceType.SOFT);
 
-  final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
-    int nameComparison = a.getHfileName().compareTo(b.getHfileName());
-    if (nameComparison != 0) {
-      return nameComparison;
-    }
-    return Long.compare(a.getOffset(), b.getOffset());
-  });
+  NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>(
+    Comparator.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 
   /** Statistics thread schedule pool (for heavy debugging, could remove) */
   private transient final ScheduledExecutorService scheduleThreadPool =
@@ -1456,8 +1451,11 @@ public class BucketCache implements BlockCache, HeapSize 
{
   }
 
   private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws 
IOException {
-    backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(),
-      this::createRecycler);
+    Pair<ConcurrentHashMap<BlockCacheKey, BucketEntry>, NavigableSet<BlockCacheKey>> pair =
+      BucketProtoUtils.fromPB(proto.getDeserializersMap(), proto.getBackingMap(),
+        this::createRecycler);
+    backingMap = pair.getFirst();
+    blocksByHFile = pair.getSecond();
 fullyCachedFiles.clear();
 
fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
 if (proto.hasChecksum()) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 7cc5050506e..4b42414fb9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.function.Function;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -121,10 +124,12 @@ final class BucketProtoUtils {
 }
   }
 
-  static ConcurrentHashMap<BlockCacheKey, BucketEntry> fromPB(Map<Integer, String> deserializers,
-    BucketCacheProtos.BackingMap backingMap, Function<BucketEntry, Recycler> createRecycler)
-    throws IOException {
+  static Pair<ConcurrentHashMap<BlockCacheKey, BucketEntry>, NavigableSet<BlockCacheKey>> fromPB(
+    Map<Integer, String> deserializers, BucketCacheProtos.BackingMap backingMap,
+    Function<BucketEntry, Recycler> createRecycler) throws IOException {
     ConcurrentHashMap<BlockCacheKey, BucketEntry> result = new ConcurrentHashMap<>();
+    NavigableSet<BlockCacheKey> resultSet = new ConcurrentSkipListSet<>(Comparator
+      .comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) {
   BucketCacheProtos.BlockCacheKey protoKey = entry.getKey();
   BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), 
protoKey.getOffset(),
@@ -153,8 +158,9 @@ final class BucketProtoUtils {
 throw new IOException("Unknown deserializer class found: " + 
deserializerClass);
   }
   result.put(key, value);
+  resultSet.add(key);
 }
-return result;
+return new Pair<>(result, resultSet);
   }
 
   priv

(hbase) branch branch-3 updated: HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache recovery from file (#5769)

2024-03-27 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 63ffb6c50cb HBASE-28450 BuckeCache.evictBlocksByHfileName won't work 
after a cache recovery from file (#5769)
63ffb6c50cb is described below

commit 63ffb6c50cb59177dbab14ac7299ec572a26dbd8
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Mar 27 10:04:14 2024 +

HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache 
recovery from file (#5769)

Signed-off-by: Peter Somogyi 
Signed-off-by: Ankit Singhal 

(cherry picked from commit 298c550c804305f2c57029a563039eefcbb4af40)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 16 +--
 .../hbase/io/hfile/bucket/BucketProtoUtils.java| 14 ++---
 .../bucket/TestRecoveryPersistentBucketCache.java  | 33 ++
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 57f71b31894..912a3ab524f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -222,13 +222,8 @@ public class BucketCache implements BlockCache, HeapSize {
*/
   transient final IdReadWriteLock offsetLock;
 
-  final NavigableSet blocksByHFile = new 
ConcurrentSkipListSet<>((a, b) -> {
-int nameComparison = a.getHfileName().compareTo(b.getHfileName());
-if (nameComparison != 0) {
-  return nameComparison;
-}
-return Long.compare(a.getOffset(), b.getOffset());
-  });
+  NavigableSet blocksByHFile = new ConcurrentSkipListSet<>(
+
Comparator.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 
   /** Statistics thread schedule pool (for heavy debugging, could remove) */
   private transient final ScheduledExecutorService scheduleThreadPool =
@@ -1471,8 +1466,11 @@ public class BucketCache implements BlockCache, HeapSize 
{
   }
 
   private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws 
IOException {
-backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
-  this::createRecycler);
+Pair, 
NavigableSet> pair =
+  BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
+this::createRecycler);
+backingMap = pair.getFirst();
+blocksByHFile = pair.getSecond();
 fullyCachedFiles.clear();
 
fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
 if (proto.hasChecksum()) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 7cc5050506e..4b42414fb9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.function.Function;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -121,10 +124,12 @@ final class BucketProtoUtils {
 }
   }
 
-  static ConcurrentHashMap fromPB(Map deserializers,
-BucketCacheProtos.BackingMap backingMap, Function 
createRecycler)
-throws IOException {
+  static Pair, 
NavigableSet> fromPB(
+Map deserializers, BucketCacheProtos.BackingMap 
backingMap,
+Function createRecycler) throws IOException {
 ConcurrentHashMap result = new 
ConcurrentHashMap<>();
+NavigableSet resultSet = new 
ConcurrentSkipListSet<>(Comparator
+  
.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) {
   BucketCacheProtos.BlockCacheKey protoKey = entry.getKey();
   BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), 
protoKey.getOffset(),
@@ -153,8 +158,9 @@ final class BucketProtoUtils {
 throw new IOException("Unknown deserializer class found: " + 
deserializerClass);
   }
   result.put(key, value);
+  resultSet.add(key);
 }
-return result;
+return new Pair<>(result, resultSet);
   }
 
   private static BlockType fromPb(BucketCacheProtos.BlockType

(hbase) branch master updated: HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache recovery from file (#5769)

2024-03-27 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 298c550c804 HBASE-28450 BuckeCache.evictBlocksByHfileName won't work 
after a cache recovery from file (#5769)
298c550c804 is described below

commit 298c550c804305f2c57029a563039eefcbb4af40
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Mar 27 10:04:14 2024 +

HBASE-28450 BuckeCache.evictBlocksByHfileName won't work after a cache 
recovery from file (#5769)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 16 +--
 .../hbase/io/hfile/bucket/BucketProtoUtils.java| 14 ++---
 .../bucket/TestRecoveryPersistentBucketCache.java  | 33 ++
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 57f71b31894..912a3ab524f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -222,13 +222,8 @@ public class BucketCache implements BlockCache, HeapSize {
*/
   transient final IdReadWriteLock offsetLock;
 
-  final NavigableSet blocksByHFile = new 
ConcurrentSkipListSet<>((a, b) -> {
-int nameComparison = a.getHfileName().compareTo(b.getHfileName());
-if (nameComparison != 0) {
-  return nameComparison;
-}
-return Long.compare(a.getOffset(), b.getOffset());
-  });
+  NavigableSet blocksByHFile = new ConcurrentSkipListSet<>(
+
Comparator.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 
   /** Statistics thread schedule pool (for heavy debugging, could remove) */
   private transient final ScheduledExecutorService scheduleThreadPool =
@@ -1471,8 +1466,11 @@ public class BucketCache implements BlockCache, HeapSize 
{
   }
 
   private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws 
IOException {
-backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
-  this::createRecycler);
+Pair, 
NavigableSet> pair =
+  BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
+this::createRecycler);
+backingMap = pair.getFirst();
+blocksByHFile = pair.getSecond();
 fullyCachedFiles.clear();
 
fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
 if (proto.hasChecksum()) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 7cc5050506e..4b42414fb9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,9 +18,12 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.function.Function;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
@@ -121,10 +124,12 @@ final class BucketProtoUtils {
 }
   }
 
-  static ConcurrentHashMap fromPB(Map deserializers,
-BucketCacheProtos.BackingMap backingMap, Function 
createRecycler)
-throws IOException {
+  static Pair, 
NavigableSet> fromPB(
+Map deserializers, BucketCacheProtos.BackingMap 
backingMap,
+Function createRecycler) throws IOException {
 ConcurrentHashMap result = new 
ConcurrentHashMap<>();
+NavigableSet resultSet = new 
ConcurrentSkipListSet<>(Comparator
+  
.comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
 for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) {
   BucketCacheProtos.BlockCacheKey protoKey = entry.getKey();
   BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), 
protoKey.getOffset(),
@@ -153,8 +158,9 @@ final class BucketProtoUtils {
 throw new IOException("Unknown deserializer class found: " + 
deserializerClass);
   }
   result.put(key, value);
+  resultSet.add(key);
 }
-return result;
+return new Pair<>(result, resultSet);
   }
 
   private static BlockType fromPb(BucketCacheProtos.BlockType blockType) {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRecoveryPersistentBucketCache.java
 
b/hbase-server/src/

(hbase) branch branch-2.4 updated: HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 3ea6b8b342f HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)
3ea6b8b342f is described below

commit 3ea6b8b342f63c74bbbe0a1446ebbe701aeb8cb3
Author: Andor Molnár 
AuthorDate: Thu Feb 8 15:50:52 2024 +0100

HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider 
runs with Kerberos instead of Shade authentication (#5659)

Signed-off-by: Wellington Chevreuil 
---
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
index e57ca56390c..525a7800908 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
@@ -130,6 +130,12 @@ public class NettyHBaseSaslRpcClientHandler extends 
SimpleChannelInboundHandler<
   // Mechanisms which have multiple steps will not return true on 
`SaslClient#isComplete()`
   // until the handshake has fully completed. Mechanisms which only send a 
single buffer may
   // return true on `isComplete()` after that initial response is 
calculated.
+
+  // HBASE-28337 We still want to check if the SaslClient completed the 
handshake, because
+  // there are certain mechs like PLAIN which doesn't have a server 
response after the
+  // initial authentication request. We cannot remove this tryComplete(), 
otherwise mechs
+  // like PLAIN will fail with call timeout.
+  tryComplete(ctx);
 } catch (Exception e) {
   // the exception thrown by handlerAdded will not be passed to the 
exceptionCaught below
   // because netty will remove a handler if handlerAdded throws an 
exception.
diff --git 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
index 4ee753b1d26..1d0bb40b440 100644
--- 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
+++ 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
@@ -27,8 +27,6 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -67,10 +65,8 @@ import 
org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -82,8 +78,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 @Category({ MediumTests.class, SecurityTests.class })
 public class TestShadeSaslAuthenticationProvider {
   private static final Logger LOG =
@@ -210,21 +204,23 @@ public class TestShadeSaslAuthenticationProvider {
   @Test
   public void testPositiveAuthentication() throws Exception {
 final Configuration clientConf = new Configuration(CONF);
-try (Connection conn = ConnectionFactory.createConnection(clientConf)) {
+try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) {
   UserGroupInformation user1 =
 UserGroupInformation.createUserForTesting("user1", new String[0]);
-  user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", 
USER1_PASSWORD));
+  user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", 
USER1_PASSWORD));
   user1.doAs(new PrivilegedExceptionAction() {
 @Override
 public Void run() throws Exception {
-  try (Table t = conn.getTable(tableName)) {
-Result r = t.get(new Get(Bytes.toBytes(&quo

(hbase) branch branch-2.5 updated: HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new e4ec37d11d1 HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)
e4ec37d11d1 is described below

commit e4ec37d11d1d589470a383b484c83b6c3d76e6ec
Author: Andor Molnár 
AuthorDate: Thu Feb 8 15:50:52 2024 +0100

HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider 
runs with Kerberos instead of Shade authentication (#5659)

Signed-off-by: Wellington Chevreuil 
---
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
index e57ca56390c..525a7800908 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
@@ -130,6 +130,12 @@ public class NettyHBaseSaslRpcClientHandler extends 
SimpleChannelInboundHandler<
   // Mechanisms which have multiple steps will not return true on 
`SaslClient#isComplete()`
   // until the handshake has fully completed. Mechanisms which only send a 
single buffer may
   // return true on `isComplete()` after that initial response is 
calculated.
+
+  // HBASE-28337 We still want to check if the SaslClient completed the 
handshake, because
+  // there are certain mechs like PLAIN which doesn't have a server 
response after the
+  // initial authentication request. We cannot remove this tryComplete(), 
otherwise mechs
+  // like PLAIN will fail with call timeout.
+  tryComplete(ctx);
 } catch (Exception e) {
   // the exception thrown by handlerAdded will not be passed to the 
exceptionCaught below
   // because netty will remove a handler if handlerAdded throws an 
exception.
diff --git 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
index 4ee753b1d26..1d0bb40b440 100644
--- 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
+++ 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
@@ -27,8 +27,6 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -67,10 +65,8 @@ import 
org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -82,8 +78,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 @Category({ MediumTests.class, SecurityTests.class })
 public class TestShadeSaslAuthenticationProvider {
   private static final Logger LOG =
@@ -210,21 +204,23 @@ public class TestShadeSaslAuthenticationProvider {
   @Test
   public void testPositiveAuthentication() throws Exception {
 final Configuration clientConf = new Configuration(CONF);
-try (Connection conn = ConnectionFactory.createConnection(clientConf)) {
+try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) {
   UserGroupInformation user1 =
 UserGroupInformation.createUserForTesting("user1", new String[0]);
-  user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", 
USER1_PASSWORD));
+  user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", 
USER1_PASSWORD));
   user1.doAs(new PrivilegedExceptionAction() {
 @Override
 public Void run() throws Exception {
-  try (Table t = conn.getTable(tableName)) {
-Result r = t.get(new Get(Bytes.toBytes(&quo

(hbase) branch branch-2.6 updated: HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 514b5b8012f HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)
514b5b8012f is described below

commit 514b5b8012f1fec1610bca75fc6d22a94ca1bd80
Author: Andor Molnár 
AuthorDate: Thu Feb 8 15:50:52 2024 +0100

HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider 
runs with Kerberos instead of Shade authentication (#5659)

Signed-off-by: Wellington Chevreuil 
---
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
index 48e631c7629..cc71355d429 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
@@ -145,6 +145,12 @@ public class NettyHBaseSaslRpcClientHandler extends 
SimpleChannelInboundHandler<
   // Mechanisms which have multiple steps will not return true on 
`SaslClient#isComplete()`
   // until the handshake has fully completed. Mechanisms which only send a 
single buffer may
   // return true on `isComplete()` after that initial response is 
calculated.
+
+  // HBASE-28337 We still want to check if the SaslClient completed the 
handshake, because
+  // there are certain mechs like PLAIN which doesn't have a server 
response after the
+  // initial authentication request. We cannot remove this tryComplete(), 
otherwise mechs
+  // like PLAIN will fail with call timeout.
+  tryComplete(ctx);
 } catch (Exception e) {
   // the exception thrown by handlerAdded will not be passed to the 
exceptionCaught below
   // because netty will remove a handler if handlerAdded throws an 
exception.
diff --git 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
index 4ee753b1d26..1d0bb40b440 100644
--- 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
+++ 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
@@ -27,8 +27,6 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -67,10 +65,8 @@ import 
org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -82,8 +78,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 @Category({ MediumTests.class, SecurityTests.class })
 public class TestShadeSaslAuthenticationProvider {
   private static final Logger LOG =
@@ -210,21 +204,23 @@ public class TestShadeSaslAuthenticationProvider {
   @Test
   public void testPositiveAuthentication() throws Exception {
 final Configuration clientConf = new Configuration(CONF);
-try (Connection conn = ConnectionFactory.createConnection(clientConf)) {
+try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) {
   UserGroupInformation user1 =
 UserGroupInformation.createUserForTesting("user1", new String[0]);
-  user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", 
USER1_PASSWORD));
+  user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", 
USER1_PASSWORD));
   user1.doAs(new PrivilegedExceptionAction<Void>() {
 @Override
 public Void run() throws Exception {
-  try (Table t = conn.getTable(tableName)) {
-Result r = t.get(new Get(Bytes.toBytes(&quo

(hbase) branch branch-2 updated: HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new dd76f83a9f1 HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)
dd76f83a9f1 is described below

commit dd76f83a9f153db3f1ad1ce7692f7543a4116b97
Author: Andor Molnár 
AuthorDate: Thu Feb 8 15:50:52 2024 +0100

HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider 
runs with Kerberos instead of Shade authentication (#5659)

Signed-off-by: Wellington Chevreuil 
---
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
index 48e631c7629..cc71355d429 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
@@ -145,6 +145,12 @@ public class NettyHBaseSaslRpcClientHandler extends 
SimpleChannelInboundHandler<ByteBuf>
   // Mechanisms which have multiple steps will not return true on 
`SaslClient#isComplete()`
   // until the handshake has fully completed. Mechanisms which only send a 
single buffer may
   // return true on `isComplete()` after that initial response is 
calculated.
+
+  // HBASE-28337 We still want to check if the SaslClient completed the 
handshake, because
+  // there are certain mechs like PLAIN which doesn't have a server 
response after the
+  // initial authentication request. We cannot remove this tryComplete(), 
otherwise mechs
+  // like PLAIN will fail with call timeout.
+  tryComplete(ctx);
 } catch (Exception e) {
   // the exception thrown by handlerAdded will not be passed to the 
exceptionCaught below
   // because netty will remove a handler if handlerAdded throws an 
exception.
diff --git 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
index 4ee753b1d26..1d0bb40b440 100644
--- 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
+++ 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
@@ -27,8 +27,6 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -67,10 +65,8 @@ import 
org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -82,8 +78,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 @Category({ MediumTests.class, SecurityTests.class })
 public class TestShadeSaslAuthenticationProvider {
   private static final Logger LOG =
@@ -210,21 +204,23 @@ public class TestShadeSaslAuthenticationProvider {
   @Test
   public void testPositiveAuthentication() throws Exception {
 final Configuration clientConf = new Configuration(CONF);
-try (Connection conn = ConnectionFactory.createConnection(clientConf)) {
+try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) {
   UserGroupInformation user1 =
 UserGroupInformation.createUserForTesting("user1", new String[0]);
-  user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", 
USER1_PASSWORD));
+  user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", 
USER1_PASSWORD));
   user1.doAs(new PrivilegedExceptionAction<Void>() {
 @Override
 public Void run() throws Exception {
-  try (Table t = conn.getTable(tableName)) {
-Result r = t.get(new Get(Bytes.toBytes(&quo

(hbase) branch branch-3 updated: HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade authentication (#5659)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 93ef87cbfce HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)
93ef87cbfce is described below

commit 93ef87cbfced0fe967e3f23e28227bdca019507e
Author: Andor Molnár 
AuthorDate: Thu Feb 8 15:50:52 2024 +0100

HBASE-28337 Positive connection test in TestShadeSaslAuthenticationProvider 
runs with Kerberos instead of Shade authentication (#5659)

Signed-off-by: Wellington Chevreuil 
---
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
index 48e631c7629..cc71355d429 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java
@@ -145,6 +145,12 @@ public class NettyHBaseSaslRpcClientHandler extends 
SimpleChannelInboundHandler<ByteBuf>
   // Mechanisms which have multiple steps will not return true on 
`SaslClient#isComplete()`
   // until the handshake has fully completed. Mechanisms which only send a 
single buffer may
   // return true on `isComplete()` after that initial response is 
calculated.
+
+  // HBASE-28337 We still want to check if the SaslClient completed the 
handshake, because
+  // there are certain mechs like PLAIN which doesn't have a server 
response after the
+  // initial authentication request. We cannot remove this tryComplete(), 
otherwise mechs
+  // like PLAIN will fail with call timeout.
+  tryComplete(ctx);
 } catch (Exception e) {
   // the exception thrown by handlerAdded will not be passed to the 
exceptionCaught below
   // because netty will remove a handler if handlerAdded throws an 
exception.
diff --git 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
index a479310691b..26a8943096a 100644
--- 
a/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
+++ 
b/hbase-examples/src/test/java/org/apache/hadoop/hbase/security/provider/example/TestShadeSaslAuthenticationProvider.java
@@ -27,8 +27,6 @@ import java.io.BufferedWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -69,10 +67,8 @@ import 
org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -84,8 +80,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-
 @Category({ MediumTests.class, SecurityTests.class })
 public class TestShadeSaslAuthenticationProvider {
   private static final Logger LOG =
@@ -212,21 +206,23 @@ public class TestShadeSaslAuthenticationProvider {
   @Test
   public void testPositiveAuthentication() throws Exception {
 final Configuration clientConf = new Configuration(CONF);
-try (Connection conn = ConnectionFactory.createConnection(clientConf)) {
+try (Connection conn1 = ConnectionFactory.createConnection(clientConf)) {
   UserGroupInformation user1 =
 UserGroupInformation.createUserForTesting("user1", new String[0]);
-  user1.addToken(ShadeClientTokenUtil.obtainToken(conn, "user1", 
USER1_PASSWORD));
+  user1.addToken(ShadeClientTokenUtil.obtainToken(conn1, "user1", 
USER1_PASSWORD));
   user1.doAs(new PrivilegedExceptionAction<Void>() {
 @Override
 public Void run() throws Exception {
-  try (Table t = conn.getTable(tableName)) {
-Result r = t.get(new Get(Bytes.toBytes(&quo

(hbase) branch master updated (275d928a7d4 -> e85557a34d7)

2024-02-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


from 275d928a7d4 HBASE-27800: Add support for default user quotas (#5666)
 add e85557a34d7 HBASE-28337 Positive connection test in 
TestShadeSaslAuthenticationProvider runs with Kerberos instead of Shade 
authentication (#5659)

No new revisions were added by this update.

Summary of changes:
 .../security/NettyHBaseSaslRpcClientHandler.java   |  6 +++
 .../TestShadeSaslAuthenticationProvider.java   | 43 ++
 2 files changed, 17 insertions(+), 32 deletions(-)



(hbase) branch branch-2 updated: HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is reached (#5615)

2024-02-06 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new f09a93bf520 HBASE-28303 Interrupt cache prefetch thread when a heap 
usage threshold is reached (#5615)
f09a93bf520 is described below

commit f09a93bf5203048370d061d34ce7a3c35756c55e
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Jan 26 10:03:42 2024 +

HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is 
reached (#5615)

Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Peter Somogyi 
---
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  | 28 
 .../hadoop/hbase/io/hfile/HFilePreadReader.java| 24 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 38 ++
 .../io/hfile/bucket/TestBucketCachePersister.java  | 16 +
 4 files changed, 92 insertions(+), 14 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 0f77e329b97..0ba92da35c2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -99,6 +99,12 @@ public class CacheConfig {
   public static final String BUCKETCACHE_PERSIST_INTERVAL_KEY =
 "hbase.bucketcache.persist.intervalinmillis";
 
+  /**
+   * Configuration key to set the heap usage threshold limit once prefetch 
threads should be
+   * interrupted.
+   */
+  public static final String PREFETCH_HEAP_USAGE_THRESHOLD = 
"hbase.rs.prefetchheapusage";
+
   // Defaults
   public static final boolean DEFAULT_CACHE_DATA_ON_READ = true;
   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
@@ -111,6 +117,7 @@ public class CacheConfig {
   public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false;
   public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
   public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = 
Long.MAX_VALUE;
+  public static final double DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD = 1d;
 
   /**
* Whether blocks should be cached on read (default is on if there is a 
cache but this can be
@@ -157,6 +164,8 @@ public class CacheConfig {
 
   private final ByteBuffAllocator byteBuffAllocator;
 
+  private final double heapUsageThreshold;
+
   /**
* Create a cache configuration using the specified configuration object and 
defaults for family
* level settings. Only use if no column family context.
@@ -201,6 +210,8 @@ public class CacheConfig {
 this.cacheCompactedDataOnWrite =
   conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, 
DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE);
 this.cacheCompactedDataOnWriteThreshold = 
getCacheCompactedBlocksOnWriteThreshold(conf);
+this.heapUsageThreshold =
+  conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, 
DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD);
 this.blockCache = blockCache;
 this.byteBuffAllocator = byteBuffAllocator;
   }
@@ -222,6 +233,7 @@ public class CacheConfig {
 this.dropBehindCompaction = cacheConf.dropBehindCompaction;
 this.blockCache = cacheConf.blockCache;
 this.byteBuffAllocator = cacheConf.byteBuffAllocator;
+this.heapUsageThreshold = cacheConf.heapUsageThreshold;
   }
 
   private CacheConfig() {
@@ -237,6 +249,7 @@ public class CacheConfig {
 this.dropBehindCompaction = false;
 this.blockCache = null;
 this.byteBuffAllocator = ByteBuffAllocator.HEAP;
+this.heapUsageThreshold = DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD;
   }
 
   /**
@@ -386,6 +399,17 @@ public class CacheConfig {
 return false;
   }
 
+  /**
+   * Checks if the current heap usage is below the threshold configured by
+   * "hbase.rs.prefetchheapusage" (0.8 by default).
+   */
+  public boolean isHeapUsageBelowThreshold() {
+double total = Runtime.getRuntime().maxMemory();
+double available = Runtime.getRuntime().freeMemory();
+double usedRatio = 1d - (available / total);
+return heapUsageThreshold > usedRatio;
+  }
+
   /**
* If we make sure the block could not be cached, we will not acquire the 
lock otherwise we will
* acquire lock
@@ -413,6 +437,10 @@ public class CacheConfig {
 return this.byteBuffAllocator;
   }
 
+  public double getHeapUsageThreshold() {
+return heapUsageThreshold;
+  }
+
   private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) {
 long cacheCompactedBlocksOnWriteThreshold =
   conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileP

(hbase) branch branch-3 updated: HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is reached (#5615)

2024-01-30 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 66d27a3eae2 HBASE-28303 Interrupt cache prefetch thread when a heap 
usage threshold is reached (#5615)
66d27a3eae2 is described below

commit 66d27a3eae2ef1186532dbe605d6f9197c31f127
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Jan 26 10:03:42 2024 +

HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is 
reached (#5615)

Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Peter Somogyi 
---
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  | 28 
 .../hadoop/hbase/io/hfile/HFilePreadReader.java| 24 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 38 ++
 .../io/hfile/bucket/TestBucketCachePersister.java  | 16 +
 4 files changed, 92 insertions(+), 14 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 4587eced616..f89a6194cef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -99,6 +99,12 @@ public class CacheConfig {
   public static final String BUCKETCACHE_PERSIST_INTERVAL_KEY =
 "hbase.bucketcache.persist.intervalinmillis";
 
+  /**
+   * Configuration key to set the heap usage threshold limit once prefetch 
threads should be
+   * interrupted.
+   */
+  public static final String PREFETCH_HEAP_USAGE_THRESHOLD = 
"hbase.rs.prefetchheapusage";
+
   // Defaults
   public static final boolean DEFAULT_CACHE_DATA_ON_READ = true;
   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
@@ -111,6 +117,7 @@ public class CacheConfig {
   public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false;
   public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
   public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = 
Long.MAX_VALUE;
+  public static final double DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD = 1d;
 
   /**
* Whether blocks should be cached on read (default is on if there is a 
cache but this can be
@@ -157,6 +164,8 @@ public class CacheConfig {
 
   private final ByteBuffAllocator byteBuffAllocator;
 
+  private final double heapUsageThreshold;
+
   /**
* Create a cache configuration using the specified configuration object and 
defaults for family
* level settings. Only use if no column family context.
@@ -201,6 +210,8 @@ public class CacheConfig {
 this.cacheCompactedDataOnWrite =
   conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, 
DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE);
 this.cacheCompactedDataOnWriteThreshold = 
getCacheCompactedBlocksOnWriteThreshold(conf);
+this.heapUsageThreshold =
+  conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, 
DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD);
 this.blockCache = blockCache;
 this.byteBuffAllocator = byteBuffAllocator;
   }
@@ -222,6 +233,7 @@ public class CacheConfig {
 this.dropBehindCompaction = cacheConf.dropBehindCompaction;
 this.blockCache = cacheConf.blockCache;
 this.byteBuffAllocator = cacheConf.byteBuffAllocator;
+this.heapUsageThreshold = cacheConf.heapUsageThreshold;
   }
 
   private CacheConfig() {
@@ -237,6 +249,7 @@ public class CacheConfig {
 this.dropBehindCompaction = false;
 this.blockCache = null;
 this.byteBuffAllocator = ByteBuffAllocator.HEAP;
+this.heapUsageThreshold = DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD;
   }
 
   /**
@@ -386,6 +399,17 @@ public class CacheConfig {
 return false;
   }
 
+  /**
+   * Checks if the current heap usage is below the threshold configured by
+   * "hbase.rs.prefetchheapusage" (0.8 by default).
+   */
+  public boolean isHeapUsageBelowThreshold() {
+double total = Runtime.getRuntime().maxMemory();
+double available = Runtime.getRuntime().freeMemory();
+double usedRatio = 1d - (available / total);
+return heapUsageThreshold > usedRatio;
+  }
+
   /**
* If we make sure the block could not be cached, we will not acquire the 
lock otherwise we will
* acquire lock
@@ -413,6 +437,10 @@ public class CacheConfig {
 return this.byteBuffAllocator;
   }
 
+  public double getHeapUsageThreshold() {
+return heapUsageThreshold;
+  }
+
   private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) {
 long cacheCompactedBlocksOnWriteThreshold =
   conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileP

(hbase) branch master updated: HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is reached (#5615)

2024-01-26 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 73cb0dddf8b HBASE-28303 Interrupt cache prefetch thread when a heap 
usage threshold is reached (#5615)
73cb0dddf8b is described below

commit 73cb0dddf8bcd0768e2e8eb7cf9d1bc3eddc1ea1
Author: Wellington Ramos Chevreuil 
AuthorDate: Fri Jan 26 10:03:42 2024 +

HBASE-28303 Interrupt cache prefetch thread when a heap usage threshold is 
reached (#5615)

Signed-off-by: Tak Lon (Stephen) Wu 
Signed-off-by: Peter Somogyi 
---
 .../apache/hadoop/hbase/io/hfile/CacheConfig.java  | 28 
 .../hadoop/hbase/io/hfile/HFilePreadReader.java| 24 ++
 .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 38 ++
 .../io/hfile/bucket/TestBucketCachePersister.java  | 16 +
 4 files changed, 92 insertions(+), 14 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 4587eced616..f89a6194cef 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -99,6 +99,12 @@ public class CacheConfig {
   public static final String BUCKETCACHE_PERSIST_INTERVAL_KEY =
 "hbase.bucketcache.persist.intervalinmillis";
 
+  /**
+   * Configuration key to set the heap usage threshold limit once prefetch 
threads should be
+   * interrupted.
+   */
+  public static final String PREFETCH_HEAP_USAGE_THRESHOLD = 
"hbase.rs.prefetchheapusage";
+
   // Defaults
   public static final boolean DEFAULT_CACHE_DATA_ON_READ = true;
   public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
@@ -111,6 +117,7 @@ public class CacheConfig {
   public static final boolean DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE = false;
   public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
   public static final long DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD = 
Long.MAX_VALUE;
+  public static final double DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD = 1d;
 
   /**
* Whether blocks should be cached on read (default is on if there is a 
cache but this can be
@@ -157,6 +164,8 @@ public class CacheConfig {
 
   private final ByteBuffAllocator byteBuffAllocator;
 
+  private final double heapUsageThreshold;
+
   /**
* Create a cache configuration using the specified configuration object and 
defaults for family
* level settings. Only use if no column family context.
@@ -201,6 +210,8 @@ public class CacheConfig {
 this.cacheCompactedDataOnWrite =
   conf.getBoolean(CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, 
DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE);
 this.cacheCompactedDataOnWriteThreshold = 
getCacheCompactedBlocksOnWriteThreshold(conf);
+this.heapUsageThreshold =
+  conf.getDouble(PREFETCH_HEAP_USAGE_THRESHOLD, 
DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD);
 this.blockCache = blockCache;
 this.byteBuffAllocator = byteBuffAllocator;
   }
@@ -222,6 +233,7 @@ public class CacheConfig {
 this.dropBehindCompaction = cacheConf.dropBehindCompaction;
 this.blockCache = cacheConf.blockCache;
 this.byteBuffAllocator = cacheConf.byteBuffAllocator;
+this.heapUsageThreshold = cacheConf.heapUsageThreshold;
   }
 
   private CacheConfig() {
@@ -237,6 +249,7 @@ public class CacheConfig {
 this.dropBehindCompaction = false;
 this.blockCache = null;
 this.byteBuffAllocator = ByteBuffAllocator.HEAP;
+this.heapUsageThreshold = DEFAULT_PREFETCH_HEAP_USAGE_THRESHOLD;
   }
 
   /**
@@ -386,6 +399,17 @@ public class CacheConfig {
 return false;
   }
 
+  /**
+   * Checks if the current heap usage is below the threshold configured by
+   * "hbase.rs.prefetchheapusage" (0.8 by default).
+   */
+  public boolean isHeapUsageBelowThreshold() {
+double total = Runtime.getRuntime().maxMemory();
+double available = Runtime.getRuntime().freeMemory();
+double usedRatio = 1d - (available / total);
+return heapUsageThreshold > usedRatio;
+  }
+
   /**
* If we make sure the block could not be cached, we will not acquire the 
lock otherwise we will
* acquire lock
@@ -413,6 +437,10 @@ public class CacheConfig {
 return this.byteBuffAllocator;
   }
 
+  public double getHeapUsageThreshold() {
+return heapUsageThreshold;
+  }
+
   private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) {
 long cacheCompactedBlocksOnWriteThreshold =
   conf.getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileP

(hbase) branch branch-2.4 updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 99acdff71ba HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
99acdff71ba is described below

commit 99acdff71ba8c116ab557c34df130d56185adb80
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
Change-Id: I9ce1e4d2fed969f7e63de7f48ec474d8edb9d55e
---
 bin/hbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/hbase b/bin/hbase
index a04975ed8ef..2bbe2d161ba 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -484,7 +484,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports [...]
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 #Add the development env class path stuff



(hbase) branch branch-2.5 updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new 504532a77ff HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
504532a77ff is described below

commit 504532a77ffafdc4f0cf39db6c483c51adec20fd
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
Change-Id: I9ce1e4d2fed969f7e63de7f48ec474d8edb9d55e
---
 bin/hbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/hbase b/bin/hbase
index 64be92b217d..c30a5c6aa7a 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -491,7 +491,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports [...]
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 add_opentelemetry_agent() {



(hbase) branch branch-2.6 updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new e67c54d6137 HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
e67c54d6137 is described below

commit e67c54d61372725561f671599e4798ee040eb500
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
Change-Id: I9ce1e4d2fed969f7e63de7f48ec474d8edb9d55e
---
 bin/hbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/hbase b/bin/hbase
index 4a5476acd13..73e582fa3ad 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports [...]
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 add_opentelemetry_agent() {



(hbase) branch branch-2 updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 213bc8ca367 HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
213bc8ca367 is described below

commit 213bc8ca367e64ab7ae997af20852a8adb4e2128
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
Change-Id: I9ce1e4d2fed969f7e63de7f48ec474d8edb9d55e
---
 bin/hbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/hbase b/bin/hbase
index 4a5476acd13..73e582fa3ad 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports [...]
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 add_opentelemetry_agent() {



(hbase) branch branch-3 updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new b23b7e97b52 HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
b23b7e97b52 is described below

commit b23b7e97b524a67e6b487f0d4fcb2c7b9830de31
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
Change-Id: I9ce1e4d2fed969f7e63de7f48ec474d8edb9d55e
---
 bin/hbase | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/bin/hbase b/bin/hbase
index 60cfb8afef9..e329c507089 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED"
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 add_opentelemetry_agent() {
@@ -786,7 +786,7 @@ fi
 # Add lib/jdk11 jars to the classpath
 
 if [ "${DEBUG}" = "true" ]; then
-  echo "Deciding on addition of lib/jdk11 jars to the classpath"
+  echo "Deciding on addition of lib/jdk11 jars to the classpath and setting 
JVM module flags"
 fi
 
 addJDK11Jars=false
@@ -879,6 +879,7 @@ export CLASSPATH
 if [ "${DEBUG}" = "true" ]; then
   echo "classpath=${CLASSPATH}" >&2
   HBASE_OPTS="${HBASE_OPTS} -Xdiag"
+  echo "HBASE_OPTS=${HBASE_OPTS}"
 fi
 
 # resolve the command arguments



(hbase) branch master updated: HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags (#5581)

2024-01-05 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 119885415c2 HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to 
jdk11_jvm_flags (#5581)
119885415c2 is described below

commit 119885415c2f1770d95fdf832d249cbf7a0c80f4
Author: mrzhao 
AuthorDate: Fri Jan 5 17:15:17 2024 +0800

HBASE-28259 Add java.base/java.io=ALL-UNNAMED open to jdk11_jvm_flags 
(#5581)

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Nihal Jain 
---
 bin/hbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/hbase b/bin/hbase
index 30b4e94a89a..e329c507089 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -496,7 +496,7 @@ add_jdk11_deps_to_classpath() {
 }
 
 add_jdk11_jvm_flags() {
-  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.nio=ALL-UNNAMED 
--add-opens java.base/sun.nio.ch=ALL-UNNAMED --add-opens 
java.base/java.lang=ALL-UNNAMED --add-opens 
java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports 
java.security.jgss/sun.security.krb5=ALL-UNNAMED --add-exports [...]
+  HBASE_OPTS="$HBASE_OPTS 
-Dorg.apache.hbase.thirdparty.io.netty.tryReflectionSetAccessible=true 
--add-modules jdk.unsupported --add-opens java.base/java.io=ALL-UNNAMED 
--add-opens java.base/java.nio=ALL-UNNAMED --add-opens 
java.base/sun.nio.ch=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED 
--add-opens java.base/jdk.internal.ref=ALL-UNNAMED --add-opens 
java.base/java.lang.reflect=ALL-UNNAMED --add-exports 
java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.security.jgss/s [...]
 }
 
 add_opentelemetry_agent() {



(hbase) branch branch-2 updated: HBASE-28246 Expose region cached size over JMX metrics and report in the RS UI (#5565)

2023-12-14 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 28ca7205d03 HBASE-28246 Expose region cached size over JMX metrics and 
report in the RS UI (#5565)
28ca7205d03 is described below

commit 28ca7205d0374063093708fae0bf5b19efd5760e
Author: Wellington Ramos Chevreuil 
AuthorDate: Thu Dec 14 21:10:18 2023 +

HBASE-28246 Expose region cached size over JMX metrics and report in the RS 
UI (#5565)

Signed-off-by: Peter Somogyi 
---
 .../regionserver/MetricsRegionServerSource.java|  2 +
 .../hbase/regionserver/MetricsRegionWrapper.java   |  5 ++
 .../regionserver/MetricsRegionSourceImpl.java  |  4 +
 .../regionserver/TestMetricsRegionSourceImpl.java  |  5 ++
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |  3 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   | 23 +++--
 .../hadoop/hbase/io/hfile/BlockCacheKey.java   | 15 
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  | 11 ++-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  2 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 97 +++---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 25 +++---
 .../regionserver/MetricsRegionWrapperImpl.java | 17 +++-
 .../io/hfile/TestPrefetchWithBucketCache.java  | 27 +-
 .../regionserver/MetricsRegionWrapperStub.java |  5 ++
 14 files changed, 166 insertions(+), 75 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 5220f2d82b2..75269e57181 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -632,4 +632,6 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
   String SCANNER_LEASE_EXPIRED_COUNT_DESC =
 "Count of scanners which were expired due to scanner lease timeout";
+  String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+  String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching 
completed for this region.";
 }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 3115603aabf..4d8a028d89b 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
   long getStoreFileSize();
 
+  /**
+   * Gets the current cache % ratio for this region.
+   */
+  float getCurrentRegionCacheRatio();
+
   /**
* Get the total number of read requests that have been issued against this 
region
*/
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 0c20456e8cb..92ecaa58088 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
 this.regionWrapper.getNumReferenceFiles());
   mrb.addGauge(Interns.info(regionNamePrefix + 
MetricsRegionServerSource.STOREFILE_SIZE,
 MetricsRegionServerSource.STOREFILE_SIZE_DESC), 
this.regionWrapper.getStoreFileSize());
+  mrb.addGauge(
+Interns.info(regionNamePrefix + 
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+  MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+this.regionWrapper.getCurrentRegionCacheRatio());
   mrb.addCounter(
 Interns.info(regionNamePrefix + 
MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
   MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 3fe116a11a7..2c8205085d1 100644
--- 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/test/j

(hbase) branch branch-3 updated: HBASE-28246 Expose region cached size over JMX metrics and report in the RS UI (#5565)

2023-12-14 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new e38f444b858 HBASE-28246 Expose region cached size over JMX metrics and 
report in the RS UI (#5565)
e38f444b858 is described below

commit e38f444b858589361b693e14c8facf4694f268db
Author: Wellington Ramos Chevreuil 
AuthorDate: Thu Dec 14 21:10:18 2023 +

HBASE-28246 Expose region cached size over JMX metrics and report in the RS 
UI (#5565)

Signed-off-by: Peter Somogyi 
---
 .../regionserver/MetricsRegionServerSource.java|  2 +
 .../regionserver/MetricsRegionSourceImpl.java  |  4 +
 .../hbase/regionserver/MetricsRegionWrapper.java   |  5 ++
 .../regionserver/TestMetricsRegionSourceImpl.java  |  5 ++
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |  3 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   | 23 +++---
 .../hadoop/hbase/io/hfile/BlockCacheKey.java   | 15 
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  | 11 ++-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  2 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 92 +++---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 25 +++---
 .../regionserver/MetricsRegionWrapperImpl.java | 17 +++-
 .../io/hfile/TestPrefetchWithBucketCache.java  | 27 ++-
 .../regionserver/MetricsRegionWrapperStub.java |  5 ++
 14 files changed, 164 insertions(+), 72 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index a53899c476f..8b64e793bed 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -639,4 +639,6 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
   String SCANNER_LEASE_EXPIRED_COUNT_DESC =
 "Count of scanners which were expired due to scanner lease timeout";
+  String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+  String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching 
completed for this region.";
 }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 410d775d7d3..2f4fbb431ab 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
 this.regionWrapper.getNumReferenceFiles());
   mrb.addGauge(Interns.info(regionNamePrefix + 
MetricsRegionServerSource.STOREFILE_SIZE,
 MetricsRegionServerSource.STOREFILE_SIZE_DESC), 
this.regionWrapper.getStoreFileSize());
+  mrb.addGauge(
+Interns.info(regionNamePrefix + 
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+  MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+this.regionWrapper.getCurrentRegionCacheRatio());
   mrb.addCounter(
 Interns.info(regionNamePrefix + 
MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
   MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 55ddc603979..3445faf7eaa 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
   long getStoreFileSize();
 
+  /**
+   * Gets the current cache % ratio for this region.
+   */
+  float getCurrentRegionCacheRatio();
+
   /**
* Get the total number of read requests that have been issued against this 
region
*/
diff --git 
a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
 
b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 46b6405eb46..28286cc1c6e 100644
--- 
a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regio

(hbase) branch master updated: HBASE-28246 Expose region cached size over JMX metrics and report in the RS UI (#5565)

2023-12-14 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new e8762dfea46 HBASE-28246 Expose region cached size over JMX metrics and 
report in the RS UI (#5565)
e8762dfea46 is described below

commit e8762dfea4692db600f50e1224bc9cb4f4e09078
Author: Wellington Ramos Chevreuil 
AuthorDate: Thu Dec 14 21:10:18 2023 +

HBASE-28246 Expose region cached size over JMX metrics and report in the RS 
UI (#5565)

Signed-off-by: Peter Somogyi 
---
 .../regionserver/MetricsRegionServerSource.java|  2 +
 .../regionserver/MetricsRegionSourceImpl.java  |  4 +
 .../hbase/regionserver/MetricsRegionWrapper.java   |  5 ++
 .../regionserver/TestMetricsRegionSourceImpl.java  |  5 ++
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   |  3 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   | 23 +++---
 .../hadoop/hbase/io/hfile/BlockCacheKey.java   | 15 
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  | 11 ++-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java |  2 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 92 +++---
 .../hadoop/hbase/regionserver/HRegionServer.java   | 25 +++---
 .../regionserver/MetricsRegionWrapperImpl.java | 17 +++-
 .../io/hfile/TestPrefetchWithBucketCache.java  | 27 ++-
 .../regionserver/MetricsRegionWrapperStub.java |  5 ++
 14 files changed, 164 insertions(+), 72 deletions(-)

diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index a53899c476f..8b64e793bed 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -639,4 +639,6 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
   String SCANNER_LEASE_EXPIRED_COUNT_DESC =
 "Count of scanners which were expired due to scanner lease timeout";
+  String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+  String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching 
completed for this region.";
 }
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 410d775d7d3..2f4fbb431ab 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ public class MetricsRegionSourceImpl implements 
MetricsRegionSource {
 this.regionWrapper.getNumReferenceFiles());
   mrb.addGauge(Interns.info(regionNamePrefix + 
MetricsRegionServerSource.STOREFILE_SIZE,
 MetricsRegionServerSource.STOREFILE_SIZE_DESC), 
this.regionWrapper.getStoreFileSize());
+  mrb.addGauge(
+Interns.info(regionNamePrefix + 
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+  MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+this.regionWrapper.getCurrentRegionCacheRatio());
   mrb.addCounter(
 Interns.info(regionNamePrefix + 
MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
   MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 55ddc603979..3445faf7eaa 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
   long getStoreFileSize();
 
+  /**
+   * Gets the current cache % ratio for this region.
+   */
+  float getCurrentRegionCacheRatio();
+
   /**
* Get the total number of read requests that have been issued against this 
region
*/
diff --git 
a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
 
b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 46b6405eb46..28286cc1c6e 100644
--- 
a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regio

(hbase) branch master updated: HBASE-28251 [SFT] Add description for specifying SFT impl during snapshot recovery (#5570)

2023-12-11 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 25c639f9d6d HBASE-28251 [SFT] Add description for specifying SFT impl 
during snapshot recovery (#5570)
25c639f9d6d is described below

commit 25c639f9d6deb52367089e2631004f4d6862daea
Author: Wellington Ramos Chevreuil 
AuthorDate: Mon Dec 11 10:17:33 2023 +

HBASE-28251 [SFT] Add description for specifying SFT impl during snapshot 
recovery (#5570)

Signed-off-by: Duo Zhang 
Signed-off-by: Nihal Jain 
Signed-off-by: Peter Somogyi 
---
 .../asciidoc/_chapters/bulk_data_generator_tool.adoc  |  8 
 src/main/asciidoc/_chapters/store_file_tracking.adoc  | 19 +++
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc 
b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc
index 3ac6ca69312..b04fcdeb726 100644
--- a/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc
+++ b/src/main/asciidoc/_chapters/bulk_data_generator_tool.adoc
@@ -18,8 +18,8 @@
  * limitations under the License.
  */
 
-
-== Bulk Data Generator Tool
+[[BulkDataGeneratorTool]]
+= Bulk Data Generator Tool
 :doctype: book
 :numbered:
 :toc: left
@@ -29,7 +29,7 @@
This is a random data generator tool for HBase tables leveraging HBase bulk 
load.
It can create pre-split HBase table and the generated data is *uniformly 
distributed* to all the regions of the table.
 
-=== How to Use
+== Usage
 
 [source]
 
@@ -53,7 +53,7 @@ hbase 
org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TE
 hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t 
TEST_TABLE -mc 10 -r 100 -sc 10 -Dmapreduce.map.memory.mb=8192
 
 
-=== How it Works
+== Overview
 
  Table Schema
 Tool generates a HBase table with single column family, i.e. *cf* and 9 
columns i.e.
diff --git a/src/main/asciidoc/_chapters/store_file_tracking.adoc 
b/src/main/asciidoc/_chapters/store_file_tracking.adoc
index 74d802f386c..b6c1f7e7339 100644
--- a/src/main/asciidoc/_chapters/store_file_tracking.adoc
+++ b/src/main/asciidoc/_chapters/store_file_tracking.adoc
@@ -143,3 +143,22 @@ example, that would be as follows:
 
 alter 'my-table', CONFIGURATION => {'hbase.store.file-tracker.impl' => 'FILE'}
 
+
+### Specifying trackers during snapshot recovery
+
+It's also possible to specify a given store file tracking implementation when 
recovering a snapshot
+using the _CLONE_SFT_ option of the _clone_snapshot_ command. This is useful when 
recovering old
+snapshots, taken prior to a change in the global configuration, or if the 
snapshot has been
+imported from a different cluster that had a different store file tracking 
setting.
+Because snapshots preserve table and column family descriptors, a simple 
restore would reload
+the original configuration, requiring the additional steps described above to 
convert the
+table/column family to the desired tracker implementation.
+An example of how to use _clone_snapshot_ to specify the *FILE* tracker 
implementation
+is shown below:
+
+
+clone_snapshot 'snapshotName', 'namespace:tableName', {CLONE_SFT=>'FILE'}
+
+
+NOTE: The option to specify the tracker during snapshot recovery is only 
available for the
+_clone_snapshot_ command. The _restore_snapshot_ command does not support this 
parameter.



(hbase) branch branch-2.4 updated: HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

2023-12-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new ee2d21a4113 HBASE-28209: Create a jmx metrics to expose the oldWALs 
directory size
ee2d21a4113 is described below

commit ee2d21a41134a3e3ca627edcb0913ed5bc03723c
Author: vinayak hegde 
AuthorDate: Fri Dec 8 15:57:48 2023 +0530

HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

Signed-off-by: Wellington Chevreuil 
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  7 ++
 hbase-common/src/main/resources/hbase-default.xml  |  6 ++
 .../hadoop/hbase/master/MetricsMasterSource.java   |  2 +
 .../hadoop/hbase/master/MetricsMasterWrapper.java  |  5 ++
 .../hbase/master/MetricsMasterSourceImpl.java  |  4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java|  6 ++
 .../hadoop/hbase/master/MasterWalManager.java  | 12 +++
 .../hbase/master/MetricsMasterWrapperImpl.java |  8 ++
 .../hadoop/hbase/master/OldWALsDirSizeChore.java   | 53 +
 .../hadoop/hbase/master/TestMasterMetrics.java |  2 +
 .../hbase/master/TestMasterMetricsWrapper.java |  1 +
 .../hbase/master/TestOldWALsDirSizeChore.java  | 90 ++
 12 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 3766da718d5..89d041d6702 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -154,6 +154,13 @@ public final class HConstants {
   /** Default value for the balancer period */
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 30;
 
+  /** Config for the oldWALs directory size updater period */
+  public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD =
+"hbase.master.oldwals.dir.updater.period";
+
+  /** Default value for the oldWALs directory size updater period */
+  public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 
30;
+
   /**
* Config key for enable/disable automatically separate child regions to 
different region servers
* in the procedure of split regions. One child will be kept to the server 
where parent region is
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 7fab04f4e2f..730e383e138 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -608,6 +608,12 @@ possible configurations would overwhelm and obscure the 
important.
 Period at which the region balancer runs in the Master, in
   milliseconds.
   
+  
+hbase.master.oldwals.dir.updater.period
+30
+Period at which the oldWALs directory size calculator/updater 
will run in the
+  Master, in milliseconds.
+  
   
 hbase.regions.slop
 0.001
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 5952bdc4d8e..7bbd57e98b4 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -66,6 +66,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_NAME = "mergePlanCount";
 
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
+  String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
   String MASTER_START_TIME_DESC = "Master Start Time";
   String MASTER_FINISHED_INITIALIZATION_TIME_DESC =
@@ -85,6 +86,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_DESC = "Number of Region Merge Plans executed";
 
   String SERVER_CRASH_METRIC_PREFIX = "serverCrash";
+  String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes";
 
   /**
* Increment the number of requests the cluster has seen.
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
index 051ad4335c2..ab8b4f5d949 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -146,4 +146,9 @@ public interface MetricsMasterWrapper {
* Get the time in Millis when the master finished initializing/becoming the 
active ma

(hbase) branch branch-2.6 updated: HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

2023-12-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 7133958792f HBASE-28209: Create a jmx metrics to expose the oldWALs 
directory size
7133958792f is described below

commit 7133958792ffdef15653f7c562a183e628cf74ae
Author: vinayak hegde 
AuthorDate: Fri Dec 8 15:57:48 2023 +0530

HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

Signed-off-by: Wellington Chevreuil 
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  7 ++
 hbase-common/src/main/resources/hbase-default.xml  |  6 ++
 .../hadoop/hbase/master/MetricsMasterSource.java   |  2 +
 .../hadoop/hbase/master/MetricsMasterWrapper.java  |  5 ++
 .../hbase/master/MetricsMasterSourceImpl.java  |  4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java|  6 ++
 .../hadoop/hbase/master/MasterWalManager.java  | 12 +++
 .../hbase/master/MetricsMasterWrapperImpl.java |  8 ++
 .../hadoop/hbase/master/OldWALsDirSizeChore.java   | 53 +
 .../hadoop/hbase/master/TestMasterMetrics.java |  2 +
 .../hbase/master/TestMasterMetricsWrapper.java |  1 +
 .../hbase/master/TestOldWALsDirSizeChore.java  | 90 ++
 12 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 4e6fe9874aa..b919ac9c5e5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -154,6 +154,13 @@ public final class HConstants {
   /** Default value for the balancer period */
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 30;
 
+  /** Config for the oldWALs directory size updater period */
+  public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD =
+"hbase.master.oldwals.dir.updater.period";
+
+  /** Default value for the oldWALs directory size updater period */
+  public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 
30;
+
   /**
* Config key for enable/disable automatically separate child regions to 
different region servers
* in the procedure of split regions. One child will be kept to the server 
where parent region is
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 3c293d9c6cd..95dea3ec439 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -617,6 +617,12 @@ possible configurations would overwhelm and obscure the 
important.
 Period at which the region balancer runs in the Master, in
   milliseconds.
   
+  
+hbase.master.oldwals.dir.updater.period
+30
+Period at which the oldWALs directory size calculator/updater 
will run in the
+  Master, in milliseconds.
+  
   
 hbase.regions.slop
 0.2
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 5952bdc4d8e..7bbd57e98b4 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -66,6 +66,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_NAME = "mergePlanCount";
 
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
+  String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
   String MASTER_START_TIME_DESC = "Master Start Time";
   String MASTER_FINISHED_INITIALIZATION_TIME_DESC =
@@ -85,6 +86,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_DESC = "Number of Region Merge Plans executed";
 
   String SERVER_CRASH_METRIC_PREFIX = "serverCrash";
+  String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes";
 
   /**
* Increment the number of requests the cluster has seen.
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
index 051ad4335c2..ab8b4f5d949 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -146,4 +146,9 @@ public interface MetricsMasterWrapper {
* Get the time in Millis when the master finished initializing/becoming the 
active master

(hbase) branch branch-2 updated: HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

2023-12-08 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 1e5e8b35bb1 HBASE-28209: Create a jmx metrics to expose the oldWALs 
directory size
1e5e8b35bb1 is described below

commit 1e5e8b35bb1fd037269094f9416e7413393f988d
Author: vinayak hegde 
AuthorDate: Fri Dec 8 15:57:48 2023 +0530

HBASE-28209: Create a jmx metrics to expose the oldWALs directory size

Signed-off-by: Wellington Chevreuil 
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  7 ++
 hbase-common/src/main/resources/hbase-default.xml  |  6 ++
 .../hadoop/hbase/master/MetricsMasterSource.java   |  2 +
 .../hadoop/hbase/master/MetricsMasterWrapper.java  |  5 ++
 .../hbase/master/MetricsMasterSourceImpl.java  |  4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java|  6 ++
 .../hadoop/hbase/master/MasterWalManager.java  | 12 +++
 .../hbase/master/MetricsMasterWrapperImpl.java |  8 ++
 .../hadoop/hbase/master/OldWALsDirSizeChore.java   | 53 +
 .../hadoop/hbase/master/TestMasterMetrics.java |  2 +
 .../hbase/master/TestMasterMetricsWrapper.java |  1 +
 .../hbase/master/TestOldWALsDirSizeChore.java  | 90 ++
 12 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 4e6fe9874aa..b919ac9c5e5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -154,6 +154,13 @@ public final class HConstants {
   /** Default value for the balancer period */
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 30;
 
+  /** Config for the oldWALs directory size updater period */
+  public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD =
+"hbase.master.oldwals.dir.updater.period";
+
+  /** Default value for the oldWALs directory size updater period */
+  public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 
30;
+
   /**
* Config key for enable/disable automatically separate child regions to 
different region servers
* in the procedure of split regions. One child will be kept to the server 
where parent region is
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 3c293d9c6cd..95dea3ec439 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -617,6 +617,12 @@ possible configurations would overwhelm and obscure the 
important.
 Period at which the region balancer runs in the Master, in
   milliseconds.
   
+  
+hbase.master.oldwals.dir.updater.period
+30
+Period at which the oldWALs directory size calculator/updater 
will run in the
+  Master, in milliseconds.
+  
   
 hbase.regions.slop
 0.2
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 5952bdc4d8e..7bbd57e98b4 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -66,6 +66,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_NAME = "mergePlanCount";
 
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
+  String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
   String MASTER_START_TIME_DESC = "Master Start Time";
   String MASTER_FINISHED_INITIALIZATION_TIME_DESC =
@@ -85,6 +86,7 @@ public interface MetricsMasterSource extends BaseSource {
   String MERGE_PLAN_COUNT_DESC = "Number of Region Merge Plans executed";
 
   String SERVER_CRASH_METRIC_PREFIX = "serverCrash";
+  String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes";
 
   /**
* Increment the number of requests the cluster has seen.
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
index 051ad4335c2..ab8b4f5d949 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java
@@ -146,4 +146,9 @@ public interface MetricsMasterWrapper {
* Get the time in Millis when the master finished initializing/becoming the 
active master
*

(hbase) branch branch-3 updated: HBASE-28209: Create a jmx metrics to expose the oldWALs directory size (#5528)

2023-12-04 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 8f8b9b83ca6 HBASE-28209: Create a jmx metrics to expose the oldWALs 
directory size (#5528)
8f8b9b83ca6 is described below

commit 8f8b9b83ca626fd344508b238512dc519b839182
Author: vinayak hegde 
AuthorDate: Mon Dec 4 16:33:34 2023 +0530

HBASE-28209: Create a jmx metrics to expose the oldWALs directory size 
(#5528)

Signed-off-by: Wellington Chevreuil 
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  7 ++
 hbase-common/src/main/resources/hbase-default.xml  |  6 ++
 .../hadoop/hbase/master/MetricsMasterSource.java   |  2 +
 .../hbase/master/MetricsMasterSourceImpl.java  |  4 +-
 .../hadoop/hbase/master/MetricsMasterWrapper.java  |  5 ++
 .../org/apache/hadoop/hbase/master/HMaster.java|  6 ++
 .../hadoop/hbase/master/MasterWalManager.java  | 12 +++
 .../hbase/master/MetricsMasterWrapperImpl.java |  8 ++
 .../hadoop/hbase/master/OldWALsDirSizeChore.java   | 53 +
 .../hadoop/hbase/master/TestMasterMetrics.java |  2 +
 .../hbase/master/TestMasterMetricsWrapper.java |  1 +
 .../hbase/master/TestOldWALsDirSizeChore.java  | 90 ++
 12 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 2aa9ecf69ec..5b53d2b2c0d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -153,6 +153,13 @@ public final class HConstants {
   /** Default value for the balancer period */
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 30;
 
+  /** Config for the oldWALs directory size updater period */
+  public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD =
+"hbase.master.oldwals.dir.updater.period";
+
+  /** Default value for the oldWALs directory size updater period */
+  public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 
30;
+
   /**
* Config key for enable/disable automatically separate child regions to 
different region servers
* in the procedure of split regions. One child will be kept to the server 
where parent region is
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 17a9853d2ad..1bf63b136e0 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -606,6 +606,12 @@ possible configurations would overwhelm and obscure the 
important.
 Period at which the region balancer runs in the Master, in
   milliseconds.
   
+  
+hbase.master.oldwals.dir.updater.period
+30
+Period at which the oldWALs directory size calculator/updater 
will run in the
+  Master, in milliseconds.
+  
   
 hbase.regions.slop
 0.2
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 4a5b97ae66b..d606ed63088 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -70,6 +70,7 @@ public interface MetricsMasterSource extends BaseSource {
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
   String CLUSTER_READ_REQUESTS_NAME = "clusterReadRequests";
   String CLUSTER_WRITE_REQUESTS_NAME = "clusterWriteRequests";
+  String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
   String MASTER_START_TIME_DESC = "Master Start Time";
   String MASTER_FINISHED_INITIALIZATION_TIME_DESC =
@@ -91,6 +92,7 @@ public interface MetricsMasterSource extends BaseSource {
   String OFFLINE_REGION_COUNT_DESC = "Number of Offline Regions";
 
   String SERVER_CRASH_METRIC_PREFIX = "serverCrash";
+  String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes";
 
   /**
* Increment the number of requests the cluster has seen.
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index e0abf77bea4..011e66312aa 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -129,7 +129,9 @@ public class MetricsMasterSourceImpl

(hbase) branch master updated: HBASE-28209: Create a jmx metrics to expose the oldWALs directory size (#5528)

2023-12-04 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new bc0f7a41b38 HBASE-28209: Create a jmx metrics to expose the oldWALs 
directory size (#5528)
bc0f7a41b38 is described below

commit bc0f7a41b38de453bf3cc188d54d227e0b7f3ba9
Author: vinayak hegde 
AuthorDate: Mon Dec 4 16:33:34 2023 +0530

HBASE-28209: Create a jmx metrics to expose the oldWALs directory size 
(#5528)

Signed-off-by: Wellington Chevreuil 
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |  7 ++
 hbase-common/src/main/resources/hbase-default.xml  |  6 ++
 .../hadoop/hbase/master/MetricsMasterSource.java   |  2 +
 .../hbase/master/MetricsMasterSourceImpl.java  |  4 +-
 .../hadoop/hbase/master/MetricsMasterWrapper.java  |  5 ++
 .../org/apache/hadoop/hbase/master/HMaster.java|  6 ++
 .../hadoop/hbase/master/MasterWalManager.java  | 12 +++
 .../hbase/master/MetricsMasterWrapperImpl.java |  8 ++
 .../hadoop/hbase/master/OldWALsDirSizeChore.java   | 53 +
 .../hadoop/hbase/master/TestMasterMetrics.java |  2 +
 .../hbase/master/TestMasterMetricsWrapper.java |  1 +
 .../hbase/master/TestOldWALsDirSizeChore.java  | 90 ++
 src/main/asciidoc/_chapters/hbase-default.adoc | 11 +++
 13 files changed, 206 insertions(+), 1 deletion(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 2aa9ecf69ec..5b53d2b2c0d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -153,6 +153,13 @@ public final class HConstants {
   /** Default value for the balancer period */
   public static final int DEFAULT_HBASE_BALANCER_PERIOD = 30;
 
+  /** Config for the oldWALs directory size updater period */
+  public static final String HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD =
+"hbase.master.oldwals.dir.updater.period";
+
+  /** Default value for the oldWALs directory size updater period */
+  public static final int DEFAULT_HBASE_OLDWAL_DIR_SIZE_UPDATER_PERIOD = 
30;
+
   /**
* Config key for enable/disable automatically separate child regions to 
different region servers
* in the procedure of split regions. One child will be kept to the server 
where parent region is
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 17a9853d2ad..1bf63b136e0 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -606,6 +606,12 @@ possible configurations would overwhelm and obscure the 
important.
 Period at which the region balancer runs in the Master, in
   milliseconds.
   
+  
+hbase.master.oldwals.dir.updater.period
+30
+Period at which the oldWALs directory size calculator/updater 
will run in the
+  Master, in milliseconds.
+  
   
 hbase.regions.slop
 0.2
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 4a5b97ae66b..d606ed63088 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -70,6 +70,7 @@ public interface MetricsMasterSource extends BaseSource {
   String CLUSTER_REQUESTS_NAME = "clusterRequests";
   String CLUSTER_READ_REQUESTS_NAME = "clusterReadRequests";
   String CLUSTER_WRITE_REQUESTS_NAME = "clusterWriteRequests";
+  String OLD_WAL_DIR_SIZE_NAME = "oldWALsDirSize";
   String MASTER_ACTIVE_TIME_DESC = "Master Active Time";
   String MASTER_START_TIME_DESC = "Master Start Time";
   String MASTER_FINISHED_INITIALIZATION_TIME_DESC =
@@ -91,6 +92,7 @@ public interface MetricsMasterSource extends BaseSource {
   String OFFLINE_REGION_COUNT_DESC = "Number of Offline Regions";
 
   String SERVER_CRASH_METRIC_PREFIX = "serverCrash";
+  String OLD_WAL_DIR_SIZE_DESC = "size of old WALs directory in bytes";
 
   /**
* Increment the number of requests the cluster has seen.
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index e0abf77bea4..011e66312aa 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@

(hbase) 01/02: HBASE-27998 Enhance region metrics to include prefetch ratio for each… (#5342)

2023-11-30 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 27dbcc4b300f379bcab2f39adb75a567b2f81a48
Author: Rahul Agarkar 
AuthorDate: Tue Aug 29 15:36:23 2023 +0530

HBASE-27998 Enhance region metrics to include prefetch ratio for each… 
(#5342)

Signed-off-by: Wellington Chevreuil 
(cherry picked from commit 9e74cc0d655badccdc300bc485b7ffb02b0606a8)
---
 .../org/apache/hadoop/hbase/RegionMetrics.java |   6 ++
 .../apache/hadoop/hbase/RegionMetricsBuilder.java  |  39 +++-
 .../org/apache/hadoop/hbase/ServerMetrics.java |   6 ++
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  |  20 +++-
 .../src/main/protobuf/BucketCacheEntry.proto   |   8 +-
 .../src/main/protobuf/ClusterStatus.proto  |  11 +++
 .../src/main/protobuf/PrefetchPersistence.proto|  36 
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |   8 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  11 ++-
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|   5 +-
 .../hadoop/hbase/io/hfile/PrefetchProtoUtils.java  |  53 ---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 101 +
 .../hbase/io/hfile/bucket/BucketProtoUtils.java|  26 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  40 +++-
 .../org/apache/hadoop/hbase/TestServerMetrics.java |  10 +-
 .../hbase/master/TestRegionsRecoveryChore.java |  14 +++
 16 files changed, 264 insertions(+), 130 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index d915e7a32ca..26022d98fa5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -132,4 +132,10 @@ public interface RegionMetrics {
 
   /** Returns the compaction state of this region */
   CompactionState getCompactionState();
+
+  /** Returns the total size of the hfiles in the region */
+  Size getRegionSizeMB();
+
+  /** Returns current prefetch ratio of this region on this server */
+  float getCurrentRegionCachedRatio();
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
index 15a9c48bfbe..b18e8aa8e1a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -79,7 +79,8 @@ public final class RegionMetricsBuilder {
   ClusterStatusProtos.StoreSequenceId::getSequenceId)))
   .setUncompressedStoreFileSize(
 new Size(regionLoadPB.getStoreUncompressedSizeMB(), 
Size.Unit.MEGABYTE))
-  .build();
+  .setRegionSizeMB(new Size(regionLoadPB.getRegionSizeMB(), 
Size.Unit.MEGABYTE))
+  
.setCurrentRegionCachedRatio(regionLoadPB.getCurrentRegionCachedRatio()).build();
   }
 
   private static List
@@ -118,7 +119,8 @@ public final class RegionMetricsBuilder {
   
.addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
   .setStoreUncompressedSizeMB(
 (int) 
regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
-  .build();
+  .setRegionSizeMB((int) 
regionMetrics.getRegionSizeMB().get(Size.Unit.MEGABYTE))
+  
.setCurrentRegionCachedRatio(regionMetrics.getCurrentRegionCachedRatio()).build();
   }
 
   public static RegionMetricsBuilder newBuilder(byte[] name) {
@@ -151,6 +153,8 @@ public final class RegionMetricsBuilder {
   private long blocksLocalWithSsdWeight;
   private long blocksTotalWeight;
   private CompactionState compactionState;
+  private Size regionSizeMB = Size.ZERO;
+  private float currentRegionCachedRatio;
 
   private RegionMetricsBuilder(byte[] name) {
 this.name = name;
@@ -281,6 +285,16 @@ public final class RegionMetricsBuilder {
 return this;
   }
 
+  public RegionMetricsBuilder setRegionSizeMB(Size value) {
+this.regionSizeMB = value;
+return this;
+  }
+
+  public RegionMetricsBuilder setCurrentRegionCachedRatio(float value) {
+this.currentRegionCachedRatio = value;
+return this;
+  }
+
   public RegionMetrics build() {
 return new RegionMetricsImpl(name, storeCount, storeFileCount, 
storeRefCount,
   maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, 
storeFileSize,
@@ -288,7 +302,7 @@ public final class RegionMetricsBuilder {
   uncompressedStoreFileSize, writeRequestCount, readRequestCount, 
filteredReadRequestCount,
   completedSequenceId, storeSequenceIds, dataLocality, 
lastMajorCompactionTimestamp,
   dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, 
blocksTotalWeight,
-  compactionState);
+  compactionState, regionSizeMB

(hbase) branch branch-2.6 updated (a91e93349f6 -> b74429afeea)

2023-11-30 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a change to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


from a91e93349f6 HBASE-28211 BucketCache.blocksByHFile may leak on 
allocationFailure or if we reach io errors tolerated (#5530)
 new 27dbcc4b300 HBASE-27998 Enhance region metrics to include prefetch 
ratio for each… (#5342)
 new b74429afeea HBASE-27999 Implement cache prefetch aware load balancer 
(#5527)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/org/apache/hadoop/hbase/RegionLoad.java   |  10 +
 .../org/apache/hadoop/hbase/RegionMetrics.java |   6 +
 .../apache/hadoop/hbase/RegionMetricsBuilder.java  |  39 +-
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |   5 +
 .../org/apache/hadoop/hbase/ServerMetrics.java |   6 +
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  |  20 +-
 .../java/org/apache/hadoop/hbase/HConstants.java   |  12 +
 .../src/main/protobuf/BucketCacheEntry.proto   |   8 +-
 .../src/main/protobuf/ClusterStatus.proto  |  11 +
 .../src/main/protobuf/PrefetchPersistence.proto|  36 --
 ...lancerWithStochasticLoadBalancerAsInternal.java |   2 +
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |   8 +-
 .../hadoop/hbase/io/hfile/BlockCacheFactory.java   |  13 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  11 +-
 .../hadoop/hbase/io/hfile/HFilePreadReader.java|   5 +-
 .../hadoop/hbase/io/hfile/PrefetchProtoUtils.java  |  53 ---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 115 -
 .../hbase/io/hfile/bucket/BucketProtoUtils.java|  26 +-
 .../hbase/io/hfile/bucket/PersistentIOEngine.java  |   4 +-
 .../master/balancer/BalancerClusterState.java  | 156 ++-
 .../hbase/master/balancer/BalancerRegionLoad.java  |  12 +
 .../hbase/master/balancer/BaseLoadBalancer.java|   3 +-
 .../master/balancer/CacheAwareLoadBalancer.java| 479 +
 .../master/balancer/StochasticLoadBalancer.java|  51 ++-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  40 +-
 .../org/apache/hadoop/hbase/TestServerMetrics.java |  10 +-
 .../hbase/master/TestRegionsRecoveryChore.java |  14 +
 .../hbase/master/balancer/BalancerTestBase.java|  14 +
 .../balancer/TestCacheAwareLoadBalancer.java   | 400 +
 .../TestCacheAwareLoadBalancerCostFunctions.java   | 327 ++
 .../balancer/TestStochasticLoadBalancer.java   |   2 +
 31 files changed, 1728 insertions(+), 170 deletions(-)
 delete mode 100644 
hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto
 delete mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
 create mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/CacheAwareLoadBalancer.java
 create mode 100644 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancer.java
 create mode 100644 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestCacheAwareLoadBalancerCostFunctions.java



(hbase) 02/02: HBASE-27999 Implement cache prefetch aware load balancer (#5527)

2023-11-30 Thread wchevreuil
This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit b74429afeea57782835ff17aa6ffdff1f75af0a3
Author: Wellington Ramos Chevreuil 
AuthorDate: Wed Nov 22 00:12:42 2023 +0530

HBASE-27999 Implement cache prefetch aware load balancer (#5527)

this commit is part of the rebase of HBASE-28186

Signed-off-by: Wellington Chevreuil 
Signed-off-by: Tak Lon (Stephen) Wu 
Co-authored-by: Rahul Agarkar 

(cherry picked from commit e799ee08be07d43f802c5f2624614630d80a0c9c)
---
 .../java/org/apache/hadoop/hbase/RegionLoad.java   |  10 +
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |   5 +
 .../java/org/apache/hadoop/hbase/HConstants.java   |  12 +
 ...lancerWithStochasticLoadBalancerAsInternal.java |   2 +
 .../hadoop/hbase/io/hfile/BlockCacheFactory.java   |  13 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |  14 +-
 .../hbase/io/hfile/bucket/PersistentIOEngine.java  |   4 +-
 .../master/balancer/BalancerClusterState.java  | 156 ++-
 .../hbase/master/balancer/BalancerRegionLoad.java  |  12 +
 .../hbase/master/balancer/BaseLoadBalancer.java|   3 +-
 .../master/balancer/CacheAwareLoadBalancer.java| 479 +
 .../master/balancer/StochasticLoadBalancer.java|  51 ++-
 .../hbase/master/balancer/BalancerTestBase.java|  14 +
 .../balancer/TestCacheAwareLoadBalancer.java   | 400 +
 .../TestCacheAwareLoadBalancerCostFunctions.java   | 327 ++
 .../balancer/TestStochasticLoadBalancer.java   |   2 +
 16 files changed, 1464 insertions(+), 40 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index d61ba86a33e..5bbe14884ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -389,6 +389,16 @@ public class RegionLoad implements RegionMetrics {
 return metrics.getCompactionState();
   }
 
+  @Override
+  public Size getRegionSizeMB() {
+return metrics.getRegionSizeMB();
+  }
+
+  @Override
+  public float getCurrentRegionCachedRatio() {
+return metrics.getCurrentRegionCachedRatio();
+  }
+
   /**
* @see java.lang.Object#toString()
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index eec0ac5cdca..e3fa4c3a01c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -430,6 +430,11 @@ public class ServerLoad implements ServerMetrics {
 return metrics.getTasks();
   }
 
+  @Override
+  public Map getRegionCachedInfo() {
+return metrics.getRegionCachedInfo();
+  }
+
   /**
* Originally, this method factored in the effect of requests going to the 
server as well.
* However, this does not interact very well with the current region 
rebalancing code, which only
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index c3b6d3bbb81..4e6fe9874aa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1428,6 +1428,18 @@ public final class HConstants {
*/
   public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";
 
+  /**
+   * If the chosen ioengine can persist its state across restarts, the path to 
the file to persist
+   * to. This file is NOT the data file. It is a file into which we will 
serialize the map of what
+   * is in the data file. For example, if you pass the following argument as
+   * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"),
+   * file:/tmp/bucketcache.data , then we will write the 
bucketcache data to the file
+   * /tmp/bucketcache.data but the metadata on where the data is 
in the supplied file
+   * is an in-memory map that needs to be persisted across restarts. Where to 
store this in-memory
+   * state is what you supply here: e.g. /tmp/bucketcache.map.
+   */
+  public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = 
"hbase.bucketcache.persistent.path";
+
   /**
* HConstants for fast fail on the client side follow
*/
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index ee8922ebe85..6faad5a3631 100644
--- 
a/hbase-rsgroup/src/test/java/org/apach

  1   2   3   4   5   6   7   >