(hbase) branch master updated: HBASE-28479 Change the deprecation cycle for HasMasterServices and HasRegionServerServices (#5872)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 339d7adfcf8 HBASE-28479 Change the deprecation cycle for 
HasMasterServices and HasRegionServerServices (#5872)
339d7adfcf8 is described below

commit 339d7adfcf8297809fc77eec335a4041a659576f
Author: Duo Zhang 
AuthorDate: Mon May 6 15:08:45 2024 +0800

HBASE-28479 Change the deprecation cycle for HasMasterServices and 
HasRegionServerServices (#5872)

Signed-off-by: Yi Mei 
---
 .../java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java  | 5 +++--
 .../org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
index dd8babf21dd..6b672178e2a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
@@ -24,8 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Mark a class that it has a MasterServices accessor. Temporary hack until 
core Coprocesssors are
  * integrated.
  * @see CoreCoprocessor
- * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 
we will not need this
- * facility as CoreCoprocessors are integated into core.
+ * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 
we will not need this
+ * facility as CoreCoprocessors are integated into core but we 
failed, so delay the
+ * removal to 4.0.0.
  */
 @Deprecated
 @InterfaceAudience.Private
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
index 21301efeafa..faf9c7e42ac 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
@@ -24,8 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Mark a class that it has a RegionServiceServices accessor. Temporary hack 
until core
  * Coprocesssors are integrated.
  * @see CoreCoprocessor
- * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 
we will not need this
- * facility as CoreCoprocessors are integated into core.
+ * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 
we will not need this
+ * facility as CoreCoprocessors are integated into core but we 
failed, so delay the
+ * removal to 4.0.0.
  */
 @Deprecated
 @InterfaceAudience.Private



(hbase) branch master updated (339d7adfcf8 -> 708882c6512)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


from 339d7adfcf8 HBASE-28479 Change the deprecation cycle for 
HasMasterServices and HasRegionServerServices (#5872)
 add 708882c6512 HBASE-28480 Remove deprecated methods in 
RegionCoprocessorHost for 3.0.0 (#5873)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 19 +
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 13 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 20 -
 .../hbase/regionserver/RegionCoprocessorHost.java  | 38 ++---
 .../coprocessor/SampleRegionWALCoprocessor.java| 48 +-
 .../hbase/coprocessor/SimpleRegionObserver.java| 36 
 .../coprocessor/TestRegionObserverInterface.java   | 45 ++--
 .../hadoop/hbase/coprocessor/TestWALObserver.java  |  5 +--
 8 files changed, 30 insertions(+), 194 deletions(-)



(hbase) branch master updated: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new ce113dd3f4e HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
ce113dd3f4e is described below

commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 3879cb7ba91..5af7de5678c 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -460,6 +460,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) 02/03: HBASE-28480 Remove deprecated methods in RegionCoprocessorHost for 3.0.0 (#5873)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit a27c317a300b3f2834fb45b58787745d55bc91bf
Author: Duo Zhang 
AuthorDate: Mon May 6 15:11:48 2024 +0800

HBASE-28480 Remove deprecated methods in RegionCoprocessorHost for 3.0.0 
(#5873)

Signed-off-by: Yi Mei 
(cherry picked from commit 708882c6512a7b3f863ec17731bb0eba004039d1)
---
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 19 +
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 13 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 20 -
 .../hbase/regionserver/RegionCoprocessorHost.java  | 38 ++---
 .../coprocessor/SampleRegionWALCoprocessor.java| 48 +-
 .../hbase/coprocessor/SimpleRegionObserver.java| 36 
 .../coprocessor/TestRegionObserverInterface.java   | 45 ++--
 .../hadoop/hbase/coprocessor/TestWALObserver.java  |  5 +--
 8 files changed, 30 insertions(+), 194 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 018826644ac..21cabcec1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -502,8 +502,9 @@ public interface RegionObserver {
* @param byteNow  - timestamp bytes
* @param get  - the get formed using the current cell's row. Note that 
the get does not
* specify the family and qualifier
-   * @deprecated Since hbase-2.0.0. No replacement. To be removed in 
hbase-3.0.0 and replaced with
+   * @deprecated Since hbase-2.0.0. No replacement. To be removed in 
hbase-4.0.0 and replaced with
* something that doesn't expose IntefaceAudience.Private 
classes.
+   * VisibilityController still needs this, need to change the 
logic there first.
*/
   @Deprecated
   default void 
prePrepareTimeStampForDeleteVersion(ObserverContext
 c,
@@ -1403,22 +1404,6 @@ public interface RegionObserver {
 RegionInfo info, Path edits) throws IOException {
   }
 
-  /**
-   * Called before a {@link WALEdit} replayed for this region.
-   * @param ctx the environment provided by the region server
-   */
-  default void preWALRestore(ObserverContext ctx,
-RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
-  }
-
-  /**
-   * Called after a {@link WALEdit} replayed for this region.
-   * @param ctx the environment provided by the region server
-   */
-  default void postWALRestore(ObserverContext ctx,
-RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
-  }
-
   /**
* Called before bulkLoadHFile. Users can create a StoreFile instance to 
access the contents of a
* HFile.
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ae4045b1216..c55090d3a75 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5656,15 +5656,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   currentReplaySeqId =
 (key.getOrigLogSeqNum() > 0) ? key.getOrigLogSeqNum() : 
currentEditSeqId;
 
-  // Start coprocessor replay here. The coprocessor is for each WALEdit
-  // instead of a KeyValue.
-  if (coprocessorHost != null) {
-status.setStatus("Running pre-WAL-restore hook in coprocessors");
-if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) 
{
-  // if bypass this wal entry, ignore it ...
-  continue;
-}
-  }
   boolean checkRowWithinBoundary = false;
   // Check this edit is for this region.
   if (
@@ -5733,10 +5724,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 internalFlushcache(null, currentEditSeqId, stores.values(), 
status, false,
   FlushLifeCycleTracker.DUMMY);
   }
-
-  if (coprocessorHost != null) {
-coprocessorHost.postWALRestore(this.getRegionInfo(), key, val);
-  }
 }
 
 if (coprocessorHost != null) {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index a2b9a93263d..babaa56170a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServic

(hbase) 01/03: HBASE-28479 Change the deprecation cycle for HasMasterServices and HasRegionServerServices (#5872)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 5d76864bfb7f57667a3de37967772b1b750d0f67
Author: Duo Zhang 
AuthorDate: Mon May 6 15:08:45 2024 +0800

HBASE-28479 Change the deprecation cycle for HasMasterServices and 
HasRegionServerServices (#5872)

Signed-off-by: Yi Mei 
(cherry picked from commit 339d7adfcf8297809fc77eec335a4041a659576f)
---
 .../java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java  | 5 +++--
 .../org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
index dd8babf21dd..6b672178e2a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasMasterServices.java
@@ -24,8 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Mark a class that it has a MasterServices accessor. Temporary hack until 
core Coprocesssors are
  * integrated.
  * @see CoreCoprocessor
- * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 
we will not need this
- * facility as CoreCoprocessors are integated into core.
+ * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 
we will not need this
+ * facility as CoreCoprocessors are integated into core but we 
failed, so delay the
+ * removal to 4.0.0.
  */
 @Deprecated
 @InterfaceAudience.Private
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
index 21301efeafa..faf9c7e42ac 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/HasRegionServerServices.java
@@ -24,8 +24,9 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Mark a class that it has a RegionServiceServices accessor. Temporary hack 
until core
  * Coprocesssors are integrated.
  * @see CoreCoprocessor
- * @deprecated Since 2.0.0 to be removed in 3.0.0. The hope is that by 3.0.0 
we will not need this
- * facility as CoreCoprocessors are integated into core.
+ * @deprecated Since 2.0.0 to be removed in 4.0.0. The hope was that by 3.0.0 
we will not need this
+ * facility as CoreCoprocessors are integated into core but we 
failed, so delay the
+ * removal to 4.0.0.
  */
 @Deprecated
 @InterfaceAudience.Private



(hbase) 03/03: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d20b9cd1541342e41fa423005b122d31650a492e
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
(cherry picked from commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b)
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 3879cb7ba91..5af7de5678c 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -460,6 +460,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) branch branch-3 updated (6ad7d9ea06e -> d20b9cd1541)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


from 6ad7d9ea06e HBASE-28521 Use standard ConnectionRegistry and Client API 
to get region server list in in replication (#5825)
 new 5d76864bfb7 HBASE-28479 Change the deprecation cycle for 
HasMasterServices and HasRegionServerServices (#5872)
 new a27c317a300 HBASE-28480 Remove deprecated methods in 
RegionCoprocessorHost for 3.0.0 (#5873)
 new d20b9cd1541 HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hbase/coprocessor/HasMasterServices.java   |  5 ++-
 .../hbase/coprocessor/HasRegionServerServices.java |  5 ++-
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 19 +
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 13 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 20 -
 .../hbase/regionserver/RegionCoprocessorHost.java  | 38 ++---
 .../coprocessor/SampleRegionWALCoprocessor.java| 48 +-
 .../hbase/coprocessor/SimpleRegionObserver.java| 36 
 .../coprocessor/TestRegionObserverInterface.java   | 45 ++--
 .../hadoop/hbase/coprocessor/TestWALObserver.java  |  5 +--
 .../apache/hadoop/hbase/zookeeper/ZKWatcher.java   |  5 +++
 11 files changed, 41 insertions(+), 198 deletions(-)



(hbase) branch branch-2.4 updated: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 82fe5e67e89 HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
82fe5e67e89 is described below

commit 82fe5e67e89c7d1fc451fe7292072bfa76a48dc7
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
(cherry picked from commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b)
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index d6c948931bb..251e0967238 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -458,6 +458,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) branch branch-2.5 updated: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new 069a12ab444 HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
069a12ab444 is described below

commit 069a12ab444c2486301d275197f7850c0182fe5d
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
(cherry picked from commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b)
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 3879cb7ba91..5af7de5678c 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -460,6 +460,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) branch branch-2.6 updated: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new f2fb0bf21a3 HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
f2fb0bf21a3 is described below

commit f2fb0bf21a374685970d6dfbc015eb55fe9ef9ff
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
(cherry picked from commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b)
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 3879cb7ba91..5af7de5678c 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -460,6 +460,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) branch branch-2 updated: HBASE-28567 Race condition causes MetaRegionLocationCache to never set watcher to populate meta location (#5874)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 7da84c2d687 HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
7da84c2d687 is described below

commit 7da84c2d6873eaf88e4883221b423ff12e6711bb
Author: Vincent Poon 
AuthorDate: Mon May 6 00:16:19 2024 -0700

HBASE-28567 Race condition causes MetaRegionLocationCache to never set 
watcher to populate meta location (#5874)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
(cherry picked from commit ce113dd3f4e9bd92d799fedbf684e8b0eb9b7a4b)
---
 .../src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java   | 5 +
 1 file changed, 5 insertions(+)

diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
index 3879cb7ba91..5af7de5678c 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java
@@ -460,6 +460,11 @@ public class ZKWatcher implements Watcher, Abortable, 
Closeable {
   public List getMetaReplicaNodesAndWatchChildren() throws 
KeeperException {
 List childrenOfBaseNode =
   ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
+// Need to throw here instead of returning an empty list if the base znode 
hasn't been created
+// Caller should retry in that case, versus thinking the base znode has a 
watcher set
+if (childrenOfBaseNode == null) {
+  keeperException(new 
KeeperException.NoNodeException(znodePaths.baseZNode));
+}
 return filterMetaReplicaNodes(childrenOfBaseNode);
   }
 



(hbase) branch master updated (ce113dd3f4e -> 917f2f1ec0c)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


from ce113dd3f4e HBASE-28567 Race condition causes MetaRegionLocationCache 
to never set watcher to populate meta location (#5874)
 add 917f2f1ec0c HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5851)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)



(hbase) branch branch-2 updated: HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5858)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new b7def4ff9b9 HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5858)
b7def4ff9b9 is described below

commit b7def4ff9b98164152ea4ccff6206bfe3d17bac6
Author: Sravishtta Kommineni <49591501+ksravi...@users.noreply.github.com>
AuthorDate: Mon May 6 04:17:09 2024 -0400

HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer 
(#5858)

Co-authored-by: Sravi Kommineni 
Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  8 +++-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 56 ++
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 98951667cbe..43dd4a7160e 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -87,7 +88,6 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -215,11 +215,15 @@ public class HFileOutputFormat2 extends 
FileOutputFormat RecordWriter 
createRecordWriter(
 final TaskAttemptContext context, final OutputCommitter committer) throws 
IOException {
 
 // Get the path of the temporary output file
-final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
+final Path outputDir = getWorkPath(committer);
 final Configuration conf = context.getConfiguration();
 final boolean writeMultipleTables =
   conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 128d690a9a1..3c486a8a52f 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -112,9 +112,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -1599,6 +1602,59 @@ public class TestHFileOutputFormat2 {
 }
   }
 
+  @Test
+  public void itGetsWorkPathHadoop2() throws Exception {
+Configuration conf = new Configuration(this.util.getConfiguration());
+Job job = new Job(conf);
+FileOutputCommitter committer =
+  new FileOutputCommitter(new Path("/test"), 
createTestTaskAttemptContext(job));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  @Test
+  public void itGetsWorkPathHadoo3() {
+Hadoop3TestOutputCommitter committer = new Hadoop3TestOutputCommitter(new 
Path("/test"));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  static class Hadoop3TestOutputCommitter extends OutputCommitter {
+
+Path path;
+
+Hadoop3TestOutputCommitter(Path path) {
+  this.path = path;
+}
+
+public Path getWorkPath() {
+  return path;
+}
+
+@Override
+public void setupJob(JobContext jobContext) throws IOException {
+
+}
+
+@Override
+public void setupTask(TaskAttemptContext taskAtt

(hbase) branch branch-3 updated: HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5851)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 7052e8caada HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5851)
7052e8caada is described below

commit 7052e8caada4ffbef40545c5d72b07ac6c395ccc
Author: Sravishtta Kommineni <49591501+ksravi...@users.noreply.github.com>
AuthorDate: Mon May 6 04:15:32 2024 -0400

HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer 
(#5851)

Co-authored-by: Sravi Kommineni 
Signed-off-by: Duo Zhang 
Reviewed-by: Ray Mattingly 
(cherry picked from commit 917f2f1ec0c0fa113cbcdc8ca1332c0e7f9b481b)
---
 .../java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index fcbcd2d9f59..2e288f24680 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -86,8 +86,8 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -194,7 +194,7 @@ public class HFileOutputFormat2 extends 
FileOutputFormat

(hbase) branch branch-2.4 updated: HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5858)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 8b9cafce0b2 HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5858)
8b9cafce0b2 is described below

commit 8b9cafce0b2fe7e09b4ee45fe869b6b2be77ee54
Author: Sravishtta Kommineni <49591501+ksravi...@users.noreply.github.com>
AuthorDate: Mon May 6 04:17:09 2024 -0400

HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer 
(#5858)

Co-authored-by: Sravi Kommineni 
Signed-off-by: Duo Zhang 
(cherry picked from commit b7def4ff9b98164152ea4ccff6206bfe3d17bac6)
---
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  8 +++-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 56 ++
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index e098b1c3277..bd08e5bab46 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -85,7 +86,6 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -193,11 +193,15 @@ public class HFileOutputFormat2 extends 
FileOutputFormat RecordWriter 
createRecordWriter(
 final TaskAttemptContext context, final OutputCommitter committer) throws 
IOException {
 
 // Get the path of the temporary output file
-final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
+final Path outputDir = getWorkPath(committer);
 final Configuration conf = context.getConfiguration();
 final boolean writeMultipleTables =
   conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 534b552a581..cadea13c19c 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -111,9 +111,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -1583,6 +1586,59 @@ public class TestHFileOutputFormat2 {
 }
   }
 
+  @Test
+  public void itGetsWorkPathHadoop2() throws Exception {
+Configuration conf = new Configuration(this.util.getConfiguration());
+Job job = new Job(conf);
+FileOutputCommitter committer =
+  new FileOutputCommitter(new Path("/test"), 
createTestTaskAttemptContext(job));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  @Test
+  public void itGetsWorkPathHadoo3() {
+Hadoop3TestOutputCommitter committer = new Hadoop3TestOutputCommitter(new 
Path("/test"));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  static class Hadoop3TestOutputCommitter extends OutputCommitter {
+
+Path path;
+
+Hadoop3TestOutputCommitter(Path path) {
+  this.path = path;
+}
+
+public Path getWorkPath() {
+  return path;
+}
+
+@Override
+public void setupJob(JobContext jobContext) throws IOException {
+

(hbase) branch branch-2.5 updated: HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5858)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new a99a06f0553 HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5858)
a99a06f0553 is described below

commit a99a06f0553f32fd663baac0c7b970e9abced0d9
Author: Sravishtta Kommineni <49591501+ksravi...@users.noreply.github.com>
AuthorDate: Mon May 6 04:17:09 2024 -0400

HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer 
(#5858)

Co-authored-by: Sravi Kommineni 
Signed-off-by: Duo Zhang 
(cherry picked from commit b7def4ff9b98164152ea4ccff6206bfe3d17bac6)
---
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  8 +++-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 56 ++
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index a35c44520a4..5627e02dc81 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -86,7 +87,6 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -203,11 +203,15 @@ public class HFileOutputFormat2 extends 
FileOutputFormat RecordWriter 
createRecordWriter(
 final TaskAttemptContext context, final OutputCommitter committer) throws 
IOException {
 
 // Get the path of the temporary output file
-final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
+final Path outputDir = getWorkPath(committer);
 final Configuration conf = context.getConfiguration();
 final boolean writeMultipleTables =
   conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 99b27f05630..8156302fc48 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -111,9 +111,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -1583,6 +1586,59 @@ public class TestHFileOutputFormat2 {
 }
   }
 
+  @Test
+  public void itGetsWorkPathHadoop2() throws Exception {
+Configuration conf = new Configuration(this.util.getConfiguration());
+Job job = new Job(conf);
+FileOutputCommitter committer =
+  new FileOutputCommitter(new Path("/test"), 
createTestTaskAttemptContext(job));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  @Test
+  public void itGetsWorkPathHadoo3() {
+Hadoop3TestOutputCommitter committer = new Hadoop3TestOutputCommitter(new 
Path("/test"));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  static class Hadoop3TestOutputCommitter extends OutputCommitter {
+
+Path path;
+
+Hadoop3TestOutputCommitter(Path path) {
+  this.path = path;
+}
+
+public Path getWorkPath() {
+  return path;
+}
+
+@Override
+public void setupJob(JobContext jobContext) throws IOException {
+

(hbase) branch branch-2.6 updated: HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer (#5858)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 0b4d1fd013f HBASE-28459 HFileOutputFormat2 ClassCastException with s3 
magic committer (#5858)
0b4d1fd013f is described below

commit 0b4d1fd013fa89491108559f1f51f285a12f2c44
Author: Sravishtta Kommineni <49591501+ksravi...@users.noreply.github.com>
AuthorDate: Mon May 6 04:17:09 2024 -0400

HBASE-28459 HFileOutputFormat2 ClassCastException with s3 magic committer 
(#5858)

Co-authored-by: Sravi Kommineni 
Signed-off-by: Duo Zhang 
(cherry picked from commit b7def4ff9b98164152ea4ccff6206bfe3d17bac6)
---
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java |  8 +++-
 .../hbase/mapreduce/TestHFileOutputFormat2.java| 56 ++
 2 files changed, 62 insertions(+), 2 deletions(-)

diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 98951667cbe..43dd4a7160e 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
@@ -87,7 +88,6 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.OutputFormat;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -215,11 +215,15 @@ public class HFileOutputFormat2 extends 
FileOutputFormat RecordWriter 
createRecordWriter(
 final TaskAttemptContext context, final OutputCommitter committer) throws 
IOException {
 
 // Get the path of the temporary output file
-final Path outputDir = ((FileOutputCommitter) committer).getWorkPath();
+final Path outputDir = getWorkPath(committer);
 final Configuration conf = context.getConfiguration();
 final boolean writeMultipleTables =
   conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false);
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 50e43571538..67bb489ce29 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -111,9 +111,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -1598,6 +1601,59 @@ public class TestHFileOutputFormat2 {
 }
   }
 
+  @Test
+  public void itGetsWorkPathHadoop2() throws Exception {
+Configuration conf = new Configuration(this.util.getConfiguration());
+Job job = new Job(conf);
+FileOutputCommitter committer =
+  new FileOutputCommitter(new Path("/test"), 
createTestTaskAttemptContext(job));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  @Test
+  public void itGetsWorkPathHadoo3() {
+Hadoop3TestOutputCommitter committer = new Hadoop3TestOutputCommitter(new 
Path("/test"));
+assertEquals(committer.getWorkPath(), 
HFileOutputFormat2.getWorkPath(committer));
+  }
+
+  static class Hadoop3TestOutputCommitter extends OutputCommitter {
+
+Path path;
+
+Hadoop3TestOutputCommitter(Path path) {
+  this.path = path;
+}
+
+public Path getWorkPath() {
+  return path;
+}
+
+@Override
+public void setupJob(JobContext jobContext) throws IOException {
+

(hbase) branch master updated: HBASE-28566 Remove ZKDataMigrator (#5875)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new bcd6205f9d0 HBASE-28566 Remove ZKDataMigrator (#5875)
bcd6205f9d0 is described below

commit bcd6205f9d00627b8015fcb014a5888419b85a13
Author: Duo Zhang 
AuthorDate: Mon May 6 21:58:35 2024 +0800

HBASE-28566 Remove ZKDataMigrator (#5875)

Signed-off-by: Yi Mei 
---
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   | 120 -
 1 file changed, 120 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
deleted file mode 100644
index 5f4ddb3821e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Utlity method to migrate zookeeper data across HBase versions.
- * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
- */
-@Deprecated
-@InterfaceAudience.Private
-public class ZKDataMigrator {
-  private static final Logger LOG = 
LoggerFactory.getLogger(ZKDataMigrator.class);
-
-  // Shutdown constructor.
-  private ZKDataMigrator() {
-  }
-
-  /**
-   * Method for table states migration. Used when upgrading from pre-2.0 to 
2.0 Reading state from
-   * zk, applying them to internal state and delete. Used by master to clean 
migration from zk based
-   * states to table descriptor based states.
-   * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  public static Map queryForTableStates(ZKWatcher 
zkw)
-throws KeeperException, InterruptedException {
-Map rv = new HashMap<>();
-List children = ZKUtil.listChildrenNoWatch(zkw, 
zkw.getZNodePaths().tableZNode);
-if (children == null) return rv;
-for (String child : children) {
-  TableName tableName = TableName.valueOf(child);
-  ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, 
tableName);
-  TableState.State newState = TableState.State.ENABLED;
-  if (state != null) {
-switch (state) {
-  case ENABLED:
-newState = TableState.State.ENABLED;
-break;
-  case DISABLED:
-newState = TableState.State.DISABLED;
-break;
-  case DISABLING:
-newState = TableState.State.DISABLING;
-break;
-  case ENABLING:
-newState = TableState.State.ENABLING;
-break;
-  default:
-}
-  }
-  rv.put(tableName, newState);
-}
-return rv;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw   ZKWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or
-   * {@link 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State}
-   * found in znode.
-   * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  private static ZooKeeperProtos.DeprecatedTableState.State 
getTableState(final ZKWatcher zkw,
-final TableName tableName) throws KeeperException, InterruptedException {
-String znode =
-  ZNodePaths.joinZNode(zkw.getZNodePaths().tableZNode, 
tableName.getNameAsString());
-

(hbase) branch branch-3 updated: HBASE-28566 Remove ZKDataMigrator (#5875)

2024-05-06 Thread zhangduo
This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new 84fcc8f268b HBASE-28566 Remove ZKDataMigrator (#5875)
84fcc8f268b is described below

commit 84fcc8f268bde1b081c76eb34b4c92c455212eb5
Author: Duo Zhang 
AuthorDate: Mon May 6 21:58:35 2024 +0800

HBASE-28566 Remove ZKDataMigrator (#5875)

Signed-off-by: Yi Mei 
(cherry picked from commit bcd6205f9d00627b8015fcb014a5888419b85a13)
---
 .../apache/hadoop/hbase/util/ZKDataMigrator.java   | 120 -
 1 file changed, 120 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
deleted file mode 100644
index 5f4ddb3821e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Utlity method to migrate zookeeper data across HBase versions.
- * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
- */
-@Deprecated
-@InterfaceAudience.Private
-public class ZKDataMigrator {
-  private static final Logger LOG = 
LoggerFactory.getLogger(ZKDataMigrator.class);
-
-  // Shutdown constructor.
-  private ZKDataMigrator() {
-  }
-
-  /**
-   * Method for table states migration. Used when upgrading from pre-2.0 to 
2.0 Reading state from
-   * zk, applying them to internal state and delete. Used by master to clean 
migration from zk based
-   * states to table descriptor based states.
-   * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  public static Map queryForTableStates(ZKWatcher 
zkw)
-throws KeeperException, InterruptedException {
-Map rv = new HashMap<>();
-List children = ZKUtil.listChildrenNoWatch(zkw, 
zkw.getZNodePaths().tableZNode);
-if (children == null) return rv;
-for (String child : children) {
-  TableName tableName = TableName.valueOf(child);
-  ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, 
tableName);
-  TableState.State newState = TableState.State.ENABLED;
-  if (state != null) {
-switch (state) {
-  case ENABLED:
-newState = TableState.State.ENABLED;
-break;
-  case DISABLED:
-newState = TableState.State.DISABLED;
-break;
-  case DISABLING:
-newState = TableState.State.DISABLING;
-break;
-  case ENABLING:
-newState = TableState.State.ENABLING;
-break;
-  default:
-}
-  }
-  rv.put(tableName, newState);
-}
-return rv;
-  }
-
-  /**
-   * Gets table state from ZK.
-   * @param zkw   ZKWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or
-   * {@link 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State}
-   * found in znode.
-   * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
-   */
-  @Deprecated
-  private static ZooKeeperProtos.DeprecatedTableState.State 
getTableState(final ZKWatcher zkw,
-final TableName tableName) throws KeeperException, InterruptedException {
-String znode =
-  ZNodePat

(hbase-site) branch asf-site updated: INFRA-10751 Empty commit

2024-05-06 Thread git-site-role
This is an automated email from the ASF dual-hosted git repository.

git-site-role pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/hbase-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new f7f78b26d41 INFRA-10751 Empty commit
f7f78b26d41 is described below

commit f7f78b26d41b392fbf52137a768acbe4b12a613c
Author: jenkins 
AuthorDate: Mon May 6 14:44:27 2024 +

INFRA-10751 Empty commit



(hbase) branch dependabot/pip/dev-support/flaky-tests/jinja2-3.1.4 created (now a105412eea2)

2024-05-06 Thread github-bot
This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a change to branch 
dependabot/pip/dev-support/flaky-tests/jinja2-3.1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


  at a105412eea2 Bump jinja2 from 3.1.3 to 3.1.4 in /dev-support/flaky-tests

No new revisions were added by this update.



(hbase) branch master updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 156e430dc56 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
156e430dc56 is described below

commit 156e430dc56211c0aea15d792e8733b1b0e3de5c
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.j

(hbase) branch branch-3 updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
 new d28a0961f99 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
d28a0961f99 is described below

commit d28a0961f9990045390f33499b1011eeba586544
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List<Cell> kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 00000000000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java

(hbase) branch branch-2 updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new fa85559da9a HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
fa85559da9a is described below

commit fa85559da9a661c8752b20ab91691de68e6d4c66
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List<Cell> kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 00000000000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java

(hbase) branch branch-2.6 updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.6 by this push:
 new 4f931d26488 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
4f931d26488 is described below

commit 4f931d26488c04e4a8807c5ae82ae1fa4e3e04cd
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List<Cell> kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 00000000000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java

(hbase) branch branch-2.5 updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
 new 7cd0436ac63 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
7cd0436ac63 is described below

commit 7cd0436ac63062324df8ea91e5245154a13d1454
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List<Cell> kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 00000000000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java

(hbase) branch branch-2.4 updated: HBASE-28556 Reduce memory copying in Rest server when serializing CellModel to Protobuf (#5870)

2024-05-06 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new fcfc11250f9 HBASE-28556 Reduce memory copying in Rest server when 
serializing CellModel to Protobuf (#5870)
fcfc11250f9 is described below

commit fcfc11250f99e27aef503ae554dd888f0d91b6a1
Author: Istvan Toth 
AuthorDate: Tue May 7 07:36:21 2024 +0200

HBASE-28556 Reduce memory copying in Rest server when serializing CellModel 
to Protobuf (#5870)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/rest/MultiRowResource.java | 11 +--
 .../hadoop/hbase/rest/ProtobufStreamingOutput.java | 17 +
 .../org/apache/hadoop/hbase/rest/RestUtil.java | 48 
 .../org/apache/hadoop/hbase/rest/RowResource.java  |  9 +--
 .../hadoop/hbase/rest/ScannerInstanceResource.java | 23 +++---
 .../hadoop/hbase/rest/TableScanResource.java   | 12 +--
 .../apache/hadoop/hbase/rest/model/CellModel.java  | 86 ++
 .../hadoop/hbase/rest/model/CellSetModel.java  | 18 -
 .../apache/hadoop/hbase/rest/model/RowModel.java   | 64 ++--
 9 files changed, 215 insertions(+), 73 deletions(-)

diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index 99fc0c845e6..8cce772472a 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -22,14 +22,10 @@ import java.util.ArrayList;
 import java.util.Base64;
 import java.util.Base64.Decoder;
 import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -125,12 +121,7 @@ public class MultiRowResource extends ResourceBase 
implements Constants {
 if (r.isEmpty()) {
   continue;
 }
-RowModel rowModel = new RowModel(r.getRow());
-for (Cell c : r.listCells()) {
-  rowModel.addCell(new CellModel(CellUtil.cloneFamily(c), 
CellUtil.cloneQualifier(c),
-c.getTimestamp(), CellUtil.cloneValue(c)));
-}
-model.addRow(rowModel);
+model.addRow(RestUtil.createRowModelFromResult(r));
   }
   if (model.getRows().isEmpty()) {
 // If no rows found.
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
index eadd6a9334b..60c3d363ec3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -19,14 +19,9 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.io.OutputStream;
-import java.util.List;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
-import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -91,15 +86,11 @@ public class ProtobufStreamingOutput implements 
StreamingOutput {
 
   private CellSetModel createModelFromResults(Result[] results) {
 CellSetModel cellSetModel = new CellSetModel();
-for (Result rs : results) {
-  byte[] rowKey = rs.getRow();
-  RowModel rModel = new RowModel(rowKey);
-  List<Cell> kvs = rs.listCells();
-  for (Cell kv : kvs) {
-rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), 
CellUtil.cloneQualifier(kv),
-  kv.getTimestamp(), CellUtil.cloneValue(kv)));
+for (int i = 0; i < results.length; i++) {
+  if (results[i].isEmpty()) {
+continue;
   }
-  cellSetModel.addRow(rModel);
+  cellSetModel.addRow(RestUtil.createRowModelFromResult(results[i]));
 }
 return cellSetModel;
   }
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java
new file mode 100644
index 00000000000..5f884c510d6
--- /dev/null
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RestUtil.java