[hbase] branch branch-2.4 updated: HBASE-26002 MultiRowMutationEndpoint should return the result of the conditional update (#3386)

2021-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new b061d47  HBASE-26002 MultiRowMutationEndpoint should return the result 
of the conditional update (#3386)
b061d47 is described below

commit b061d477a8f1206658d1f584c547e3a2322fe280
Author: Toshihiro Suzuki 
AuthorDate: Tue Jun 15 13:55:21 2021 +0900

HBASE-26002 MultiRowMutationEndpoint should return the result of the 
conditional update (#3386)

Signed-off-by: Duo Zhang 
---
 .../src/main/protobuf/MultiRowMutation.proto   |  1 +
 .../coprocessor/MultiRowMutationEndpoint.java  | 11 
 .../hadoop/hbase/client/TestFromClientSide5.java   | 29 --
 3 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto 
b/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
index 571e633..1c0bbf7 100644
--- a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
+++ b/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
@@ -41,6 +41,7 @@ message MutateRowsRequest {
 }
 
 message MutateRowsResponse {
+  optional bool processed = 1;
 }
 
 service MultiRowMutationService {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index c840d54..cd2fdf6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -96,7 +96,10 @@ import com.google.protobuf.Service;
  * MultiRowMutationService.BlockingInterface service =
  *   MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
- * service.mutateRows(null, mrm);
+ * MutateRowsResponse response = service.mutateRows(null, mrm);
+ *
+ * // We can get the result of the conditional update
+ * boolean processed = response.getProcessed();
  * 
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@@ -109,8 +112,7 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
   @Override
   public void mutateRows(RpcController controller, MutateRowsRequest request,
   RpcCallback done) {
-MutateRowsResponse response = MutateRowsResponse.getDefaultInstance();
-
+boolean matches = true;
 List rowLocks = null;
 try {
   // set of rows to lock, sorted to avoid deadlocks
@@ -141,7 +143,6 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
 rowsToLock.add(m.getRow());
   }
 
-  boolean matches = true;
   if (request.getConditionCount() > 0) {
 // Get row locks for the mutations and the conditions
 rowLocks = new ArrayList<>();
@@ -184,7 +185,7 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
 }
   }
 }
-done.run(response);
+done.run(MutateRowsResponse.newBuilder().setProcessed(matches).build());
   }
 
   private boolean matches(Region region, ClientProtos.Condition condition) 
throws IOException {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
index 809fd2a..7c3206d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
@@ -76,6 +76,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -299,9 +300,11 @@ public class TestFromClientSide5 extends 
FromClientSideBase {
   CoprocessorRpcChannel channel = t.coprocessorService(ROW);
   MultiRowMutationService.BlockingInterface service =
   MultiRowMutationService.newBlockingStub(channel);
-  service.mutateRows(null, mrmBuilder.build());
+  MutateRowsResponse response = service.mutateRows(null, 
mrmBuilder.build());
 
   // Assert
+  assertTrue(response.getProcessed());
+
   Result r = t.get(new Get(ROW));
   assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getVal

[hbase] branch branch-2 updated: HBASE-26002 MultiRowMutationEndpoint should return the result of the conditional update (#3386)

2021-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 6f1e652  HBASE-26002 MultiRowMutationEndpoint should return the result 
of the conditional update (#3386)
6f1e652 is described below

commit 6f1e6520919dc135d896b63bacf6bb61ec6d4918
Author: Toshihiro Suzuki 
AuthorDate: Tue Jun 15 13:55:21 2021 +0900

HBASE-26002 MultiRowMutationEndpoint should return the result of the 
conditional update (#3386)

Signed-off-by: Duo Zhang 
---
 .../src/main/protobuf/MultiRowMutation.proto   |  1 +
 .../coprocessor/MultiRowMutationEndpoint.java  | 11 
 .../hadoop/hbase/client/TestFromClientSide5.java   | 29 --
 3 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto 
b/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
index 571e633..1c0bbf7 100644
--- a/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
+++ b/hbase-protocol/src/main/protobuf/MultiRowMutation.proto
@@ -41,6 +41,7 @@ message MutateRowsRequest {
 }
 
 message MutateRowsResponse {
+  optional bool processed = 1;
 }
 
 service MultiRowMutationService {
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index c840d54..cd2fdf6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -96,7 +96,10 @@ import com.google.protobuf.Service;
  * MultiRowMutationService.BlockingInterface service =
  *   MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
- * service.mutateRows(null, mrm);
+ * MutateRowsResponse response = service.mutateRows(null, mrm);
+ *
+ * // We can get the result of the conditional update
+ * boolean processed = response.getProcessed();
  * 
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@@ -109,8 +112,7 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
   @Override
   public void mutateRows(RpcController controller, MutateRowsRequest request,
   RpcCallback done) {
-MutateRowsResponse response = MutateRowsResponse.getDefaultInstance();
-
+boolean matches = true;
 List rowLocks = null;
 try {
   // set of rows to lock, sorted to avoid deadlocks
@@ -141,7 +143,6 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
 rowsToLock.add(m.getRow());
   }
 
-  boolean matches = true;
   if (request.getConditionCount() > 0) {
 // Get row locks for the mutations and the conditions
 rowLocks = new ArrayList<>();
@@ -184,7 +185,7 @@ public class MultiRowMutationEndpoint extends 
MultiRowMutationService implements
 }
   }
 }
-done.run(response);
+done.run(MutateRowsResponse.newBuilder().setProcessed(matches).build());
   }
 
   private boolean matches(Region region, ClientProtos.Condition condition) 
throws IOException {
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
index a1c7a6b..61c605f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
@@ -76,6 +76,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
@@ -299,9 +300,11 @@ public class TestFromClientSide5 extends 
FromClientSideBase {
   CoprocessorRpcChannel channel = t.coprocessorService(ROW);
   MultiRowMutationService.BlockingInterface service =
   MultiRowMutationService.newBlockingStub(channel);
-  service.mutateRows(null, mrmBuilder.build());
+  MutateRowsResponse response = service.mutateRows(null, 
mrmBuilder.build());
 
   // Assert
+  assertTrue(response.getProcessed());
+
   Result r = t.get(new Get(ROW));
   assertEquals(Bytes.toString(VALUE), Bytes.toString(r.getVal

[hbase] branch master updated: HBASE-26002 MultiRowMutationEndpoint should return the result of the conditional update (addendum)

2021-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 4262887  HBASE-26002 MultiRowMutationEndpoint should return the result 
of the conditional update (addendum)
4262887 is described below

commit 42628874324dcc869c9b8a6899e3aa016ac7391e
Author: Toshihiro Suzuki 
AuthorDate: Tue Jun 15 09:42:49 2021 +0900

HBASE-26002 MultiRowMutationEndpoint should return the result of the 
conditional update (addendum)

Signed-off-by: Duo Zhang 
---
 .../coprocessor/MultiRowMutationEndpoint.java  |  2 +-
 .../hadoop/hbase/client/TestFromClientSide5.java   | 23 --
 2 files changed, 9 insertions(+), 16 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index 0a9b8fc..271ae87 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -96,7 +96,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.
  * MultiRowMutationService.BlockingInterface service =
  *   MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
- * MultiRowMutationProtos.MutateRowsResponse response = 
service.mutateRows(null, mrm);
+ * MutateRowsResponse response = service.mutateRows(null, mrm);
  *
  * // We can get the result of the conditional update
  * boolean processed = response.getProcessed();
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
index 8f938ee..f98d2ca 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
@@ -76,7 +76,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -99,6 +98,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
 
 /**
  * Run tests that use the HBase clients; {@link Table}.
@@ -302,8 +302,7 @@ public class TestFromClientSide5 extends FromClientSideBase 
{
   CoprocessorRpcChannel channel = t.coprocessorService(ROW);
   MultiRowMutationService.BlockingInterface service =
   MultiRowMutationService.newBlockingStub(channel);
-  MultiRowMutationProtos.MutateRowsResponse response =
-service.mutateRows(null, mrmBuilder.build());
+  MutateRowsResponse response = service.mutateRows(null, 
mrmBuilder.build());
 
   // Assert
   assertTrue(response.getProcessed());
@@ -353,8 +352,7 @@ public class TestFromClientSide5 extends FromClientSideBase 
{
   CoprocessorRpcChannel channel = t.coprocessorService(ROW);
   MultiRowMutationService.BlockingInterface service =
 MultiRowMutationService.newBlockingStub(channel);
-  MultiRowMutationProtos.MutateRowsResponse response =
-service.mutateRows(null, mrmBuilder.build());
+  MutateRowsResponse response = service.mutateRows(null, 
mrmBuilder.build());
 
   // Assert
   assertTrue(response.getProcessed());
@@ -400,8 +398,7 @@ public class TestFromClientSide5 extends FromClientSideBase 
{
   CoprocessorRpcChannel channel = t.coprocessorService(ROW);
   MultiRowMutationService.BlockingInterface service =
 MultiRowMutationService.newBlockingStub(channel);
-  MultiRowMutationProtos.MutateRowsResponse response =
-service.mutateRows(null, mrmBuilder.build());
+  MutateRowsResponse response = service.mutateRows(null, 
mrmBuilder.build());
 
   // Assert
   assertFalse(response.getProcessed());
@@ -449,8 +446,7 @@ public class TestFromClientSide5 extends FromClientSideBase 
{
   CoprocessorRpcChannel channel

[hbase] branch master updated: HBASE-26002 MultiRowMutationEndpoint should return the result of the conditional update (addendum)

2021-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new a35ec99  HBASE-26002 MultiRowMutationEndpoint should return the result 
of the conditional update (addendum)
a35ec99 is described below

commit a35ec994b94036fcd25acb1ea1738d8882ab587e
Author: Toshihiro Suzuki 
AuthorDate: Tue Jun 15 08:03:30 2021 +0900

HBASE-26002 MultiRowMutationEndpoint should return the result of the 
conditional update (addendum)

Signed-off-by: Duo Zhang 
---
 .../apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
index 382470d..0a9b8fc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java
@@ -96,7 +96,10 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.
  * MultiRowMutationService.BlockingInterface service =
  *   MultiRowMutationService.newBlockingStub(channel);
  * MutateRowsRequest mrm = mrmBuilder.build();
- * service.mutateRows(null, mrm);
+ * MultiRowMutationProtos.MutateRowsResponse response = 
service.mutateRows(null, mrm);
+ *
+ * // We can get the result of the conditional update
+ * boolean processed = response.getProcessed();
  * 
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)


[hbase] branch master updated (ba6995e -> ec31818)

2021-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from ba6995e  HBASE-25989 FanOutOneBlockAsyncDFSOutput using shaded 
protobuf in hdfs 3.3+ (#3368)
 add ec31818  HBASE-26002 MultiRowMutationEndpoint should return the result 
of the conditional update (#3384)

No new revisions were added by this update.

Summary of changes:
 .../server/coprocessor/MultiRowMutation.proto  |  1 +
 .../coprocessor/MultiRowMutationEndpoint.java  |  6 ++--
 .../hadoop/hbase/client/TestFromClientSide5.java   | 36 +-
 3 files changed, 32 insertions(+), 11 deletions(-)


[hbase] branch branch-2.4 updated: HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of the split point

2021-04-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new cc7c435  HBASE-25766 Introduce RegionSplitRestriction that restricts 
the pattern of the split point
cc7c435 is described below

commit cc7c4352ee089bcde3adcb02bb21d169000cff8a
Author: Toshihiro Suzuki 
AuthorDate: Thu Apr 22 13:53:36 2021 +0900

HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of 
the split point

Signed-off-by: Duo Zhang 
Signed-off-by: Michael Stack 
---
 .../assignment/SplitTableRegionProcedure.java  |  25 ++-
 .../DelimitedKeyPrefixRegionSplitPolicy.java   |   4 +
 ... DelimitedKeyPrefixRegionSplitRestriction.java} |  69 
 .../apache/hadoop/hbase/regionserver/HRegion.java  |   7 +
 .../regionserver/KeyPrefixRegionSplitPolicy.java   |   4 +
 .../KeyPrefixRegionSplitRestriction.java   |  76 +
 .../regionserver/NoRegionSplitRestriction.java |  40 +
 .../hbase/regionserver/RegionSplitRestriction.java | 129 +++
 .../regionserver/TestRegionSplitRestriction.java   | 184 +
 9 files changed, 496 insertions(+), 42 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3ed6058..09ac827 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -110,6 +111,21 @@ public class SplitTableRegionProcedure
 // we fail-fast on construction. There it skips the split with just a 
warning.
 checkOnline(env, regionToSplit);
 this.bestSplitRow = splitRow;
+TableDescriptor tableDescriptor = 
env.getMasterServices().getTableDescriptors()
+  .get(getTableName());
+Configuration conf = env.getMasterConfiguration();
+if (hasBestSplitRow()) {
+  // Apply the split restriction for the table to the user-specified split 
point
+  RegionSplitRestriction splitRestriction =
+RegionSplitRestriction.create(tableDescriptor, conf);
+  byte[] restrictedSplitRow = 
splitRestriction.getRestrictedSplitPoint(bestSplitRow);
+  if (!Bytes.equals(bestSplitRow, restrictedSplitRow)) {
+LOG.warn("The specified split point {} violates the split restriction 
of the table. "
++ "Using {} as a split point.", Bytes.toStringBinary(bestSplitRow),
+  Bytes.toStringBinary(restrictedSplitRow));
+bestSplitRow = restrictedSplitRow;
+  }
+}
 checkSplittable(env, regionToSplit);
 final TableName table = regionToSplit.getTable();
 final long rid = getDaughterRegionIdTimestamp(regionToSplit);
@@ -125,15 +141,14 @@ public class SplitTableRegionProcedure
 .setSplit(false)
 .setRegionId(rid)
 .build();
-TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
-if(htd.getRegionSplitPolicyClassName() != null) {
+if(tableDescriptor.getRegionSplitPolicyClassName() != null) {
   // Since we don't have region reference here, creating the split policy 
instance without it.
   // This can be used to invoke methods which don't require Region 
reference. This instantiation
   // of a class on Master-side though it only makes sense on the 
RegionServer-side is
   // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
   Class clazz =
-  RegionSplitPolicy.getSplitPolicyClass(htd, 
env.getMasterConfiguration());
-  this.splitPolicy = ReflectionUtils.newInstance(clazz, 
env.getMasterConfiguration());
+RegionSplitPolicy.getSplitPolicyClass(tableDescriptor, conf);
+  this.splitPolicy = ReflectionUtils.newInstance(clazz, conf);
 }
   }
 
@@ -219,7 +234,7 @@ public class SplitTableRegionProcedure
   throw e;
 }
 
-if (bestSplitRow == null || bestSplitRow.length == 0) {
+if (!hasBestSplitRow()) {
   throw new DoNotRetryIOException("Region not splittable because 
bestSplitPoint = null, " +
 "maybe table is too small for auto split. For force split, try 
specifying split row");
 }
diff

[hbase] branch branch-2 updated: HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of the split point

2021-04-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new f4f8430  HBASE-25766 Introduce RegionSplitRestriction that restricts 
the pattern of the split point
f4f8430 is described below

commit f4f84302fa6b53acb9a0b7b13c3df55b39a44345
Author: Toshihiro Suzuki 
AuthorDate: Thu Apr 22 13:53:36 2021 +0900

HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of 
the split point

Signed-off-by: Duo Zhang 
Signed-off-by: Michael Stack 
---
 .../assignment/SplitTableRegionProcedure.java  |  25 ++-
 .../DelimitedKeyPrefixRegionSplitPolicy.java   |   4 +
 ... DelimitedKeyPrefixRegionSplitRestriction.java} |  69 
 .../apache/hadoop/hbase/regionserver/HRegion.java  |   7 +
 .../regionserver/KeyPrefixRegionSplitPolicy.java   |   4 +
 .../KeyPrefixRegionSplitRestriction.java   |  76 +
 .../regionserver/NoRegionSplitRestriction.java |  40 +
 .../hbase/regionserver/RegionSplitRestriction.java | 129 +++
 .../regionserver/TestRegionSplitRestriction.java   | 184 +
 9 files changed, 496 insertions(+), 42 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3ed6058..09ac827 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -110,6 +111,21 @@ public class SplitTableRegionProcedure
 // we fail-fast on construction. There it skips the split with just a 
warning.
 checkOnline(env, regionToSplit);
 this.bestSplitRow = splitRow;
+TableDescriptor tableDescriptor = 
env.getMasterServices().getTableDescriptors()
+  .get(getTableName());
+Configuration conf = env.getMasterConfiguration();
+if (hasBestSplitRow()) {
+  // Apply the split restriction for the table to the user-specified split 
point
+  RegionSplitRestriction splitRestriction =
+RegionSplitRestriction.create(tableDescriptor, conf);
+  byte[] restrictedSplitRow = 
splitRestriction.getRestrictedSplitPoint(bestSplitRow);
+  if (!Bytes.equals(bestSplitRow, restrictedSplitRow)) {
+LOG.warn("The specified split point {} violates the split restriction 
of the table. "
++ "Using {} as a split point.", Bytes.toStringBinary(bestSplitRow),
+  Bytes.toStringBinary(restrictedSplitRow));
+bestSplitRow = restrictedSplitRow;
+  }
+}
 checkSplittable(env, regionToSplit);
 final TableName table = regionToSplit.getTable();
 final long rid = getDaughterRegionIdTimestamp(regionToSplit);
@@ -125,15 +141,14 @@ public class SplitTableRegionProcedure
 .setSplit(false)
 .setRegionId(rid)
 .build();
-TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
-if(htd.getRegionSplitPolicyClassName() != null) {
+if(tableDescriptor.getRegionSplitPolicyClassName() != null) {
   // Since we don't have region reference here, creating the split policy 
instance without it.
   // This can be used to invoke methods which don't require Region 
reference. This instantiation
   // of a class on Master-side though it only makes sense on the 
RegionServer-side is
   // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
   Class clazz =
-  RegionSplitPolicy.getSplitPolicyClass(htd, 
env.getMasterConfiguration());
-  this.splitPolicy = ReflectionUtils.newInstance(clazz, 
env.getMasterConfiguration());
+RegionSplitPolicy.getSplitPolicyClass(tableDescriptor, conf);
+  this.splitPolicy = ReflectionUtils.newInstance(clazz, conf);
 }
   }
 
@@ -219,7 +234,7 @@ public class SplitTableRegionProcedure
   throw e;
 }
 
-if (bestSplitRow == null || bestSplitRow.length == 0) {
+if (!hasBestSplitRow()) {
   throw new DoNotRetryIOException("Region not splittable because 
bestSplitPoint = null, " +
 "maybe table is too small for auto split. For force split, try 
specifying split row");
 }
diff --git 

[hbase] branch master updated: HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of the split point (#3150)

2021-04-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 5f4e2e1  HBASE-25766 Introduce RegionSplitRestriction that restricts 
the pattern of the split point (#3150)
5f4e2e1 is described below

commit 5f4e2e111bfa73821221ac4cf61c47c65c71213a
Author: Toshihiro Suzuki 
AuthorDate: Thu Apr 22 13:53:36 2021 +0900

HBASE-25766 Introduce RegionSplitRestriction that restricts the pattern of 
the split point (#3150)

Signed-off-by: Duo Zhang 
Signed-off-by: Michael Stack 
---
 .../assignment/SplitTableRegionProcedure.java  |  25 ++-
 .../DelimitedKeyPrefixRegionSplitPolicy.java   |   4 +
 ... DelimitedKeyPrefixRegionSplitRestriction.java} |  69 
 .../apache/hadoop/hbase/regionserver/HRegion.java  |   7 +
 .../regionserver/KeyPrefixRegionSplitPolicy.java   |   4 +
 .../KeyPrefixRegionSplitRestriction.java   |  76 +
 .../regionserver/NoRegionSplitRestriction.java |  40 +
 .../hbase/regionserver/RegionSplitRestriction.java | 129 +++
 .../regionserver/TestRegionSplitRestriction.java   | 184 +
 9 files changed, 496 insertions(+), 42 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3ed6058..09ac827 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.RegionSplitRestriction;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -110,6 +111,21 @@ public class SplitTableRegionProcedure
 // we fail-fast on construction. There it skips the split with just a 
warning.
 checkOnline(env, regionToSplit);
 this.bestSplitRow = splitRow;
+TableDescriptor tableDescriptor = 
env.getMasterServices().getTableDescriptors()
+  .get(getTableName());
+Configuration conf = env.getMasterConfiguration();
+if (hasBestSplitRow()) {
+  // Apply the split restriction for the table to the user-specified split 
point
+  RegionSplitRestriction splitRestriction =
+RegionSplitRestriction.create(tableDescriptor, conf);
+  byte[] restrictedSplitRow = 
splitRestriction.getRestrictedSplitPoint(bestSplitRow);
+  if (!Bytes.equals(bestSplitRow, restrictedSplitRow)) {
+LOG.warn("The specified split point {} violates the split restriction 
of the table. "
++ "Using {} as a split point.", Bytes.toStringBinary(bestSplitRow),
+  Bytes.toStringBinary(restrictedSplitRow));
+bestSplitRow = restrictedSplitRow;
+  }
+}
 checkSplittable(env, regionToSplit);
 final TableName table = regionToSplit.getTable();
 final long rid = getDaughterRegionIdTimestamp(regionToSplit);
@@ -125,15 +141,14 @@ public class SplitTableRegionProcedure
 .setSplit(false)
 .setRegionId(rid)
 .build();
-TableDescriptor htd = 
env.getMasterServices().getTableDescriptors().get(getTableName());
-if(htd.getRegionSplitPolicyClassName() != null) {
+if(tableDescriptor.getRegionSplitPolicyClassName() != null) {
   // Since we don't have region reference here, creating the split policy 
instance without it.
   // This can be used to invoke methods which don't require Region 
reference. This instantiation
   // of a class on Master-side though it only makes sense on the 
RegionServer-side is
   // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
   Class clazz =
-  RegionSplitPolicy.getSplitPolicyClass(htd, 
env.getMasterConfiguration());
-  this.splitPolicy = ReflectionUtils.newInstance(clazz, 
env.getMasterConfiguration());
+RegionSplitPolicy.getSplitPolicyClass(tableDescriptor, conf);
+  this.splitPolicy = ReflectionUtils.newInstance(clazz, conf);
 }
   }
 
@@ -219,7 +234,7 @@ public class SplitTableRegionProcedure
   throw e;
 }
 
-if (bestSplitRow == null || bestSplitRow.length == 0) {
+if (!hasBestSplitRow()) {
   throw new DoNotRetryIOException("Region not splittable because 
bestSplitPoint = null, " +
 "maybe table is too small for auto split. For force split, try 
specifying split row");
 }

[hbase] branch branch-2.4 updated: HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3107)

2021-03-30 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 6f4ab4a  HBASE-25703 Support conditional update in 
MultiRowMutationEndpoint (#3107)
6f4ab4a is described below

commit 6f4ab4a13458c4e9aeedb4fac43e66f5e67f3f38
Author: Toshihiro Suzuki 
AuthorDate: Tue Mar 30 23:04:29 2021 +0900

HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3107)

Signed-off-by: Michael Stack 
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  52 
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  35 +++
 .../hbase/shaded/protobuf/RequestConverter.java|  45 +--
 .../src/main/protobuf/Client.proto |   4 +-
 .../src/main/protobuf/MultiRowMutation.proto   |   1 +
 .../coprocessor/MultiRowMutationEndpoint.java  | 176 ++-
 .../hadoop/hbase/client/TestFromClientSide5.java   | 329 -
 .../hbase/client/TestMalformedCellFromClient.java  |   4 +-
 8 files changed, 579 insertions(+), 67 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 5bba1e1..dc7f7d2 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
@@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -751,6 +753,9 @@ public final class ProtobufUtil {
*/
   public static Mutation toMutation(final MutationProto proto) throws 
IOException {
 MutationType type = proto.getMutateType();
+if (type == MutationType.INCREMENT) {
+  return toIncrement(proto, null);
+}
 if (type == MutationType.APPEND) {
   return toAppend(proto, null);
 }
@@ -1790,4 +1795,51 @@ public final class ProtobufUtil {
   .setTo(timeRange.getMax())
   .build();
   }
+
+  public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) {
+if (timeRange == null) {
+  return TimeRange.allTime();
+}
+if (timeRange.hasFrom()) {
+  if (timeRange.hasTo()) {
+return TimeRange.between(timeRange.getFrom(), timeRange.getTo());
+  } else {
+return TimeRange.from(timeRange.getFrom());
+  }
+} else {
+  return TimeRange.until(timeRange.getTo());
+}
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value, 
final Filter filter,
+final TimeRange timeRange) throws IOException {
+
+ClientProtos.Condition.Builder builder = 
ClientProtos.Condition.newBuilder()
+  .setRow(ByteStringer.wrap(row));
+
+if (filter != null) {
+  builder.setFilter(ProtobufUtil.toFilter(filter));
+} else {
+  builder.setFamily(ByteStringer.wrap(family))
+.setQualifier(ByteStringer.wrap(
+  qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier))
+.setComparator(
+  ProtobufUtil.toComparator(new BinaryComparator(value)))
+.setCompareType(HBaseProtos.CompareType.valueOf(op.name()));
+}
+
+return builder.setTimeRange(ProtobufUtil.toTimeRange(timeRange)).build();
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
Filter filter,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, null, null, null, null, filter, timeRange);
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, family, qualifier, op, value, null, timeRange);
+  }
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 9064ded..cb471d2 100644

[hbase] branch branch-2 updated: HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3107)

2021-03-30 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 1d3ea38  HBASE-25703 Support conditional update in 
MultiRowMutationEndpoint (#3107)
1d3ea38 is described below

commit 1d3ea38f1e2847c055fa964e5df2c039b3b756dd
Author: Toshihiro Suzuki 
AuthorDate: Tue Mar 30 23:04:29 2021 +0900

HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3107)

Signed-off-by: Michael Stack 
---
 .../apache/hadoop/hbase/protobuf/ProtobufUtil.java |  52 
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  35 +++
 .../hbase/shaded/protobuf/RequestConverter.java|  45 +--
 .../src/main/protobuf/Client.proto |   4 +-
 .../src/main/protobuf/MultiRowMutation.proto   |   1 +
 .../coprocessor/MultiRowMutationEndpoint.java  | 176 ++-
 .../hadoop/hbase/client/TestFromClientSide5.java   | 329 -
 .../hbase/client/TestMalformedCellFromClient.java  |   4 +-
 8 files changed, 579 insertions(+), 67 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 5bba1e1..dc7f7d2 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
@@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -751,6 +753,9 @@ public final class ProtobufUtil {
*/
   public static Mutation toMutation(final MutationProto proto) throws 
IOException {
 MutationType type = proto.getMutateType();
+if (type == MutationType.INCREMENT) {
+  return toIncrement(proto, null);
+}
 if (type == MutationType.APPEND) {
   return toAppend(proto, null);
 }
@@ -1790,4 +1795,51 @@ public final class ProtobufUtil {
   .setTo(timeRange.getMax())
   .build();
   }
+
+  public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) {
+if (timeRange == null) {
+  return TimeRange.allTime();
+}
+if (timeRange.hasFrom()) {
+  if (timeRange.hasTo()) {
+return TimeRange.between(timeRange.getFrom(), timeRange.getTo());
+  } else {
+return TimeRange.from(timeRange.getFrom());
+  }
+} else {
+  return TimeRange.until(timeRange.getTo());
+}
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value, 
final Filter filter,
+final TimeRange timeRange) throws IOException {
+
+ClientProtos.Condition.Builder builder = 
ClientProtos.Condition.newBuilder()
+  .setRow(ByteStringer.wrap(row));
+
+if (filter != null) {
+  builder.setFilter(ProtobufUtil.toFilter(filter));
+} else {
+  builder.setFamily(ByteStringer.wrap(family))
+.setQualifier(ByteStringer.wrap(
+  qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier))
+.setComparator(
+  ProtobufUtil.toComparator(new BinaryComparator(value)))
+.setCompareType(HBaseProtos.CompareType.valueOf(op.name()));
+}
+
+return builder.setTimeRange(ProtobufUtil.toTimeRange(timeRange)).build();
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
Filter filter,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, null, null, null, null, filter, timeRange);
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, family, qualifier, op, value, null, timeRange);
+  }
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 9064ded..cb471d2 100644
--- 
a/hbase

[hbase] branch master updated: HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3098)

2021-03-29 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 46f7d9d  HBASE-25703 Support conditional update in 
MultiRowMutationEndpoint (#3098)
46f7d9d is described below

commit 46f7d9dd4b8d82780d0e4911fb64c3b641e7d8ad
Author: Toshihiro Suzuki 
AuthorDate: Tue Mar 30 09:18:56 2021 +0900

HBASE-25703 Support conditional update in MultiRowMutationEndpoint (#3098)

Signed-off-by: Michael Stack 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  35 +++
 .../hbase/shaded/protobuf/RequestConverter.java|  38 +--
 .../src/main/protobuf/client/Client.proto  |   4 +-
 .../server/coprocessor/MultiRowMutation.proto  |   1 +
 .../coprocessor/MultiRowMutationEndpoint.java  | 177 ++-
 .../hadoop/hbase/client/TestFromClientSide5.java   | 329 -
 .../hbase/client/TestMalformedCellFromClient.java  |   4 +-
 7 files changed, 523 insertions(+), 65 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 073be3b..1b00887 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -100,6 +100,7 @@ import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -965,6 +966,9 @@ public final class ProtobufUtil {
*/
   public static Mutation toMutation(final MutationProto proto) throws 
IOException {
 MutationType type = proto.getMutateType();
+if (type == MutationType.INCREMENT) {
+  return toIncrement(proto, null);
+}
 if (type == MutationType.APPEND) {
   return toAppend(proto, null);
 }
@@ -3722,6 +3726,37 @@ public final class ProtobufUtil {
 }
   }
 
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value, 
final Filter filter,
+final TimeRange timeRange) throws IOException {
+
+ClientProtos.Condition.Builder builder = 
ClientProtos.Condition.newBuilder()
+  .setRow(UnsafeByteOperations.unsafeWrap(row));
+
+if (filter != null) {
+  builder.setFilter(ProtobufUtil.toFilter(filter));
+} else {
+  builder.setFamily(UnsafeByteOperations.unsafeWrap(family))
+.setQualifier(UnsafeByteOperations.unsafeWrap(
+  qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier))
+.setComparator(ProtobufUtil.toComparator(new BinaryComparator(value)))
+.setCompareType(HBaseProtos.CompareType.valueOf(op.name()));
+}
+
+return builder.setTimeRange(ProtobufUtil.toTimeRange(timeRange)).build();
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
Filter filter,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, null, null, null, null, filter, timeRange);
+  }
+
+  public static ClientProtos.Condition toCondition(final byte[] row, final 
byte[] family,
+final byte[] qualifier, final CompareOperator op, final byte[] value,
+final TimeRange timeRange) throws IOException {
+return toCondition(row, family, qualifier, op, value, null, timeRange);
+  }
+
   public static List toBalancerDecisionResponse(
   HBaseProtos.LogEntry logEntry) {
 try {
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index c3e20c3..c5911ab 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -101,7 +100,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import

[hbase] branch master updated: HBASE-25702 Remove RowProcessor (#3097)

2021-03-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 93b1163  HBASE-25702 Remove RowProcessor (#3097)
93b1163 is described below

commit 93b1163a8bc759e2121cd25d8e209cc69c9c9e74
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 28 07:38:42 2021 +0900

HBASE-25702 Remove RowProcessor (#3097)

Signed-off-by: Duo Zhang 
---
 .../coprocessor/TestRowProcessorEndpoint.java  | 679 -
 .../protobuf/server/coprocessor/RowProcessor.proto |  46 --
 .../client/coprocessor/RowProcessorClient.java |  53 --
 .../coprocessor/BaseRowProcessorEndpoint.java  | 149 -
 .../hbase/regionserver/BaseRowProcessor.java   |  71 ---
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 257 
 .../apache/hadoop/hbase/regionserver/Region.java   |  47 +-
 .../hadoop/hbase/regionserver/RowProcessor.java| 159 -
 8 files changed, 1 insertion(+), 1460 deletions(-)

diff --git 
a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
deleted file mode 100644
index ebdbc3d..0000000
--- 
a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
+++ /dev/null
@@ -1,679 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.coprocessor;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.IsolationLevel;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.coprocessor.RowProcessorClient;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.regionserver.BaseRowProcessor;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.protobuf.Message;
-import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-
-import 
org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos;
-import 
org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorRequest;
-import 
org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.FriendsOfFriendsProcessorResponse;
-import 
org.apache.hadoop.hbase.shaded.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.IncCounterProce

[hbase] branch branch-1 updated: HBASE-25686 [hbtop] Add some javadoc

2021-03-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 90a83f5  HBASE-25686 [hbtop] Add some javadoc
90a83f5 is described below

commit 90a83f5a6ab96c12d09f83f18862d3eedfdfc52b
Author: Toshihiro Suzuki 
AuthorDate: Sat Mar 27 17:20:22 2021 +0900

HBASE-25686 [hbtop] Add some javadoc

Signed-off-by: Wei-Chiu Chuang 
---
 .../org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java  | 5 -
 .../hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java  | 2 +-
 .../hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java  | 7 +++
 .../hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java  | 3 +++
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
index 0084e23..4fd8aca 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
@@ -43,7 +43,10 @@ import 
org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
 
 /**
- * The implementation of the {@link Terminal} interface.
+ * An implementation of the {@link Terminal} interface for normal display mode.
+ *
+ * This implementation produces output intended for human viewing. In 
particular, it only displays
+ * one screenful of data. The output contains some escape sequences for 
formatting.
  */
 @InterfaceAudience.Private
 public class TerminalImpl implements Terminal {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
index 0d698b0..3e8c6a2 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 
 /**
- * The implementation of the {@link TerminalPrinter} interface.
+ * An implementation of the {@link TerminalPrinter} interface for normal 
display mode.
  */
 @InterfaceAudience.Private
 public class TerminalPrinterImpl extends AbstractTerminalPrinter {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
index 19d6426..60f5502 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
@@ -24,6 +24,13 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
 
+/**
+ * An implementation of the {@link Terminal} interface for batch mode.
+ *
+ * This implementation produces output that's more sensible for collecting to 
a log file or for
+ * parsing. There is no limit on the number of output lines, and the output 
doesn't contain any
+ * escape sequences for formatting.
+ */
 public class BatchTerminal implements Terminal {
 
   private static final TerminalPrinter TERMINAL_PRINTER = new 
BatchTerminalPrinter();
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
index afae5e8..4864e07 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl.batch;
 import org.apache.hadoop.hbase.hbtop.terminal.AbstractTerminalPrinter;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 
+/**
+ * An implementation of the {@link TerminalPrinter} interface for batch mode.
+ */
 public class BatchTerminalPrinter extends AbstractTerminalPrinter {
 
   @Override


[hbase] branch branch-2 updated: HBASE-25686 [hbtop] Add some javadoc (#3096)

2021-03-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 693de81  HBASE-25686 [hbtop] Add some javadoc (#3096)
693de81 is described below

commit 693de816b27fab07199eacdf4701cf762d3aada4
Author: Toshihiro Suzuki 
AuthorDate: Sat Mar 27 17:20:22 2021 +0900

HBASE-25686 [hbtop] Add some javadoc (#3096)

Signed-off-by: Wei-Chiu Chuang 
---
 .../org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java  | 6 --
 .../hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java  | 3 +--
 .../hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java  | 7 +++
 .../hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java  | 3 +++
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
index ed71523..c6b74af 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
@@ -42,9 +42,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
- * The implementation of the {@link Terminal} interface.
+ * An implementation of the {@link Terminal} interface for normal display mode.
+ *
+ * This implementation produces output intended for human viewing. In 
particular, it only displays
+ * one screenful of data. The output contains some escape sequences for 
formatting.
  */
 @InterfaceAudience.Private
 public class TerminalImpl implements Terminal {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
index 6ad7bdc..788d267 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
@@ -23,9 +23,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
- * The implementation of the {@link TerminalPrinter} interface.
+ * An implementation of the {@link TerminalPrinter} interface for normal 
display mode.
  */
 @InterfaceAudience.Private
 public class TerminalPrinterImpl implements TerminalPrinter {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
index 19d6426..60f5502 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
@@ -24,6 +24,13 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
 
+/**
+ * An implementation of the {@link Terminal} interface for batch mode.
+ *
+ * This implementation produces output that's more sensible for collecting to 
a log file or for
+ * parsing. There is no limit on the number of output lines, and the output 
doesn't contain any
+ * escape sequences for formatting.
+ */
 public class BatchTerminal implements Terminal {
 
   private static final TerminalPrinter TERMINAL_PRINTER = new 
BatchTerminalPrinter();
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
index db7a9e7..6031666 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl.batch;
 
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 
+/**
+ * An implementation of the {@link TerminalPrinter} interface for batch mode.
+ */
 public class BatchTerminalPrinter implements TerminalPrinter {
 
   @Override


[hbase] branch master updated: HBASE-25686 [hbtop] Add some javadoc (#3096)

2021-03-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new b062598  HBASE-25686 [hbtop] Add some javadoc (#3096)
b062598 is described below

commit b0625984f2a1db8c246112b14bb228e83234d25f
Author: Toshihiro Suzuki 
AuthorDate: Sat Mar 27 17:20:22 2021 +0900

HBASE-25686 [hbtop] Add some javadoc (#3096)

Signed-off-by: Wei-Chiu Chuang 
---
 .../org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java  | 6 --
 .../hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java  | 3 +--
 .../hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java  | 7 +++
 .../hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java  | 3 +++
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
index ed71523..c6b74af 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java
@@ -42,9 +42,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
- * The implementation of the {@link Terminal} interface.
+ * An implementation of the {@link Terminal} interface for normal display mode.
+ *
+ * This implementation produces output intended for human viewing. In 
particular, it only displays
+ * one screenful of data. The output contains some escape sequences for 
formatting.
  */
 @InterfaceAudience.Private
 public class TerminalImpl implements Terminal {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
index 6ad7bdc..788d267 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java
@@ -23,9 +23,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
- * The implementation of the {@link TerminalPrinter} interface.
+ * An implementation of the {@link TerminalPrinter} interface for normal 
display mode.
  */
 @InterfaceAudience.Private
 public class TerminalPrinterImpl implements TerminalPrinter {
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
index 19d6426..60f5502 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java
@@ -24,6 +24,13 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
 
+/**
+ * An implementation of the {@link Terminal} interface for batch mode.
+ *
+ * This implementation produces output that's more sensible for collecting to 
a log file or for
+ * parsing. There is no limit on the number of output lines, and the output 
doesn't contain any
+ * escape sequences for formatting.
+ */
 public class BatchTerminal implements Terminal {
 
   private static final TerminalPrinter TERMINAL_PRINTER = new 
BatchTerminalPrinter();
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
index db7a9e7..6031666 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl.batch;
 
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter;
 
+/**
+ * An implementation of the {@link TerminalPrinter} interface for batch mode.
+ */
 public class BatchTerminalPrinter implements TerminalPrinter {
 
   @Override


[hbase] branch branch-2.4 updated: HBASE-25678 Support nonce operations for Increment/Append in RowMutations and CheckAndMutate

2021-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 4cf28c4  HBASE-25678 Support nonce operations for Increment/Append in 
RowMutations and CheckAndMutate
4cf28c4 is described below

commit 4cf28c43f589f8be0daa3f9b9846642b72c1b8ec
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 21 22:47:53 2021 +0900

HBASE-25678 Support nonce operations for Increment/Append in RowMutations 
and CheckAndMutate

Signed-off-by: stack 
---
 .../hbase/client/AsyncBatchRpcRetryingCaller.java  |  22 +-
 .../apache/hadoop/hbase/client/AsyncProcess.java   |  25 +-
 .../org/apache/hadoop/hbase/client/HTable.java |  36 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  40 +--
 .../hbase/shaded/protobuf/RequestConverter.java| 138 +---
 .../hadoop/hbase/coprocessor/RegionObserver.java   |   2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  59 ++--
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  54 +--
 .../hbase/client/TestAsyncTableNoncedRetry.java| 268 +--
 .../hadoop/hbase/client/TestHTableNoncedRetry.java | 365 +
 10 files changed, 851 insertions(+), 158 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 7e05b05..7af385d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -172,7 +172,7 @@ class AsyncBatchRpcRetryingCaller {
   } else {
 action = new Action(rawAction, i);
   }
-  if (rawAction instanceof Append || rawAction instanceof Increment) {
+  if (hasIncrementOrAppend(rawAction)) {
 action.setNonce(conn.getNonceGenerator().newNonce());
   }
   this.actions.add(action);
@@ -184,6 +184,26 @@ class AsyncBatchRpcRetryingCaller {
 this.startNs = System.nanoTime();
   }
 
+  private static boolean hasIncrementOrAppend(Row action) {
+if (action instanceof Append || action instanceof Increment) {
+  return true;
+} else if (action instanceof RowMutations) {
+  return hasIncrementOrAppend((RowMutations) action);
+} else if (action instanceof CheckAndMutate) {
+  return hasIncrementOrAppend(((CheckAndMutate) action).getAction());
+}
+return false;
+  }
+
+  private static boolean hasIncrementOrAppend(RowMutations mutations) {
+for (Mutation mutation : mutations.getMutations()) {
+  if (mutation instanceof Append || mutation instanceof Increment) {
+return true;
+  }
+}
+return false;
+  }
+
   private long remainingTimeNs() {
 return operationTimeoutNs - (System.nanoTime() - startNs);
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 8cd046f..6071cb6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -398,8 +398,29 @@ class AsyncProcess {
   }
 
   private void setNonce(NonceGenerator ng, Row r, Action action) {
-if (!(r instanceof Append) && !(r instanceof Increment)) return;
-action.setNonce(ng.newNonce()); // Action handles NO_NONCE, so it's ok if 
ng is disabled.
+if (hasIncrementOrAppend(r)) {
+  action.setNonce(ng.newNonce()); // Action handles NO_NONCE, so it's ok 
if ng is disabled.
+}
+  }
+
+  private static boolean hasIncrementOrAppend(Row action) {
+if (action instanceof Append || action instanceof Increment) {
+  return true;
+} else if (action instanceof RowMutations) {
+  return hasIncrementOrAppend((RowMutations) action);
+} else if (action instanceof CheckAndMutate) {
+  return hasIncrementOrAppend(((CheckAndMutate) action).getAction());
+}
+return false;
+  }
+
+  private static boolean hasIncrementOrAppend(RowMutations mutations) {
+for (Mutation mutation : mutations.getMutations()) {
+  if (mutation instanceof Append || mutation instanceof Increment) {
+return true;
+  }
+}
+return false;
   }
 
   private int checkTimeout(String name, int timeout) {
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index a219fed..a04fd26 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -46,7 +46,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

[hbase] branch branch-2 updated: HBASE-25678 Support nonce operations for Increment/Append in RowMutations and CheckAndMutate (#3073)

2021-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2b5c2c3  HBASE-25678 Support nonce operations for Increment/Append in 
RowMutations and CheckAndMutate (#3073)
2b5c2c3 is described below

commit 2b5c2c36d99e8ecb7e7a345d83b1551999632f4a
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 21 22:47:53 2021 +0900

HBASE-25678 Support nonce operations for Increment/Append in RowMutations 
and CheckAndMutate (#3073)

Signed-off-by: stack 
---
 .../hbase/client/AsyncBatchRpcRetryingCaller.java  |  22 +-
 .../apache/hadoop/hbase/client/AsyncProcess.java   |  25 +-
 .../org/apache/hadoop/hbase/client/HTable.java |  36 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  40 +--
 .../hbase/shaded/protobuf/RequestConverter.java| 138 +---
 .../hadoop/hbase/coprocessor/RegionObserver.java   |   2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  59 ++--
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  53 +--
 .../hbase/client/TestAsyncTableNoncedRetry.java| 268 +--
 .../hadoop/hbase/client/TestHTableNoncedRetry.java | 365 +
 10 files changed, 850 insertions(+), 158 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 7e05b05..7af385d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -172,7 +172,7 @@ class AsyncBatchRpcRetryingCaller {
   } else {
 action = new Action(rawAction, i);
   }
-  if (rawAction instanceof Append || rawAction instanceof Increment) {
+  if (hasIncrementOrAppend(rawAction)) {
 action.setNonce(conn.getNonceGenerator().newNonce());
   }
   this.actions.add(action);
@@ -184,6 +184,26 @@ class AsyncBatchRpcRetryingCaller {
 this.startNs = System.nanoTime();
   }
 
+  private static boolean hasIncrementOrAppend(Row action) {
+if (action instanceof Append || action instanceof Increment) {
+  return true;
+} else if (action instanceof RowMutations) {
+  return hasIncrementOrAppend((RowMutations) action);
+} else if (action instanceof CheckAndMutate) {
+  return hasIncrementOrAppend(((CheckAndMutate) action).getAction());
+}
+return false;
+  }
+
+  private static boolean hasIncrementOrAppend(RowMutations mutations) {
+for (Mutation mutation : mutations.getMutations()) {
+  if (mutation instanceof Append || mutation instanceof Increment) {
+return true;
+  }
+}
+return false;
+  }
+
   private long remainingTimeNs() {
 return operationTimeoutNs - (System.nanoTime() - startNs);
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 8cd046f..6071cb6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -398,8 +398,29 @@ class AsyncProcess {
   }
 
   private void setNonce(NonceGenerator ng, Row r, Action action) {
-if (!(r instanceof Append) && !(r instanceof Increment)) return;
-action.setNonce(ng.newNonce()); // Action handles NO_NONCE, so it's ok if 
ng is disabled.
+if (hasIncrementOrAppend(r)) {
+  action.setNonce(ng.newNonce()); // Action handles NO_NONCE, so it's ok 
if ng is disabled.
+}
+  }
+
+  private static boolean hasIncrementOrAppend(Row action) {
+if (action instanceof Append || action instanceof Increment) {
+  return true;
+} else if (action instanceof RowMutations) {
+  return hasIncrementOrAppend((RowMutations) action);
+} else if (action instanceof CheckAndMutate) {
+  return hasIncrementOrAppend(((CheckAndMutate) action).getAction());
+}
+return false;
+  }
+
+  private static boolean hasIncrementOrAppend(RowMutations mutations) {
+for (Mutation mutation : mutations.getMutations()) {
+  if (mutation instanceof Append || mutation instanceof Increment) {
+return true;
+  }
+}
+return false;
   }
 
   private int checkTimeout(String name, int timeout) {
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index a219fed..a04fd26 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -46,7 +46,6 @

[hbase] branch branch-1 updated: HBASE-25258 Backport HBASE-24776 "[hbtop] Support Batch mode" to branch-1 (#3065)

2021-03-19 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new f807800  HBASE-25258 Backport HBASE-24776 "[hbtop] Support Batch mode" 
to branch-1 (#3065)
f807800 is described below

commit f8078009e9e8cadee76fd38d1e35ea6812b866de
Author: Toshihiro Suzuki 
AuthorDate: Sat Mar 20 11:16:26 2021 +0900

HBASE-25258 Backport HBASE-24776 "[hbtop] Support Batch mode" to branch-1 
(#3065)

Signed-off-by: stack st...@apache.org
---
 .../java/org/apache/hadoop/hbase/hbtop/HBTop.java  | 169 ++---
 .../hbase/hbtop/screen/AbstractScreenView.java |   1 +
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  26 +++-
 .../hbase/hbtop/screen/top/TopScreenModel.java |  44 --
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  46 --
 .../hbase/hbtop/screen/top/TopScreenView.java  |  48 --
 .../hadoop/hbase/hbtop/terminal/Terminal.java  |   2 +-
 .../hbtop/terminal/impl/batch/BatchTerminal.java   |  80 ++
 .../batch/BatchTerminalPrinter.java}   |  52 ---
 .../hbase/hbtop/screen/top/TestTopScreenModel.java |  16 +-
 .../hbtop/screen/top/TestTopScreenPresenter.java   |   3 +-
 11 files changed, 391 insertions(+), 96 deletions(-)

diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
index ac05bb2..b0f4710 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.hbtop;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
 
 import org.apache.commons.cli.BasicParser;
@@ -30,6 +32,8 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.hbtop.field.Field;
+import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
 import org.apache.hadoop.hbase.hbtop.screen.Screen;
 import org.apache.hadoop.util.Tool;
@@ -55,17 +59,14 @@ public class HBTop extends Configured implements Tool {
   public int run(String[] args) throws Exception {
 long initialRefreshDelay = 3 * 1000;
 Mode initialMode = Mode.REGION;
+List<Field> initialFields = null;
+Field initialSortField = null;
+Boolean initialAscendingSort = null;
+List<RecordFilter> initialFilters = null;
+long numberOfIterations = Long.MAX_VALUE;
+boolean batchMode = false;
 try {
-  // Command line options
-  Options opts = new Options();
-  opts.addOption("h", "help", false,
-"Print usage; for help while the tool is running press 'h'");
-  opts.addOption("d", "delay", true,
-"The refresh delay (in seconds); default is 3 seconds");
-  opts.addOption("m", "mode", true,
-"The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
-  + ", default is r (Region)");
-
+  Options opts = getOptions();
   CommandLine commandLine = new BasicParser().parse(opts, args);
 
   if (commandLine.hasOption("help")) {
@@ -73,20 +74,6 @@ public class HBTop extends Configured implements Tool {
 return 0;
   }
 
-  if (commandLine.hasOption("delay")) {
-int delay = 0;
-try {
-  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
-} catch (NumberFormatException ignored) {
-}
-
-if (delay < 1) {
-  LOG.warn("Delay set too low or invalid, using default");
-} else {
-  initialRefreshDelay = delay * 1000L;
-}
-  }
-
   if (commandLine.hasOption("mode")) {
 String mode = commandLine.getOptionValue("mode");
 switch (mode) {
@@ -111,18 +98,150 @@ public class HBTop extends Configured implements Tool {
 break;
 }
   }
+
+  if (commandLine.hasOption("outputFieldNames")) {
+for (FieldInfo fieldInfo : initialMode.getFieldInfos()) {
+  System.out.println(fieldInfo.getField().getHeader());
+}
+return 0;
+  }
+
+  if (commandLine.hasOption("delay")) {
+int delay = 0;
+try {
+  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
+} catch (NumberFormatException ignored) {
+}
+
+if (delay < 1) {
+  LOG.warn("Delay set too low or invalid, using default");
+} else {
+  

[hbase] branch master updated (cc6c14a -> f405990)

2021-03-19 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from cc6c14a  HBASE-25594 Make easier to use graceful_stop on localhost 
mode (#3054)
 add f405990  HBASE-25678 Support nonce operations for Increment/Append in 
RowMutations and CheckAndMutate (#3064)

No new revisions were added by this update.

Summary of changes:
 .../hbase/client/AsyncBatchRpcRetryingCaller.java  |  22 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  40 +--
 .../hbase/shaded/protobuf/RequestConverter.java| 138 +++
 .../hadoop/hbase/coprocessor/RegionObserver.java   |   2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  59 +++--
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  53 ++--
 .../hbase/client/TestAsyncTableNoncedRetry.java| 268 +++--
 7 files changed, 438 insertions(+), 144 deletions(-)


[hbase] branch branch-2.4 updated: HBASE-25575 Should validate Puts in RowMutations

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new 9cfeec0  HBASE-25575 Should validate Puts in RowMutations
9cfeec0 is described below

commit 9cfeec0deb79e1e8541e237b4f02fb590556372c
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 10:46:16 2021 +0900

HBASE-25575 Should validate Puts in RowMutations

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/client/ConnectionUtils.java   | 10 -
 .../hadoop/hbase/client/RawAsyncTableImpl.java | 13 --
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 43 ++
 .../hadoop/hbase/client/TestAsyncTableBatch.java   | 51 ++
 4 files changed, 113 insertions(+), 4 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index d6f2194..245c519 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -584,7 +584,7 @@ public final class ConnectionUtils {
   }
 
   // validate for well-formedness
-  static void validatePut(Put put, int maxKeyValueSize) throws 
IllegalArgumentException {
+  static void validatePut(Put put, int maxKeyValueSize) {
 if (put.isEmpty()) {
   throw new IllegalArgumentException("No columns to insert");
 }
@@ -599,6 +599,14 @@ public final class ConnectionUtils {
 }
   }
 
+  static void validatePutsInRowMutations(RowMutations rowMutations, int 
maxKeyValueSize) {
+for (Mutation mutation : rowMutations.getMutations()) {
+  if (mutation instanceof Put) {
+validatePut((Put) mutation, maxKeyValueSize);
+  }
+}
+  }
+
   /**
* Select the priority for the rpc call.
* 
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 63ade0d..1222d83 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -22,6 +22,7 @@ import static 
org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static 
org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.validatePut;
+import static 
org.apache.hadoop.hbase.client.ConnectionUtils.validatePutsInRowMutations;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import com.google.protobuf.RpcChannel;
@@ -381,6 +382,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
   preCheck();
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -441,6 +443,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -458,9 +461,6 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate) {
-if (checkAndMutate.getAction() instanceof Put) {
-  validatePut((Put) checkAndMutate.getAction(), 
conn.connConf.getMaxKeyValueSize());
-}
 if (checkAndMutate.getAction() instanceof Put || 
checkAndMutate.getAction() instanceof Delete
   || checkAndMutate.getAction() instanceof Increment
   || checkAndMutate.getAction() instanceof Append) {
@@ -480,6 +480,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 .call();
 } else if (checkAndMutate.getAction() instanceof RowMutations) {
   RowMutations rowMutations = (RowMutations) checkAndMutate.getAction();
+  validatePutsInRowMutations(rowMutations, 
conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. 
newCaller(checkAndMutate.getRow(),
 rowMutations.getMaxPriority(), rpcTimeoutNs)
 .action((controller, loc, stub) ->
@@ -552,6 +553,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture mutateRow(Row

[hbase] branch branch-2 updated: HBASE-25575 Should validate Puts in RowMutations

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new cfbae4d  HBASE-25575 Should validate Puts in RowMutations
cfbae4d is described below

commit cfbae4d3a37e7ac4d795461c3e19406a2786838d
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 10:46:16 2021 +0900

HBASE-25575 Should validate Puts in RowMutations

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/client/ConnectionUtils.java   | 10 -
 .../hadoop/hbase/client/RawAsyncTableImpl.java | 13 --
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 43 ++
 .../hadoop/hbase/client/TestAsyncTableBatch.java   | 51 ++
 4 files changed, 113 insertions(+), 4 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index df376bb..3d97a57 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -570,7 +570,7 @@ public final class ConnectionUtils {
   }
 
   // validate for well-formedness
-  static void validatePut(Put put, int maxKeyValueSize) throws 
IllegalArgumentException {
+  static void validatePut(Put put, int maxKeyValueSize) {
 if (put.isEmpty()) {
   throw new IllegalArgumentException("No columns to insert");
 }
@@ -585,6 +585,14 @@ public final class ConnectionUtils {
 }
   }
 
+  static void validatePutsInRowMutations(RowMutations rowMutations, int 
maxKeyValueSize) {
+for (Mutation mutation : rowMutations.getMutations()) {
+  if (mutation instanceof Put) {
+validatePut((Put) mutation, maxKeyValueSize);
+  }
+}
+  }
+
   /**
* Select the priority for the rpc call.
* 
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 63ade0d..1222d83 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -22,6 +22,7 @@ import static 
org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static 
org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.validatePut;
+import static 
org.apache.hadoop.hbase.client.ConnectionUtils.validatePutsInRowMutations;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import com.google.protobuf.RpcChannel;
@@ -381,6 +382,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
   preCheck();
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -441,6 +443,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -458,9 +461,6 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate) {
-if (checkAndMutate.getAction() instanceof Put) {
-  validatePut((Put) checkAndMutate.getAction(), 
conn.connConf.getMaxKeyValueSize());
-}
 if (checkAndMutate.getAction() instanceof Put || 
checkAndMutate.getAction() instanceof Delete
   || checkAndMutate.getAction() instanceof Increment
   || checkAndMutate.getAction() instanceof Append) {
@@ -480,6 +480,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 .call();
 } else if (checkAndMutate.getAction() instanceof RowMutations) {
   RowMutations rowMutations = (RowMutations) checkAndMutate.getAction();
+  validatePutsInRowMutations(rowMutations, 
conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. 
newCaller(checkAndMutate.getRow(),
 rowMutations.getMaxPriority(), rpcTimeoutNs)
 .action((controller, loc, stub) ->
@@ -552,6 +553,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture mutateRow(Row

[hbase] branch master updated: HBASE-25575 Should validate Puts in RowMutations (#2954)

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 5fa15cf  HBASE-25575 Should validate Puts in RowMutations (#2954)
5fa15cf is described below

commit 5fa15cfde3d77e77ffb1f09d60dce4db264f3831
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 10:46:16 2021 +0900

HBASE-25575 Should validate Puts in RowMutations (#2954)

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/client/ConnectionUtils.java   | 10 -
 .../hadoop/hbase/client/RawAsyncTableImpl.java | 13 --
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 43 +++
 .../hadoop/hbase/client/TestAsyncTableBatch.java   | 50 ++
 4 files changed, 112 insertions(+), 4 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 5b8cb84..70312aa 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -482,7 +482,7 @@ public final class ConnectionUtils {
   }
 
   // validate for well-formedness
-  static void validatePut(Put put, int maxKeyValueSize) throws 
IllegalArgumentException {
+  static void validatePut(Put put, int maxKeyValueSize) {
 if (put.isEmpty()) {
   throw new IllegalArgumentException("No columns to insert");
 }
@@ -497,6 +497,14 @@ public final class ConnectionUtils {
 }
   }
 
+  static void validatePutsInRowMutations(RowMutations rowMutations, int 
maxKeyValueSize) {
+for (Mutation mutation : rowMutations.getMutations()) {
+  if (mutation instanceof Put) {
+validatePut((Put) mutation, maxKeyValueSize);
+  }
+}
+  }
+
   /**
* Select the priority for the rpc call.
* 
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 3cffad8..187ecf1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -22,6 +22,7 @@ import static 
org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static 
org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.validatePut;
+import static 
org.apache.hadoop.hbase.client.ConnectionUtils.validatePutsInRowMutations;
 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 
 import java.io.IOException;
@@ -343,6 +344,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
   preCheck();
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -403,6 +405,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
 @Override
 public CompletableFuture thenMutate(RowMutations mutation) {
+  validatePutsInRowMutations(mutation, conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. newCaller(row, 
mutation.getMaxPriority(),
 rpcTimeoutNs)
 .action((controller, loc, stub) -> 
RawAsyncTableImpl.this.mutateRow(controller,
@@ -420,9 +423,6 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate) {
-if (checkAndMutate.getAction() instanceof Put) {
-  validatePut((Put) checkAndMutate.getAction(), 
conn.connConf.getMaxKeyValueSize());
-}
 if (checkAndMutate.getAction() instanceof Put || 
checkAndMutate.getAction() instanceof Delete
   || checkAndMutate.getAction() instanceof Increment
   || checkAndMutate.getAction() instanceof Append) {
@@ -442,6 +442,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 .call();
 } else if (checkAndMutate.getAction() instanceof RowMutations) {
   RowMutations rowMutations = (RowMutations) checkAndMutate.getAction();
+  validatePutsInRowMutations(rowMutations, 
conn.connConf.getMaxKeyValueSize());
   return RawAsyncTableImpl.this. 
newCaller(checkAndMutate.getRow(),
 rowMutations.getMaxPriority(), rpcTimeoutNs)
 .action((controller, loc, stub) ->
@@ -514,6 +515,7 @@ class RawAsyncTableImpl implements 
AsyncTable {
 
   @Override
   public CompletableFuture mutateRow(Row

[hbase] branch branch-2.4 updated: HBASE-25574 Revisit put/delete/increment/append related RegionObserver methods

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
 new b78366c  HBASE-25574 Revisit put/delete/increment/append related 
RegionObserver methods
b78366c is described below

commit b78366cf505f784622ccd60a83fe5bdae91e2386
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 09:37:30 2021 +0900

HBASE-25574 Revisit put/delete/increment/append related RegionObserver 
methods

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 187 +++--
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  17 +-
 .../hbase/regionserver/RegionCoprocessorHost.java  |  57 +++
 3 files changed, 208 insertions(+), 53 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 0782dae..b1423ed 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -374,11 +374,31 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object that will be written to the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 2.4.2 and will be removed in 4.0.0. Use
+   *   {@link #prePut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void prePut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called before the client stores a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object that will be written to the wal
+   */
+  default void prePut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+prePut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called after the client stores a value.
* 
* Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
@@ -387,11 +407,28 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 2.4.2 and will be removed in 4.0.0. Use
+   *   {@link #postPut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void postPut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called after the client stores a value.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object for the wal
+   */
+  default void postPut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+postPut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called before the client deletes a value.
* 
* Call CoprocessorEnvironment#bypass to skip default actions.
@@ -403,11 +440,31 @@ public interface RegionObserver {
* @param delete The Delete object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Delete
+   * @deprecated since 2.4.2 and will be removed in 4.0.0. Use
+   *   {@link #preDelete(ObserverContext, Delete, WALEdit)} instead.
*/
+  @Deprecated
   default void preDelete(ObserverContext c, 
Delete delete,
   WALEdit edit, Durability durability) throws IOException {}
 
   /**
+   * Called before the client deletes a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'delete' beyond the life 
of this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param delete The Delete object
+   * @param edit The WALEdit object for the wal
+   */
+  default void preDelete(ObserverContext c, 
Delete delete,
+WALEdit edit) throws IOException {
+preDelete(c, delete, edit, delete.getDurability());
+  }
+
+  /**
* Called before the server updates

[hbase] branch branch-2 updated: HBASE-25574 Revisit put/delete/increment/append related RegionObserver methods

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 5356edf  HBASE-25574 Revisit put/delete/increment/append related 
RegionObserver methods
5356edf is described below

commit 5356edfe63d1deff636db6157f110a7f0b52fa0e
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 09:37:30 2021 +0900

HBASE-25574 Revisit put/delete/increment/append related RegionObserver 
methods

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 187 +++--
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  17 +-
 .../hbase/regionserver/RegionCoprocessorHost.java  |  57 +++
 3 files changed, 208 insertions(+), 53 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 0782dae..2bd7665 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -374,11 +374,31 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object that will be written to the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 2.5.0 and will be removed in 4.0.0. Use
+   *   {@link #prePut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void prePut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called before the client stores a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object that will be written to the wal
+   */
+  default void prePut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+prePut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called after the client stores a value.
* 
* Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
@@ -387,11 +407,28 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 2.5.0 and will be removed in 4.0.0. Use
+   *   {@link #postPut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void postPut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called after the client stores a value.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object for the wal
+   */
+  default void postPut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+postPut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called before the client deletes a value.
* 
* Call CoprocessorEnvironment#bypass to skip default actions.
@@ -403,11 +440,31 @@ public interface RegionObserver {
* @param delete The Delete object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Delete
+   * @deprecated since 2.5.0 and will be removed in 4.0.0. Use
+   *   {@link #preDelete(ObserverContext, Delete, WALEdit)} instead.
*/
+  @Deprecated
   default void preDelete(ObserverContext c, 
Delete delete,
   WALEdit edit, Durability durability) throws IOException {}
 
   /**
+   * Called before the client deletes a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'delete' beyond the life 
of this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param delete The Delete object
+   * @param edit The WALEdit object for the wal
+   */
+  default void preDelete(ObserverContext c, 
Delete delete,
+WALEdit edit) throws IOException {
+preDelete(c, delete, edit, delete.getDurability());
+  }
+
+  /**
* Called before the server updates

[hbase] branch master updated: HBASE-25574 Revisit put/delete/increment/append related RegionObserver methods (#2953)

2021-02-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new d8b8662  HBASE-25574 Revisit put/delete/increment/append related 
RegionObserver methods (#2953)
d8b8662 is described below

commit d8b86627ea0100837da8846b14eb60979a878f75
Author: Toshihiro Suzuki 
AuthorDate: Mon Feb 22 09:37:30 2021 +0900

HBASE-25574 Revisit put/delete/increment/append related RegionObserver 
methods (#2953)

Signed-off-by: Duo Zhang 
---
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 187 +++--
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  17 +-
 .../hbase/regionserver/RegionCoprocessorHost.java  |  57 +++
 3 files changed, 208 insertions(+), 53 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 9eac46f..757b42e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -374,11 +374,31 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object that will be written to the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 3.0.0 and will be removed in 4.0.0. Use
+   *   {@link #prePut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void prePut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called before the client stores a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object that will be written to the wal
+   */
+  default void prePut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+prePut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called after the client stores a value.
* 
* Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
@@ -387,11 +407,28 @@ public interface RegionObserver {
* @param put The Put object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Put
+   * @deprecated since 3.0.0 and will be removed in 4.0.0. Use
+   *   {@link #postPut(ObserverContext, Put, WALEdit)} instead.
*/
+  @Deprecated
   default void postPut(ObserverContext c, Put 
put, WALEdit edit,
   Durability durability) throws IOException {}
 
   /**
+   * Called after the client stores a value.
+   * 
+   * Note: Do not retain references to any Cells in 'put' beyond the life of 
this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param put The Put object
+   * @param edit The WALEdit object for the wal
+   */
+  default void postPut(ObserverContext c, Put 
put, WALEdit edit)
+throws IOException {
+postPut(c, put, edit, put.getDurability());
+  }
+
+  /**
* Called before the client deletes a value.
* 
* Call CoprocessorEnvironment#bypass to skip default actions.
@@ -403,11 +440,31 @@ public interface RegionObserver {
* @param delete The Delete object
* @param edit The WALEdit object for the wal
* @param durability Persistence guarantee for this Delete
+   * @deprecated since 3.0.0 and will be removed in 4.0.0. Use
+   *   {@link #preDelete(ObserverContext, Delete, WALEdit)} instead.
*/
+  @Deprecated
   default void preDelete(ObserverContext c, 
Delete delete,
   WALEdit edit, Durability durability) throws IOException {}
 
   /**
+   * Called before the client deletes a value.
+   * 
+   * Call CoprocessorEnvironment#bypass to skip default actions.
+   * If 'bypass' is set, we skip out on calling any subsequent chained 
coprocessors.
+   * 
+   * Note: Do not retain references to any Cells in 'delete' beyond the life 
of this invocation.
+   * If need a Cell reference for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param delete The Delete object
+   * @param edit The WALEdit object for the wal
+   */
+  default void preDelete(ObserverContext c, 
Delete delete,
+WALEdit edit) throws IOException {
+preDelete(c, delete, edit, delete.getDurability());
+  }
+
+  /**
* Called before the server

[hbase] branch branch-2 updated: HBASE-25242 Add Increment/Append support to RowMutations (#2711)

2020-11-26 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 3775464  HBASE-25242 Add Increment/Append support to RowMutations 
(#2711)
3775464 is described below

commit 3775464981b473599c6d1bb24a7eea3badf0ed03
Author: Toshihiro Suzuki 
AuthorDate: Fri Nov 27 03:53:19 2020 +0900

HBASE-25242 Add Increment/Append support to RowMutations (#2711)

Signed-off-by: Duo Zhang 
Signed-off-by: Andrew Purtell 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |   4 +-
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |   2 +-
 .../apache/hadoop/hbase/client/CheckAndMutate.java | 127 +++--
 .../org/apache/hadoop/hbase/client/HTable.java |  21 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  10 +-
 .../apache/hadoop/hbase/client/RowMutations.java   |   5 +-
 .../java/org/apache/hadoop/hbase/client/Table.java |   5 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |   8 +-
 .../hbase/shaded/protobuf/RequestConverter.java|  37 +--
 .../hbase/shaded/protobuf/ResponseConverter.java   |  85 --
 .../hadoop/hbase/rest/client/RemoteHTable.java |   2 +-
 .../hadoop/hbase/coprocessor/RegionObserver.java   |   2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 145 +-
 .../regionserver/MiniBatchOperationInProgress.java |   1 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  93 ++-
 .../apache/hadoop/hbase/regionserver/Region.java   |  12 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 146 +-
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |  44 ++-
 .../hbase/client/TestAsyncTableNoncedRetry.java|  27 ++
 .../hadoop/hbase/client/TestCheckAndMutate.java| 118 
 .../hadoop/hbase/client/TestFromClientSide3.java   |  44 ++-
 .../hadoop/hbase/client/TestFromClientSide5.java   |  41 ++-
 .../hbase/coprocessor/SimpleRegionObserver.java|  12 +
 .../coprocessor/TestRegionObserverInterface.java   |  92 ++-
 .../hadoop/hbase/regionserver/RegionAsTable.java   |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java | 305 -
 .../regionserver/TestWALEntrySinkFilter.java   |   4 +-
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   3 +-
 28 files changed, 1098 insertions(+), 299 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index 25ea143..7473ed0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -396,9 +396,9 @@ public interface AsyncTable {
* Performs multiple mutations atomically on a single row. Currently {@link 
Put} and
* {@link Delete} are supported.
* @param mutation object that specifies the set of mutations to perform 
atomically
-   * @return A {@link CompletableFuture} that always returns null when 
complete normally.
+   * @return A {@link CompletableFuture} that returns results of 
Increment/Append operations
*/
-  CompletableFuture mutateRow(RowMutations mutation);
+  CompletableFuture mutateRow(RowMutations mutation);
 
   /**
* The scan API uses the observer pattern.
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index 82e8cd5..ba2c560 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -217,7 +217,7 @@ class AsyncTableImpl implements 
AsyncTable {
   }
 
   @Override
-  public CompletableFuture mutateRow(RowMutations mutation) {
+  public CompletableFuture mutateRow(RowMutations mutation) {
 return wrap(rawTable.mutateRow(mutation));
   }
 
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
index a163c8d..47bbc53 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
@@ -17,18 +17,15 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.NavigableMap;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import

[hbase] branch master updated: HBASE-25242 Add Increment/Append support to RowMutations (#2630)

2020-11-25 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new b142f5d  HBASE-25242 Add Increment/Append support to RowMutations 
(#2630)
b142f5d is described below

commit b142f5dcd26f99337499c4a42889b09907e26c8c
Author: Toshihiro Suzuki 
AuthorDate: Thu Nov 26 13:31:43 2020 +0900

HBASE-25242 Add Increment/Append support to RowMutations (#2630)

Signed-off-by: Duo Zhang 
Signed-off-by: Andrew Purtell 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |   4 +-
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |   2 +-
 .../apache/hadoop/hbase/client/CheckAndMutate.java | 112 ++--
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  10 +-
 .../apache/hadoop/hbase/client/RowMutations.java   |   7 +-
 .../java/org/apache/hadoop/hbase/client/Table.java |   5 +-
 .../hadoop/hbase/client/TableOverAsyncTable.java   |   4 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |   8 +-
 .../hbase/shaded/protobuf/RequestConverter.java|  37 +--
 .../hbase/shaded/protobuf/ResponseConverter.java   |  83 +++---
 .../hadoop/hbase/rest/client/RemoteHTable.java |   2 +-
 .../hadoop/hbase/coprocessor/RegionObserver.java   |   2 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 145 +-
 .../regionserver/MiniBatchOperationInProgress.java |   1 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   |  93 ++-
 .../apache/hadoop/hbase/regionserver/Region.java   |  12 +-
 .../hadoop/hbase/client/DummyAsyncTable.java   |   2 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 146 +-
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |  46 +++-
 .../hbase/client/TestAsyncTableNoncedRetry.java|  27 ++
 .../hadoop/hbase/client/TestCheckAndMutate.java| 118 
 .../hadoop/hbase/client/TestFromClientSide3.java   |  46 +++-
 .../hadoop/hbase/client/TestFromClientSide5.java   |  41 ++-
 .../hbase/coprocessor/SimpleRegionObserver.java|  12 +
 .../coprocessor/TestRegionObserverInterface.java   |  92 ++-
 .../hadoop/hbase/regionserver/RegionAsTable.java   |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java | 305 -
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   3 +-
 28 files changed, 1054 insertions(+), 313 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index aae4fc7..b390909 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -396,9 +396,9 @@ public interface AsyncTable {
* Performs multiple mutations atomically on a single row. Currently {@link 
Put} and
* {@link Delete} are supported.
* @param mutation object that specifies the set of mutations to perform 
atomically
-   * @return A {@link CompletableFuture} that always returns null when 
complete normally.
+   * @return A {@link CompletableFuture} that returns results of 
Increment/Append operations
*/
-  CompletableFuture mutateRow(RowMutations mutation);
+  CompletableFuture mutateRow(RowMutations mutation);
 
   /**
* The scan API uses the observer pattern.
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index f931d67..a124467 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -218,7 +218,7 @@ class AsyncTableImpl implements 
AsyncTable {
   }
 
   @Override
-  public CompletableFuture mutateRow(RowMutations mutation) {
+  public CompletableFuture mutateRow(RowMutations mutation) {
 return wrap(rawTable.mutateRow(mutation));
   }
 
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
index f7d846b..b7f17f3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
@@ -17,14 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.NavigableMap;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -60,7 +53,7 @@ import

[hbase] branch branch-2 updated: HBASE-25160 Refactor AccessController and VisibilityController (#2506)

2020-10-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 4815740  HBASE-25160 Refactor AccessController and 
VisibilityController (#2506)
4815740 is described below

commit 481574072c1f794dad0c4455aa98a3f3c5725481
Author: Toshihiro Suzuki 
AuthorDate: Thu Oct 8 17:04:48 2020 +0900

HBASE-25160 Refactor AccessController and VisibilityController (#2506)

 Signed-off-by: stack 
---
 .../hbase/security/access/AccessController.java| 66 --
 .../security/visibility/VisibilityController.java  | 66 +-
 2 files changed, 13 insertions(+), 119 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 6cac67a..1f53e27 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -426,7 +426,6 @@ public class AccessController implements MasterCoprocessor, 
RegionCoprocessor,
 DELETE("delete"),
 CHECK_AND_PUT("checkAndPut"),
 CHECK_AND_DELETE("checkAndDelete"),
-INCREMENT_COLUMN_VALUE("incrementColumnValue"),
 APPEND("append"),
 INCREMENT("increment");
 
@@ -1503,18 +1502,27 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
   // We have a failure with table, cf and q perm checks and now giving 
a chance for cell
   // perm check
   OpType opType;
+  long timestamp;
   if (m instanceof Put) {
 checkForReservedTagPresence(user, m);
 opType = OpType.PUT;
+timestamp = m.getTimestamp();
   } else if (m instanceof Delete) {
 opType = OpType.DELETE;
+timestamp = m.getTimestamp();
+  } else if (m instanceof Increment) {
+opType = OpType.INCREMENT;
+timestamp = ((Increment) m).getTimeRange().getMax();
+  } else if (m instanceof Append) {
+opType = OpType.APPEND;
+timestamp = ((Append) m).getTimeRange().getMax();
   } else {
-// If the operation type is not Put or Delete, do nothing
+// If the operation type is not Put/Delete/Increment/Append, do 
nothing
 continue;
   }
   AuthResult authResult = null;
   if (checkCoveringPermission(user, opType, c.getEnvironment(), 
m.getRow(),
-m.getFamilyCellMap(), m.getTimestamp(), Action.WRITE)) {
+m.getFamilyCellMap(), timestamp, Action.WRITE)) {
 authResult = AuthResult.allow(opType.toString(), "Covering cell 
set",
   user, Action.WRITE, table, m.getFamilyCellMap());
   } else {
@@ -1696,32 +1704,6 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
   }
 
   @Override
-  public Result preAppendAfterRowLock(final 
ObserverContext c,
-  final Append append) throws IOException {
-if (append.getAttribute(CHECK_COVERING_PERM) != null) {
-  // We had failure with table, cf and q perm checks and now giving a 
chance for cell
-  // perm check
-  TableName table = 
c.getEnvironment().getRegion().getRegionInfo().getTable();
-  AuthResult authResult = null;
-  User user = getActiveUser(c);
-  if (checkCoveringPermission(user, OpType.APPEND, c.getEnvironment(), 
append.getRow(),
-  append.getFamilyCellMap(), append.getTimeRange().getMax(), 
Action.WRITE)) {
-authResult = AuthResult.allow(OpType.APPEND.toString(),
-"Covering cell set", user, Action.WRITE, table, 
append.getFamilyCellMap());
-  } else {
-authResult = AuthResult.deny(OpType.APPEND.toString(),
-"Covering cell set", user, Action.WRITE, table, 
append.getFamilyCellMap());
-  }
-  AccessChecker.logResult(authResult);
-  if (authorizationEnabled && !authResult.isAllowed()) {
-throw new AccessDeniedException("Insufficient permissions " +
-  authResult.toContextString());
-  }
-}
-return null;
-  }
-
-  @Override
   public Result preIncrement(final 
ObserverContext c,
   final Increment increment)
   throws IOException {
@@ -1757,32 +1739,6 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
   }
 
   @Override
-  public Result preIncrementAfterRowLock(final 
ObserverContext c,
-  final Increment increment) throws IOException {
-if (increment.getAttribute(CHECK_COVERING_PERM) != null) {
-  // We had failure with table, cf and q perm checks a

[hbase] branch master updated (2fc79e2 -> aff8bbf)

2020-10-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 2fc79e2  HBASE-23959 Fix javadoc for JDK11 (#2500)
 add aff8bbf  HBASE-25160 Refactor AccessController and 
VisibilityController (#2506)

No new revisions were added by this update.

Summary of changes:
 .../hbase/security/access/AccessController.java| 66 --
 .../security/visibility/VisibilityController.java  | 66 +-
 2 files changed, 13 insertions(+), 119 deletions(-)



[hbase] branch branch-1 updated: HBASE-25096 WAL size in RegionServer UI is wrong

2020-09-28 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 53f51f3  HBASE-25096 WAL size in RegionServer UI is wrong
53f51f3 is described below

commit 53f51f3a7fe671789f16438e0babdb9feec44b08
Author: Toshihiro Suzuki 
AuthorDate: Mon Sep 28 15:09:42 2020 +0900

HBASE-25096 WAL size in RegionServer UI is wrong

Signed-off-by: Guanghao Zhang 
---
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 5d0d75c..f4fe4e2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -769,7 +769,7 @@ class MetricsRegionServerWrapperImpl
 + (metaProvider == null ? 0 : metaProvider.getNumLogFiles());
 walFileSize =
 (provider == null ? 0 : provider.getLogFileSize())
-+ (provider == null ? 0 : provider.getLogFileSize());
++ (metaProvider == null ? 0 : metaProvider.getLogFileSize());
 // Copy over computed values so that no thread sees half computed 
values.
 numStores = tempNumStores;
 numStoreFiles = tempNumStoreFiles;



[hbase] branch branch-2.2 updated: HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

2020-09-28 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new c34f8dd  HBASE-25096 WAL size in RegionServer UI is wrong (#2456)
c34f8dd is described below

commit c34f8dd26921427ce028c163f977fba7a5e77bb5
Author: Toshihiro Suzuki 
AuthorDate: Mon Sep 28 14:47:18 2020 +0900

HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

Signed-off-by: Guanghao Zhang 
---
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 8f16e5c..70ae282 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -833,7 +833,7 @@ class MetricsRegionServerWrapperImpl
 numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) +
 (metaProvider == null ? 0 : metaProvider.getNumLogFiles());
 walFileSize = (provider == null ? 0 : provider.getLogFileSize()) +
-(provider == null ? 0 : provider.getLogFileSize());
+  (metaProvider == null ? 0 : metaProvider.getLogFileSize());
 // Copy over computed values so that no thread sees half computed 
values.
 numStores = tempNumStores;
 numStoreFiles = tempNumStoreFiles;



[hbase] branch branch-2.3 updated: HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

2020-09-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 4e08b40  HBASE-25096 WAL size in RegionServer UI is wrong (#2456)
4e08b40 is described below

commit 4e08b40bffa6e6d11b5183ec6e00437111607e05
Author: Toshihiro Suzuki 
AuthorDate: Mon Sep 28 14:47:18 2020 +0900

HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

Signed-off-by: Guanghao Zhang 
---
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 5e1431c..36d9f94 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -835,7 +835,7 @@ class MetricsRegionServerWrapperImpl
 numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) +
 (metaProvider == null ? 0 : metaProvider.getNumLogFiles());
 walFileSize = (provider == null ? 0 : provider.getLogFileSize()) +
-(provider == null ? 0 : provider.getLogFileSize());
+  (metaProvider == null ? 0 : metaProvider.getLogFileSize());
 // Copy over computed values so that no thread sees half computed 
values.
 numStores = tempNumStores;
 numStoreFiles = tempNumStoreFiles;



[hbase] branch branch-2 updated: HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

2020-09-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 4941fc0  HBASE-25096 WAL size in RegionServer UI is wrong (#2456)
4941fc0 is described below

commit 4941fc02eacf2dd27bb02b34edaef431875a97fc
Author: Toshihiro Suzuki 
AuthorDate: Mon Sep 28 14:47:18 2020 +0900

HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

Signed-off-by: Guanghao Zhang 
---
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 6425193..0f2f081 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -835,7 +835,7 @@ class MetricsRegionServerWrapperImpl
 numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) +
 (metaProvider == null ? 0 : metaProvider.getNumLogFiles());
 walFileSize = (provider == null ? 0 : provider.getLogFileSize()) +
-(provider == null ? 0 : provider.getLogFileSize());
+  (metaProvider == null ? 0 : metaProvider.getLogFileSize());
 // Copy over computed values so that no thread sees half computed 
values.
 numStores = tempNumStores;
 numStoreFiles = tempNumStoreFiles;



[hbase] branch master updated: HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

2020-09-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 86557ed  HBASE-25096 WAL size in RegionServer UI is wrong (#2456)
86557ed is described below

commit 86557edf2c0f286391898921b64624c412dcfc23
Author: Toshihiro Suzuki 
AuthorDate: Mon Sep 28 14:47:18 2020 +0900

HBASE-25096 WAL size in RegionServer UI is wrong (#2456)

Signed-off-by: Guanghao Zhang 
---
 .../hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index c4328c4..8ce2baa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -856,7 +856,7 @@ class MetricsRegionServerWrapperImpl
 numWALFiles = (provider == null ? 0 : provider.getNumLogFiles()) +
 (metaProvider == null ? 0 : metaProvider.getNumLogFiles());
 walFileSize = (provider == null ? 0 : provider.getLogFileSize()) +
-(provider == null ? 0 : provider.getLogFileSize());
+  (metaProvider == null ? 0 : metaProvider.getLogFileSize());
 // Copy over computed values so that no thread sees half computed 
values.
 numStores = tempNumStores;
 numStoreFiles = tempNumStoreFiles;



[hbase] branch master updated (9c5dbb2 -> 3c00ff5)

2020-09-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 9c5dbb2  HBASE-24764: Add support of adding default peer configs via 
hbase-site.xml for all replication peers. (#2284)
 add 3c00ff5  HBASE-23643 Add document for "HBASE-23065 [hbtop] Top-N heavy 
hitter user and client drill downs" (#2381)

No new revisions were added by this update.

Summary of changes:
 src/main/asciidoc/_chapters/hbtop.adoc | 38 +-
 1 file changed, 37 insertions(+), 1 deletion(-)



[hbase] branch master updated: HBASE-25008 Add document for "HBASE-24776 [hbtop] Support Batch mode" (#2382)

2020-09-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new e5ca9ad  HBASE-25008 Add document for "HBASE-24776 [hbtop] Support 
Batch mode" (#2382)
e5ca9ad is described below

commit e5ca9adc54f9f580f85d21d38217afa97aa79d68
Author: Toshihiro Suzuki 
AuthorDate: Fri Sep 11 15:38:13 2020 +0900

HBASE-25008 Add document for "HBASE-24776 [hbtop] Support Batch mode" 
(#2382)

Signed-off-by: stack 
---
 src/main/asciidoc/_chapters/hbtop.adoc | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/main/asciidoc/_chapters/hbtop.adoc 
b/src/main/asciidoc/_chapters/hbtop.adoc
index a587dda..b8523df 100644
--- a/src/main/asciidoc/_chapters/hbtop.adoc
+++ b/src/main/asciidoc/_chapters/hbtop.adoc
@@ -70,7 +70,13 @@ 
image::https://hbase.apache.org/hbtop-images/scrolling_metric_records.gif[Scroll
 | Argument | Description
 | -d,--delay arg | The refresh delay (in seconds); default is 3 seconds
 | -h,--help | Print usage; for help while the tool is running press `h` key
-| -m,--mode arg | The mode; `n` (Namespace)|`t` (Table)|`r` (Region)|`s` (RegionServer), default is `r` (Region)
+| -m,--mode arg | The mode; `n` (Namespace)|`t` (Table)|`r` (Region)|`s` (RegionServer), default is `r`
+| -n,--numberOfIterations arg | The number of iterations
+| -O,--outputFieldNames | Print each of the available field names on a 
separate line, then quit
+| -f,--fields arg | Show only the given fields. Specify comma 
separated fields to show multiple fields
+| -s,--sortField arg | The initial sort field. You can prepend a 
`+' or `-' to the field name to also override the sort direction. A 
leading `+' will force sorting high to low, whereas a `-' will ensure a 
low to high ordering
+| -i,--filters arg | The initial filters. Specify comma separated 
filters to set multiple filters
+| -b,--batchMode | Starts hbtop in Batch mode, which could be useful for 
sending output from hbtop to other programs or to a file. In this mode, hbtop 
will not accept input and runs until the iterations limit you've set with the 
`-n' command-line option or until killed
|===
 
 === Modes



[hbase] branch branch-2.2 updated: HBASE-24776 [hbtop] Support Batch mode

2020-09-10 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new b4bba3d  HBASE-24776 [hbtop] Support Batch mode
b4bba3d is described below

commit b4bba3dc9ef163edbc82ce3a02872d8c6247378b
Author: Toshihiro Suzuki 
AuthorDate: Thu Sep 10 17:04:57 2020 +0900

HBASE-24776 [hbtop] Support Batch mode

Signed-off-by: stack 
---
 .../java/org/apache/hadoop/hbase/hbtop/HBTop.java  | 156 +
 .../hbase/hbtop/screen/AbstractScreenView.java |   1 +
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  26 ++--
 .../hbase/hbtop/screen/top/TopScreenModel.java |  48 +--
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  48 +--
 .../hbase/hbtop/screen/top/TopScreenView.java  |  48 +--
 .../hadoop/hbase/hbtop/terminal/Terminal.java  |   2 +-
 .../hbtop/terminal/impl/batch/BatchTerminal.java   |  80 +++
 .../batch/BatchTerminalPrinter.java}   |  51 ---
 .../hbase/hbtop/screen/top/TestTopScreenModel.java |  16 +--
 .../hbtop/screen/top/TestTopScreenPresenter.java   |   3 +-
 11 files changed, 382 insertions(+), 97 deletions(-)

diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
index 34c83f5..dd3cdee 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.hbase.hbtop;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.hbtop.field.Field;
+import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
 import org.apache.hadoop.hbase.hbtop.screen.Screen;
 import org.apache.hadoop.util.Tool;
@@ -56,17 +62,14 @@ public class HBTop extends Configured implements Tool {
   public int run(String[] args) throws Exception {
 long initialRefreshDelay = 3 * 1000;
 Mode initialMode = Mode.REGION;
+List<Field> initialFields = null;
+Field initialSortField = null;
+Boolean initialAscendingSort = null;
+List<RecordFilter> initialFilters = null;
+long numberOfIterations = Long.MAX_VALUE;
+boolean batchMode = false;
 try {
-  // Command line options
-  Options opts = new Options();
-  opts.addOption("h", "help", false,
-"Print usage; for help while the tool is running press 'h'");
-  opts.addOption("d", "delay", true,
-"The refresh delay (in seconds); default is 3 seconds");
-  opts.addOption("m", "mode", true,
-"The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
-  + ", default is r (Region)");
-
+  Options opts = getOptions();
   CommandLine commandLine = new DefaultParser().parse(opts, args);
 
   if (commandLine.hasOption("help")) {
@@ -74,20 +77,6 @@ public class HBTop extends Configured implements Tool {
 return 0;
   }
 
-  if (commandLine.hasOption("delay")) {
-int delay = 0;
-try {
-  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
-} catch (NumberFormatException ignored) {
-}
-
-if (delay < 1) {
-  LOGGER.warn("Delay set too low or invalid, using default");
-} else {
-  initialRefreshDelay = delay * 1000L;
-}
-  }
-
   if (commandLine.hasOption("mode")) {
 String mode = commandLine.getOptionValue("mode");
 switch (mode) {
@@ -112,18 +101,135 @@ public class HBTop extends Configured implements Tool {
 break;
 }
   }
+
+  if (commandLine.hasOption("outputFieldNames")) {
+initialMode.getFieldInfos().forEach(f -> 
System.out.println(f.getField().getHeader()));
+return 0;
+  }
+
+  if (commandLine.hasOption("delay")) {
+int delay = 0;
+try {
+  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
+} catch (NumberFormatException ignored) {
+}
+
+if (delay < 1) {
+  LOGGER.warn("Delay set too low or invalid, using default");
+} else {
+  initialRefreshDelay = delay * 1000L;
+}
+  }
+
+  if (commandLine.hasOption("numberOfIterations")) {
+try {
+

[hbase] branch branch-2.3 updated: HBASE-24776 [hbtop] Support Batch mode (#2291)

2020-09-10 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new f382923  HBASE-24776 [hbtop] Support Batch mode (#2291)
f382923 is described below

commit f38292307c0972cf50cd0b2d2bae3a6f79d2accf
Author: Toshihiro Suzuki 
AuthorDate: Thu Sep 10 17:04:57 2020 +0900

HBASE-24776 [hbtop] Support Batch mode (#2291)

Signed-off-by: stack 
---
 .../java/org/apache/hadoop/hbase/hbtop/HBTop.java  | 164 +
 .../hbase/hbtop/screen/AbstractScreenView.java |   1 +
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  26 +++-
 .../hbase/hbtop/screen/top/TopScreenModel.java |  49 --
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  48 --
 .../hbase/hbtop/screen/top/TopScreenView.java  |  48 --
 .../hadoop/hbase/hbtop/terminal/Terminal.java  |   2 +-
 .../hbtop/terminal/impl/batch/BatchTerminal.java   |  80 ++
 .../batch/BatchTerminalPrinter.java}   |  51 ---
 .../hbase/hbtop/screen/top/TestTopScreenModel.java |  16 +-
 .../hbtop/screen/top/TestTopScreenPresenter.java   |   3 +-
 11 files changed, 390 insertions(+), 98 deletions(-)

diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
index 34c83f5..9c1a000 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.hbase.hbtop;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.hbtop.field.Field;
+import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
 import org.apache.hadoop.hbase.hbtop.screen.Screen;
 import org.apache.hadoop.util.Tool;
@@ -56,17 +62,14 @@ public class HBTop extends Configured implements Tool {
   public int run(String[] args) throws Exception {
 long initialRefreshDelay = 3 * 1000;
 Mode initialMode = Mode.REGION;
+List<Field> initialFields = null;
+Field initialSortField = null;
+Boolean initialAscendingSort = null;
+List<RecordFilter> initialFilters = null;
+long numberOfIterations = Long.MAX_VALUE;
+boolean batchMode = false;
 try {
-  // Command line options
-  Options opts = new Options();
-  opts.addOption("h", "help", false,
-"Print usage; for help while the tool is running press 'h'");
-  opts.addOption("d", "delay", true,
-"The refresh delay (in seconds); default is 3 seconds");
-  opts.addOption("m", "mode", true,
-"The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
-  + ", default is r (Region)");
-
+  Options opts = getOptions();
   CommandLine commandLine = new DefaultParser().parse(opts, args);
 
   if (commandLine.hasOption("help")) {
@@ -74,20 +77,6 @@ public class HBTop extends Configured implements Tool {
 return 0;
   }
 
-  if (commandLine.hasOption("delay")) {
-int delay = 0;
-try {
-  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
-} catch (NumberFormatException ignored) {
-}
-
-if (delay < 1) {
-  LOGGER.warn("Delay set too low or invalid, using default");
-} else {
-  initialRefreshDelay = delay * 1000L;
-}
-  }
-
   if (commandLine.hasOption("mode")) {
 String mode = commandLine.getOptionValue("mode");
 switch (mode) {
@@ -107,23 +96,148 @@ public class HBTop extends Configured implements Tool {
 initialMode = Mode.REGION_SERVER;
 break;
 
+  case "u":
+initialMode = Mode.USER;
+break;
+
+  case "c":
+initialMode = Mode.CLIENT;
+break;
+
   default:
 LOGGER.warn("Mode set invalid, using default");
 break;
 }
   }
+
+  if (commandLine.hasOption("outputFieldNames")) {
+initialMode.getFieldInfos().forEach(f -> 
System.out.println(f.getField().getHeader()));
+return 0;
+  }
+
+  if (commandLine.hasOption("delay")) {
+int delay = 0;
+try {
+  delay = Integer.parseInt(commandLine.getOptionValue("delay"));

[hbase] branch branch-2 updated: HBASE-24776 [hbtop] Support Batch mode (#2291)

2020-09-10 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2a63d46  HBASE-24776 [hbtop] Support Batch mode (#2291)
2a63d46 is described below

commit 2a63d467f4b2cceb37786dbbb756a6091b69695b
Author: Toshihiro Suzuki 
AuthorDate: Thu Sep 10 17:04:57 2020 +0900

HBASE-24776 [hbtop] Support Batch mode (#2291)

Signed-off-by: stack 
---
 .../java/org/apache/hadoop/hbase/hbtop/HBTop.java  | 164 +
 .../hbase/hbtop/screen/AbstractScreenView.java |   1 +
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  26 +++-
 .../hbase/hbtop/screen/top/TopScreenModel.java |  49 --
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  48 --
 .../hbase/hbtop/screen/top/TopScreenView.java  |  48 --
 .../hadoop/hbase/hbtop/terminal/Terminal.java  |   2 +-
 .../hbtop/terminal/impl/batch/BatchTerminal.java   |  80 ++
 .../batch/BatchTerminalPrinter.java}   |  51 ---
 .../hbase/hbtop/screen/top/TestTopScreenModel.java |  16 +-
 .../hbtop/screen/top/TestTopScreenPresenter.java   |   3 +-
 11 files changed, 390 insertions(+), 98 deletions(-)

diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
index 34c83f5..9c1a000 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.hbase.hbtop;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.hbtop.field.Field;
+import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
 import org.apache.hadoop.hbase.hbtop.screen.Screen;
 import org.apache.hadoop.util.Tool;
@@ -56,17 +62,14 @@ public class HBTop extends Configured implements Tool {
   public int run(String[] args) throws Exception {
 long initialRefreshDelay = 3 * 1000;
 Mode initialMode = Mode.REGION;
+List<Field> initialFields = null;
+Field initialSortField = null;
+Boolean initialAscendingSort = null;
+List<RecordFilter> initialFilters = null;
+long numberOfIterations = Long.MAX_VALUE;
+boolean batchMode = false;
 try {
-  // Command line options
-  Options opts = new Options();
-  opts.addOption("h", "help", false,
-"Print usage; for help while the tool is running press 'h'");
-  opts.addOption("d", "delay", true,
-"The refresh delay (in seconds); default is 3 seconds");
-  opts.addOption("m", "mode", true,
-"The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
-  + ", default is r (Region)");
-
+  Options opts = getOptions();
   CommandLine commandLine = new DefaultParser().parse(opts, args);
 
   if (commandLine.hasOption("help")) {
@@ -74,20 +77,6 @@ public class HBTop extends Configured implements Tool {
 return 0;
   }
 
-  if (commandLine.hasOption("delay")) {
-int delay = 0;
-try {
-  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
-} catch (NumberFormatException ignored) {
-}
-
-if (delay < 1) {
-  LOGGER.warn("Delay set too low or invalid, using default");
-} else {
-  initialRefreshDelay = delay * 1000L;
-}
-  }
-
   if (commandLine.hasOption("mode")) {
 String mode = commandLine.getOptionValue("mode");
 switch (mode) {
@@ -107,23 +96,148 @@ public class HBTop extends Configured implements Tool {
 initialMode = Mode.REGION_SERVER;
 break;
 
+  case "u":
+initialMode = Mode.USER;
+break;
+
+  case "c":
+initialMode = Mode.CLIENT;
+break;
+
   default:
 LOGGER.warn("Mode set invalid, using default");
 break;
 }
   }
+
+  if (commandLine.hasOption("outputFieldNames")) {
+initialMode.getFieldInfos().forEach(f -> 
System.out.println(f.getField().getHeader()));
+return 0;
+  }
+
+  if (commandLine.hasOption("delay")) {
+int delay = 0;
+try {
+  delay = Integer.parseInt(commandLine.getOptionValue("delay"));

[hbase] branch master updated: HBASE-24776 [hbtop] Support Batch mode (#2291)

2020-09-10 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new bc15b61  HBASE-24776 [hbtop] Support Batch mode (#2291)
bc15b61 is described below

commit bc15b619275a87d508a68679183034113df29b70
Author: Toshihiro Suzuki 
AuthorDate: Thu Sep 10 17:04:57 2020 +0900

HBASE-24776 [hbtop] Support Batch mode (#2291)

Signed-off-by: stack 
---
 .../java/org/apache/hadoop/hbase/hbtop/HBTop.java  | 164 +
 .../hbase/hbtop/screen/AbstractScreenView.java |   1 +
 .../apache/hadoop/hbase/hbtop/screen/Screen.java   |  26 +++-
 .../hbase/hbtop/screen/top/TopScreenModel.java |  49 --
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |  48 --
 .../hbase/hbtop/screen/top/TopScreenView.java  |  48 --
 .../hadoop/hbase/hbtop/terminal/Terminal.java  |   2 +-
 .../hbtop/terminal/impl/batch/BatchTerminal.java   |  80 ++
 .../batch/BatchTerminalPrinter.java}   |  51 ---
 .../hbase/hbtop/screen/top/TestTopScreenModel.java |  16 +-
 .../hbtop/screen/top/TestTopScreenPresenter.java   |   3 +-
 11 files changed, 390 insertions(+), 98 deletions(-)

diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
index 34c83f5..9c1a000 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java
@@ -17,11 +17,17 @@
  */
 package org.apache.hadoop.hbase.hbtop;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.hbtop.field.Field;
+import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
 import org.apache.hadoop.hbase.hbtop.screen.Screen;
 import org.apache.hadoop.util.Tool;
@@ -56,17 +62,14 @@ public class HBTop extends Configured implements Tool {
   public int run(String[] args) throws Exception {
 long initialRefreshDelay = 3 * 1000;
 Mode initialMode = Mode.REGION;
+List<Field> initialFields = null;
+Field initialSortField = null;
+Boolean initialAscendingSort = null;
+List<RecordFilter> initialFilters = null;
+long numberOfIterations = Long.MAX_VALUE;
+boolean batchMode = false;
 try {
-  // Command line options
-  Options opts = new Options();
-  opts.addOption("h", "help", false,
-"Print usage; for help while the tool is running press 'h'");
-  opts.addOption("d", "delay", true,
-"The refresh delay (in seconds); default is 3 seconds");
-  opts.addOption("m", "mode", true,
-"The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)"
-  + ", default is r (Region)");
-
+  Options opts = getOptions();
   CommandLine commandLine = new DefaultParser().parse(opts, args);
 
   if (commandLine.hasOption("help")) {
@@ -74,20 +77,6 @@ public class HBTop extends Configured implements Tool {
 return 0;
   }
 
-  if (commandLine.hasOption("delay")) {
-int delay = 0;
-try {
-  delay = Integer.parseInt(commandLine.getOptionValue("delay"));
-} catch (NumberFormatException ignored) {
-}
-
-if (delay < 1) {
-  LOGGER.warn("Delay set too low or invalid, using default");
-} else {
-  initialRefreshDelay = delay * 1000L;
-}
-  }
-
   if (commandLine.hasOption("mode")) {
 String mode = commandLine.getOptionValue("mode");
 switch (mode) {
@@ -107,23 +96,148 @@ public class HBTop extends Configured implements Tool {
 initialMode = Mode.REGION_SERVER;
 break;
 
+  case "u":
+initialMode = Mode.USER;
+break;
+
+  case "c":
+initialMode = Mode.CLIENT;
+break;
+
   default:
 LOGGER.warn("Mode set invalid, using default");
 break;
 }
   }
+
+  if (commandLine.hasOption("outputFieldNames")) {
+initialMode.getFieldInfos().forEach(f -> 
System.out.println(f.getField().getHeader()));
+return 0;
+  }
+
+  if (commandLine.hasOption("delay")) {
+int delay = 0;
+try {
+  delay = Integer.parseInt(commandLine.getOptionValue("delay"));

[hbase] branch master updated (a589e55 -> d48c732)

2020-09-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from a589e55  HBASE-24992 log after Generator success when running ITBLL 
(#2358)
 add d48c732  HBASE-24602 Add Increment and Append support to 
CheckAndMutate (#2228)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/hbase/client/CheckAndMutate.java |  33 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |   7 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |   4 +
 .../hbase/shaded/protobuf/RequestConverter.java|  63 +-
 .../hbase/shaded/protobuf/ResponseConverter.java   |   4 +-
 .../hadoop/hbase/coprocessor/RegionObserver.java   |  19 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 806 +++--
 .../regionserver/MiniBatchOperationInProgress.java |  18 +
 .../hadoop/hbase/regionserver/OperationStatus.java |  26 +-
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 162 +
 .../apache/hadoop/hbase/regionserver/Region.java   |   3 +-
 .../hbase/security/access/AccessController.java|   5 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 230 ++
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |  35 +-
 .../hadoop/hbase/client/TestCheckAndMutate.java| 219 ++
 .../hadoop/hbase/client/TestFromClientSide3.java   |  35 +-
 .../hbase/regionserver/TestAtomicOperation.java|   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java | 137 +++-
 18 files changed, 1258 insertions(+), 550 deletions(-)



[hbase] branch branch-2.2 updated: HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D options from command line parameters (#2275)

2020-08-19 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 0b4c9f1  HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should 
accept -D options from command line parameters (#2275)
0b4c9f1 is described below

commit 0b4c9f17ffc8bd172f98176a13db30825061a801
Author: Toshihiro Suzuki 
AuthorDate: Wed Aug 19 19:55:34 2020 +0900

HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D 
options from command line parameters (#2275)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/tool/LoadIncrementalHFiles.java   | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
index f96562a..5a4b979 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
@@ -143,16 +143,16 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   // above. It is invalid family name.
   static final String TMP_DIR = ".tmp";
 
-  private final int maxFilesPerRegionPerFamily;
-  private final boolean assignSeqIds;
+  private int maxFilesPerRegionPerFamily;
+  private boolean assignSeqIds;
   private boolean bulkLoadByFamily;
 
   // Source delegation token
-  private final FsDelegationToken fsDelegationToken;
-  private final UserProvider userProvider;
-  private final int nrThreads;
+  private FsDelegationToken fsDelegationToken;
+  private UserProvider userProvider;
+  private int nrThreads;
   private AtomicInteger numRetries;
-  private final RpcControllerFactory rpcControllerFactory;
+  private RpcControllerFactory rpcControllerFactory;
 
   private String bulkToken;
 
@@ -181,7 +181,11 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   public LoadIncrementalHFiles(Configuration conf) {
 // make a copy, just to be sure we're not overriding someone else's config
 super(HBaseConfiguration.create(conf));
-conf = getConf();
+initialize();
+  }
+
+  public void initialize() {
+Configuration conf = getConf();
 // disable blockcache for tool invocation, see HBASE-10500
 conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
 userProvider = UserProvider.instantiate(conf);
@@ -1248,10 +1252,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   usage();
   return -1;
 }
+// Re-initialize to apply -D options from the command line parameters
+initialize();
 String dirPath = args[0];
 TableName tableName = TableName.valueOf(args[1]);
-
-
 if (args.length == 2) {
   return !run(dirPath, tableName).isEmpty() ? 0 : -1;
 } else {



[hbase] branch branch-2.3 updated: HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D options from command line parameters (#2275)

2020-08-19 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 4f6e44f  HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should 
accept -D options from command line parameters (#2275)
4f6e44f is described below

commit 4f6e44fe7925c5d0b137073a16faa4d5196468a2
Author: Toshihiro Suzuki 
AuthorDate: Wed Aug 19 19:55:34 2020 +0900

HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D 
options from command line parameters (#2275)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/tool/LoadIncrementalHFiles.java   | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
index d89756e..ddc857c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
@@ -146,16 +146,16 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   // above. It is invalid family name.
   static final String TMP_DIR = ".tmp";
 
-  private final int maxFilesPerRegionPerFamily;
-  private final boolean assignSeqIds;
+  private int maxFilesPerRegionPerFamily;
+  private boolean assignSeqIds;
   private boolean bulkLoadByFamily;
 
   // Source delegation token
-  private final FsDelegationToken fsDelegationToken;
-  private final UserProvider userProvider;
-  private final int nrThreads;
+  private FsDelegationToken fsDelegationToken;
+  private UserProvider userProvider;
+  private int nrThreads;
   private AtomicInteger numRetries;
-  private final RpcControllerFactory rpcControllerFactory;
+  private RpcControllerFactory rpcControllerFactory;
 
   private String bulkToken;
 
@@ -184,7 +184,11 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   public LoadIncrementalHFiles(Configuration conf) {
 // make a copy, just to be sure we're not overriding someone else's config
 super(HBaseConfiguration.create(conf));
-conf = getConf();
+initialize();
+  }
+
+  public void initialize() {
+Configuration conf = getConf();
 // disable blockcache for tool invocation, see HBASE-10500
 conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
 userProvider = UserProvider.instantiate(conf);
@@ -1252,10 +1256,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   usage();
   return -1;
 }
+// Re-initialize to apply -D options from the command line parameters
+initialize();
 String dirPath = args[0];
 TableName tableName = TableName.valueOf(args[1]);
-
-
 if (args.length == 2) {
   return !run(dirPath, tableName).isEmpty() ? 0 : -1;
 } else {



[hbase] branch branch-2 updated: HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D options from command line parameters (#2275)

2020-08-19 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 00aa3bc  HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should 
accept -D options from command line parameters (#2275)
00aa3bc is described below

commit 00aa3bc9fcb22304437244b90c12c94ee2971e13
Author: Toshihiro Suzuki 
AuthorDate: Wed Aug 19 19:55:34 2020 +0900

HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D 
options from command line parameters (#2275)

Signed-off-by: Peter Somogyi 
---
 .../hadoop/hbase/tool/LoadIncrementalHFiles.java   | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
index d89756e..ddc857c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/LoadIncrementalHFiles.java
@@ -146,16 +146,16 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   // above. It is invalid family name.
   static final String TMP_DIR = ".tmp";
 
-  private final int maxFilesPerRegionPerFamily;
-  private final boolean assignSeqIds;
+  private int maxFilesPerRegionPerFamily;
+  private boolean assignSeqIds;
   private boolean bulkLoadByFamily;
 
   // Source delegation token
-  private final FsDelegationToken fsDelegationToken;
-  private final UserProvider userProvider;
-  private final int nrThreads;
+  private FsDelegationToken fsDelegationToken;
+  private UserProvider userProvider;
+  private int nrThreads;
   private AtomicInteger numRetries;
-  private final RpcControllerFactory rpcControllerFactory;
+  private RpcControllerFactory rpcControllerFactory;
 
   private String bulkToken;
 
@@ -184,7 +184,11 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   public LoadIncrementalHFiles(Configuration conf) {
 // make a copy, just to be sure we're not overriding someone else's config
 super(HBaseConfiguration.create(conf));
-conf = getConf();
+initialize();
+  }
+
+  public void initialize() {
+Configuration conf = getConf();
 // disable blockcache for tool invocation, see HBASE-10500
 conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
 userProvider = UserProvider.instantiate(conf);
@@ -1252,10 +1256,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   usage();
   return -1;
 }
+// Re-initialize to apply -D options from the command line parameters
+initialize();
 String dirPath = args[0];
 TableName tableName = TableName.valueOf(args[1]);
-
-
 if (args.length == 2) {
   return !run(dirPath, tableName).isEmpty() ? 0 : -1;
 } else {



[hbase] branch master updated: HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D options from command line parameters (#2260)

2020-08-18 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 1164531  HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should 
accept -D options from command line parameters (#2260)
1164531 is described below

commit 1164531d5ab519ab58af82ba3849f8fcded3453f
Author: Toshihiro Suzuki 
AuthorDate: Wed Aug 19 13:36:17 2020 +0900

HBASE-24884 BulkLoadHFilesTool/LoadIncrementalHFiles should accept -D 
options from command line parameters (#2260)

Signed-off-by: Peter Somogyi 
---
 .../apache/hadoop/hbase/tool/BulkLoadHFilesTool.java| 17 -
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index e8b701b..6c62a3d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -129,14 +129,14 @@ public class BulkLoadHFilesTool extends Configured 
implements BulkLoadHFiles, To
   // above. It is invalid family name.
   static final String TMP_DIR = ".tmp";
 
-  private final int maxFilesPerRegionPerFamily;
-  private final boolean assignSeqIds;
+  private int maxFilesPerRegionPerFamily;
+  private boolean assignSeqIds;
   private boolean bulkLoadByFamily;
 
   // Source delegation token
-  private final FsDelegationToken fsDelegationToken;
-  private final UserProvider userProvider;
-  private final int nrThreads;
+  private FsDelegationToken fsDelegationToken;
+  private UserProvider userProvider;
+  private int nrThreads;
   private final AtomicInteger numRetries = new AtomicInteger(0);
   private String bulkToken;
 
@@ -146,6 +146,11 @@ public class BulkLoadHFilesTool extends Configured 
implements BulkLoadHFiles, To
   public BulkLoadHFilesTool(Configuration conf) {
 // make a copy, just to be sure we're not overriding someone else's config
 super(new Configuration(conf));
+initialize();
+  }
+
+  public void initialize() {
+Configuration conf = getConf();
 // disable blockcache for tool invocation, see HBASE-10500
 conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
 userProvider = UserProvider.instantiate(conf);
@@ -1073,6 +1078,8 @@ public class BulkLoadHFilesTool extends Configured 
implements BulkLoadHFiles, To
   usage();
   return -1;
 }
+// Re-initialize to apply -D options from the command line parameters
+initialize();
 Path dirPath = new Path(args[0]);
 TableName tableName = TableName.valueOf(args[1]);
 if (args.length == 2) {



[hbase] branch branch-2 updated: HBASE-24680 Refactor the checkAndMutate code on the server side (#2184)

2020-08-10 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 22bf9a3  HBASE-24680 Refactor the checkAndMutate code on the server 
side (#2184)
22bf9a3 is described below

commit 22bf9a38c97f73bc1507c7de86af032500ead1ac
Author: Toshihiro Suzuki 
AuthorDate: Mon Aug 10 18:57:17 2020 +0900

HBASE-24680 Refactor the checkAndMutate code on the server side (#2184)

Signed-off-by: Duo Zhang 
Signed-off-by: Josh Elser 
---
 .../apache/hadoop/hbase/client/CheckAndMutate.java |  11 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  71 +++
 .../regionserver/MetricsRegionServerSource.java|   7 +
 .../MetricsRegionServerSourceImpl.java |   7 +
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 189 
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 133 --
 .../hbase/regionserver/MetricsRegionServer.java|   4 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 428 +++--
 .../apache/hadoop/hbase/regionserver/Region.java   |  44 ++
 .../hbase/regionserver/RegionCoprocessorHost.java  | 311 ++---
 .../hbase/coprocessor/SimpleRegionObserver.java|  39 ++
 .../coprocessor/TestRegionObserverInterface.java   |  73 ++-
 .../hadoop/hbase/regionserver/TestHRegion.java | 512 -
 .../regionserver/TestMetricsRegionServer.java  |   3 +-
 14 files changed, 1213 insertions(+), 619 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
index 5fce3e2..26eb23d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
@@ -214,7 +214,7 @@ public final class CheckAndMutate extends Mutation {
 this.op = op;
 this.value = value;
 this.filter = null;
-this.timeRange = timeRange;
+this.timeRange = timeRange != null ? timeRange : TimeRange.allTime();
 this.action = action;
   }
 
@@ -225,7 +225,7 @@ public final class CheckAndMutate extends Mutation {
 this.op = null;
 this.value = null;
 this.filter = filter;
-this.timeRange = timeRange;
+this.timeRange = timeRange != null ? timeRange : TimeRange.allTime();
 this.action = action;
   }
 
@@ -265,6 +265,13 @@ public final class CheckAndMutate extends Mutation {
   }
 
   /**
+   * @return whether this has a filter or not
+   */
+  public boolean hasFilter() {
+return filter != null;
+  }
+
+  /**
* @return the time range to check
*/
   public TimeRange getTimeRange() {
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 09db446..c82243a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
@@ -65,6 +66,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.CheckAndMutate;
 import org.apache.hadoop.hbase.client.ClientUtil;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -84,6 +86,7 @@ import org.apache.hadoop.hbase.client.RegionLoadStats;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RegionStatesCount;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.SlowLogParams;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -3469,4 +3472,72 @@ public final class ProtobufUtil {
 return clearSlowLogResponses.getIsCleaned();
   }
 
+  public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition 
condition,
+MutationProto mutation, CellScanner cellScanner) throws IOException {
+byte[] row = condition.getRow().toByteArray();
+CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row);
+Filter filter = condition.hasFilter

[hbase] branch master updated (a3f623e -> e22a2d2)

2020-08-01 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from a3f623e  HBASE-24695 FSHLog - close the current WAL file in a 
background thread. (#2168)
 add e22a2d2  HBASE-24680 Refactor the checkAndMutate code on the server 
side (#2094)

No new revisions were added by this update.

Summary of changes:
 .../apache/hadoop/hbase/client/CheckAndMutate.java |  11 +-
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java |  72 +++
 .../regionserver/MetricsRegionServerSource.java|   7 +
 .../MetricsRegionServerSourceImpl.java |   7 +
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 189 
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 133 --
 .../hbase/regionserver/MetricsRegionServer.java|   4 +
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 428 +++--
 .../apache/hadoop/hbase/regionserver/Region.java   |  44 ++
 .../hbase/regionserver/RegionCoprocessorHost.java  | 311 ++---
 .../hbase/coprocessor/SimpleRegionObserver.java|  39 ++
 .../coprocessor/TestRegionObserverInterface.java   |  73 ++-
 .../hadoop/hbase/regionserver/TestHRegion.java | 512 -
 .../regionserver/TestMetricsRegionServer.java  |   3 +-
 14 files changed, 1214 insertions(+), 619 deletions(-)



[hbase] branch branch-1 updated: HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

2020-07-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new b0d49ae  HBASE-24775 [hbtop] StoreFile size should be rounded off 
(#2144)
b0d49ae is described below

commit b0d49aebeaf05a5cc045eeda7b819cccf7cbee9d
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 28 08:14:45 2020 +0900

HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
index db7d22f..bbfe508 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
@@ -174,9 +174,12 @@ public final class FieldValue implements 
Comparable {
   case INTEGER:
   case LONG:
   case FLOAT:
-  case SIZE:
 return value.toString();
 
+  case SIZE:
+Size size = (Size) value;
+return String.format("%.1f", size.get()) + 
size.getUnit().getSimpleName();
+
   case PERCENT:
 return String.format("%.2f", (Float) value) + "%";
 



[hbase] branch branch-2.2 updated: HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

2020-07-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new c774e36  HBASE-24775 [hbtop] StoreFile size should be rounded off 
(#2144)
c774e36 is described below

commit c774e36d25c15c1c9f09fe5669a932b810cf3c40
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 28 08:14:45 2020 +0900

HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
index 6150df9..086dadc 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
@@ -175,9 +175,12 @@ public final class FieldValue implements 
Comparable {
   case INTEGER:
   case LONG:
   case FLOAT:
-  case SIZE:
 return value.toString();
 
+  case SIZE:
+Size size = (Size) value;
+return String.format("%.1f", size.get()) + 
size.getUnit().getSimpleName();
+
   case PERCENT:
 return String.format("%.2f", (Float) value) + "%";
 



[hbase] branch branch-2.3 updated: HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

2020-07-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new ece0792  HBASE-24775 [hbtop] StoreFile size should be rounded off 
(#2144)
ece0792 is described below

commit ece0792bf4f7e04a04a83536ef279e62398219b3
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 28 08:14:45 2020 +0900

HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
index 6150df9..086dadc 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
@@ -175,9 +175,12 @@ public final class FieldValue implements 
Comparable {
   case INTEGER:
   case LONG:
   case FLOAT:
-  case SIZE:
 return value.toString();
 
+  case SIZE:
+Size size = (Size) value;
+return String.format("%.1f", size.get()) + 
size.getUnit().getSimpleName();
+
   case PERCENT:
 return String.format("%.2f", (Float) value) + "%";
 



[hbase] branch branch-2 updated: HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

2020-07-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 2720a9d  HBASE-24775 [hbtop] StoreFile size should be rounded off 
(#2144)
2720a9d is described below

commit 2720a9d93e134f0c3b752446069c174a79e673a1
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 28 08:14:45 2020 +0900

HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
index 6150df9..086dadc 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
@@ -175,9 +175,12 @@ public final class FieldValue implements 
Comparable {
   case INTEGER:
   case LONG:
   case FLOAT:
-  case SIZE:
 return value.toString();
 
+  case SIZE:
+Size size = (Size) value;
+return String.format("%.1f", size.get()) + 
size.getUnit().getSimpleName();
+
   case PERCENT:
 return String.format("%.2f", (Float) value) + "%";
 



[hbase] branch master updated: HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

2020-07-27 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 477debd  HBASE-24775 [hbtop] StoreFile size should be rounded off 
(#2144)
477debd is described below

commit 477debdc74c26275131884d91e177cd5f764bdd9
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 28 08:14:45 2020 +0900

HBASE-24775 [hbtop] StoreFile size should be rounded off (#2144)

Signed-off-by: Duo Zhang 
Signed-off-by: Viraj Jasani 
---
 .../main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
index 6150df9..086dadc 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java
@@ -175,9 +175,12 @@ public final class FieldValue implements 
Comparable {
   case INTEGER:
   case LONG:
   case FLOAT:
-  case SIZE:
 return value.toString();
 
+  case SIZE:
+Size size = (Size) value;
+return String.format("%.1f", size.get()) + 
size.getUnit().getSimpleName();
+
   case PERCENT:
 return String.format("%.2f", (Float) value) + "%";
 



[hbase] branch branch-2 updated: HBASE-24650 Change the return types of the new checkAndMutate methods introduced in HBASE-8458 (#2033)

2020-07-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new f771fd2  HBASE-24650 Change the return types of the new checkAndMutate 
methods introduced in HBASE-8458 (#2033)
f771fd2 is described below

commit f771fd26b44929623ee31c866544cbc26dad3ffe
Author: Toshihiro Suzuki 
AuthorDate: Wed Jul 8 08:00:48 2020 +0900

HBASE-24650 Change the return types of the new checkAndMutate methods 
introduced in HBASE-8458 (#2033)

Signed-off-by: Duo Zhang 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |   9 +-
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |   5 +-
 .../hadoop/hbase/client/CheckAndMutateResult.java  |  48 +++
 .../org/apache/hadoop/hbase/client/HTable.java |  77 ++-
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  53 +++
 .../java/org/apache/hadoop/hbase/client/Table.java |  12 +-
 .../hbase/shaded/protobuf/ResponseConverter.java   |  74 +++---
 .../hadoop/hbase/rest/client/RemoteHTable.java |   6 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 154 ++---
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |   4 +-
 .../hadoop/hbase/client/TestCheckAndMutate.java| 129 -
 .../hadoop/hbase/client/TestFromClientSide3.java   |   4 +-
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   5 +-
 13 files changed, 342 insertions(+), 238 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index fd10603..25ea143 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -367,7 +367,7 @@ public interface AsyncTable {
* @param checkAndMutate The CheckAndMutate object.
* @return A {@link CompletableFuture}s that represent the result for the 
CheckAndMutate.
*/
-  CompletableFuture checkAndMutate(CheckAndMutate checkAndMutate);
+  CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate);
 
   /**
* Batch version of checkAndMutate. The specified CheckAndMutates are 
batched only in the sense
@@ -378,15 +378,16 @@ public interface AsyncTable {
* @return A list of {@link CompletableFuture}s that represent the result 
for each
*   CheckAndMutate.
*/
-  List> checkAndMutate(List 
checkAndMutates);
+  List> checkAndMutate(
+List checkAndMutates);
 
   /**
* A simple version of batch checkAndMutate. It will fail if there are any 
failures.
*
* @param checkAndMutates The list of rows to apply.
-   * @return A {@link CompletableFuture} that wrapper the result boolean list.
+   * @return A {@link CompletableFuture} that wrapper the result list.
*/
-  default CompletableFuture> checkAndMutateAll(
+  default CompletableFuture> checkAndMutateAll(
 List checkAndMutates) {
 return allOf(checkAndMutate(checkAndMutates));
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index 836c9b5..82e8cd5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -205,12 +205,13 @@ class AsyncTableImpl implements 
AsyncTable {
   }
 
   @Override
-  public CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate) {
+  public CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate) {
 return wrap(rawTable.checkAndMutate(checkAndMutate));
   }
 
   @Override
-  public List> checkAndMutate(List 
checkAndMutates) {
+  public List> checkAndMutate(
+List checkAndMutates) {
 return rawTable.checkAndMutate(checkAndMutates).stream()
   .map(this::wrap).collect(toList());
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
new file mode 100644
index 000..88b438a
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or ag

[hbase] branch master updated: HBASE-24650 Change the return types of the new CheckAndMutate methods introduced in HBASE-8458 (#1991)

2020-07-06 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 34e807a  HBASE-24650 Change the return types of the new CheckAndMutate 
methods introduced in HBASE-8458 (#1991)
34e807a is described below

commit 34e807a8b51400d170ef1e876f6221df95fab30a
Author: Toshihiro Suzuki 
AuthorDate: Tue Jul 7 12:49:20 2020 +0900

HBASE-24650 Change the return types of the new CheckAndMutate methods 
introduced in HBASE-8458 (#1991)

Signed-off-by: Duo Zhang 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |   9 +-
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |   5 +-
 .../hadoop/hbase/client/CheckAndMutateResult.java  |  48 +++
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  53 +++
 .../java/org/apache/hadoop/hbase/client/Table.java |  12 +-
 .../hadoop/hbase/client/TableOverAsyncTable.java   |   7 +-
 .../hbase/shaded/protobuf/ResponseConverter.java   |  75 +++---
 .../hadoop/hbase/rest/client/RemoteHTable.java |   5 +-
 .../hadoop/hbase/client/DummyAsyncTable.java   |   5 +-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 154 ++---
 .../hadoop/hbase/client/TestAsyncTableBatch.java   |   4 +-
 .../hadoop/hbase/client/TestCheckAndMutate.java| 129 -
 .../hadoop/hbase/client/TestFromClientSide3.java   |   4 +-
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   5 +-
 14 files changed, 311 insertions(+), 204 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index b2bb2f7..aae4fc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -367,7 +367,7 @@ public interface AsyncTable {
* @param checkAndMutate The CheckAndMutate object.
* @return A {@link CompletableFuture}s that represent the result for the 
CheckAndMutate.
*/
-  CompletableFuture checkAndMutate(CheckAndMutate checkAndMutate);
+  CompletableFuture checkAndMutate(CheckAndMutate 
checkAndMutate);
 
   /**
* Batch version of checkAndMutate. The specified CheckAndMutates are 
batched only in the sense
@@ -378,15 +378,16 @@ public interface AsyncTable {
* @return A list of {@link CompletableFuture}s that represent the result 
for each
*   CheckAndMutate.
*/
-  List<CompletableFuture<Boolean>> checkAndMutate(List<CheckAndMutate> checkAndMutates);
+  List<CompletableFuture<CheckAndMutateResult>> checkAndMutate(
+    List<CheckAndMutate> checkAndMutates);
 
   /**
* A simple version of batch checkAndMutate. It will fail if there are any 
failures.
*
* @param checkAndMutates The list of rows to apply.
-   * @return A {@link CompletableFuture} that wrapper the result boolean list.
+   * @return A {@link CompletableFuture} that wrapper the result list.
*/
-  default CompletableFuture<List<Boolean>> checkAndMutateAll(
+  default CompletableFuture<List<CheckAndMutateResult>> checkAndMutateAll(
     List<CheckAndMutate> checkAndMutates) {
 return allOf(checkAndMutate(checkAndMutates));
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index 53a020e..f931d67 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -206,12 +206,13 @@ class AsyncTableImpl implements AsyncTable<ScanResultConsumer> {
   }
 
   @Override
-  public CompletableFuture<Boolean> checkAndMutate(CheckAndMutate checkAndMutate) {
+  public CompletableFuture<CheckAndMutateResult> checkAndMutate(CheckAndMutate checkAndMutate) {
 return wrap(rawTable.checkAndMutate(checkAndMutate));
   }
 
   @Override
-  public List<CompletableFuture<Boolean>> checkAndMutate(List<CheckAndMutate> checkAndMutates) {
+  public List<CompletableFuture<CheckAndMutateResult>> checkAndMutate(
+    List<CheckAndMutate> checkAndMutates) {
 return rawTable.checkAndMutate(checkAndMutates).stream()
   .map(this::wrap).collect(toList());
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
new file mode 100644
index 000..88b438a
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * U

[hbase] branch branch-2.1 updated: HBASE-24600 Empty RegionAction added to MultiRequest in case of RowMutations/CheckAndMutate batch

2020-06-24 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 72323d8  HBASE-24600 Empty RegionAction added to MultiRequest in case 
of RowMutations/CheckAndMutate batch
72323d8 is described below

commit 72323d85927e1ec051667cdd37c52704b893aeaa
Author: Toshihiro Suzuki 
AuthorDate: Thu Jun 25 09:52:44 2020 +0900

HBASE-24600 Empty RegionAction added to MultiRequest in case of 
RowMutations/CheckAndMutate batch

Signed-off-by: Guanghao Zhang 
---
 .../org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index acf5a57..8055ed7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -698,7 +698,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.
@@ -821,7 +823,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.



[hbase] branch branch-2.2 updated: HBASE-24600 Empty RegionAction added to MultiRequest in case of RowMutations/CheckAndMutate batch

2020-06-24 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 152c492  HBASE-24600 Empty RegionAction added to MultiRequest in case 
of RowMutations/CheckAndMutate batch
152c492 is described below

commit 152c492ba86cc3c0a35ac1c11f2d80d709939462
Author: Toshihiro Suzuki 
AuthorDate: Thu Jun 25 09:52:44 2020 +0900

HBASE-24600 Empty RegionAction added to MultiRequest in case of 
RowMutations/CheckAndMutate batch

Signed-off-by: Guanghao Zhang 
---
 .../org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 1c9bac7..70b6e47 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -698,7 +698,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.
@@ -821,7 +823,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.



[hbase] branch branch-2.3 updated: HBASE-24600 Empty RegionAction added to MultiRequest in case of RowMutations/CheckAndMutate batch

2020-06-24 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new 449f23c  HBASE-24600 Empty RegionAction added to MultiRequest in case 
of RowMutations/CheckAndMutate batch
449f23c is described below

commit 449f23cedc3b34125a7ed06962b829aec959aed6
Author: Toshihiro Suzuki 
AuthorDate: Thu Jun 25 09:52:44 2020 +0900

HBASE-24600 Empty RegionAction added to MultiRequest in case of 
RowMutations/CheckAndMutate batch

Signed-off-by: Guanghao Zhang 
---
 .../org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 9657d6e..e191e81 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -691,7 +691,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.
@@ -814,7 +816,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.



[hbase] branch branch-2 updated: HBASE-24600 Empty RegionAction added to MultiRequest in case of RowMutations/CheckAndMutate batch

2020-06-24 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new a701f0e  HBASE-24600 Empty RegionAction added to MultiRequest in case 
of RowMutations/CheckAndMutate batch
a701f0e is described below

commit a701f0ef32acb8095b26d190f4547e0af96b5245
Author: Toshihiro Suzuki 
AuthorDate: Thu Jun 25 09:52:44 2020 +0900

HBASE-24600 Empty RegionAction added to MultiRequest in case of 
RowMutations/CheckAndMutate batch

Signed-off-by: Guanghao Zhang 
---
 .../org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f41af09..1218aac 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -641,7 +641,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.
@@ -810,7 +812,9 @@ public final class RequestConverter {
 if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
-multiRequestBuilder.addRegionAction(builder.build());
+if (builder.getActionCount() > 0) {
+  multiRequestBuilder.addRegionAction(builder.build());
+}
 
 // Process RowMutations here. We can not process it in the big loop above 
because
 // it will corrupt the sequence order maintained in cells.



[hbase] branch master updated (f67846e -> 7fee4b5)

2020-06-24 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from f67846e  HBASE-24630 Purge dev javadoc from client bin tarball
 add 7fee4b5  HBASE-24600 Empty RegionAction added to MultiRequest in case 
of RowMutations/CheckAndMutate batch (#1938)

No new revisions were added by this update.

Summary of changes:
 .../org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)



[hbase] branch master updated: HBASE-8458 Support for batch version of checkAndMutate() (addendum) (#1899)

2020-06-14 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new d6e9c31  HBASE-8458 Support for batch version of checkAndMutate() 
(addendum) (#1899)
d6e9c31 is described below

commit d6e9c3164d8e271dcea292bb91230e544a4e6532
Author: Toshihiro Suzuki 
AuthorDate: Sun Jun 14 19:27:00 2020 +0900

HBASE-8458 Support for batch version of checkAndMutate() (addendum) (#1899)
---
 .../apache/hadoop/hbase/shaded/protobuf/RequestConverter.java  | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index a524ed3..46dc984 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -600,8 +600,6 @@ public final class RequestConverter {
 for (Action action : rowMutationsList) {
   builder.clear();
   getRegionActionBuilderWithRegion(builder, regionName);
-  actionBuilder.clear();
-  mutationBuilder.clear();
 
   buildNoDataRegionAction((RowMutations) action.getAction(), cells, 
builder, actionBuilder,
 mutationBuilder);
@@ -620,17 +618,19 @@ public final class RequestConverter {
 for (Action action : checkAndMutates) {
   builder.clear();
   getRegionActionBuilderWithRegion(builder, regionName);
-  actionBuilder.clear();
-  mutationBuilder.clear();
 
   CheckAndMutate cam = (CheckAndMutate) action.getAction();
   builder.setCondition(buildCondition(cam.getRow(), cam.getFamily(), 
cam.getQualifier(),
 cam.getCompareOp(), cam.getValue(), cam.getFilter(), 
cam.getTimeRange()));
 
   if (cam.getAction() instanceof Put) {
+actionBuilder.clear();
+mutationBuilder.clear();
 buildNoDataRegionAction((Put) cam.getAction(), cells, builder, 
actionBuilder,
   mutationBuilder);
   } else if (cam.getAction() instanceof Delete) {
+actionBuilder.clear();
+mutationBuilder.clear();
 buildNoDataRegionAction((Delete) cam.getAction(), cells, builder, 
actionBuilder,
   mutationBuilder);
   } else if (cam.getAction() instanceof RowMutations) {
@@ -693,8 +693,10 @@ public final class RequestConverter {
 throw new DoNotRetryIOException("RowMutations supports only put and 
delete, not " +
   mutation.getClass().getName());
   }
+  mutationBuilder.clear();
   MutationProto mp = ProtobufUtil.toMutationNoData(type, mutation, 
mutationBuilder);
   cells.add(mutation);
+  actionBuilder.clear();
   regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
 }
   }



[hbase] branch branch-2.1 updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new d52ffd8  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles
d52ffd8 is described below

commit d52ffd85e86a617f14b1d1a534e12c76609c24f2
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles

Signed-off-by: Anoop Sam John 
---
 .../org/apache/hadoop/hbase/regionserver/HStore.java | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 9fcb138..007b115 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -933,7 +933,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles, true);
+removeCompactedfiles(compactedfiles, true, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2610,7 +2611,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles, storeClosing);
+removeCompactedfiles(copyCompactedfiles, storeClosing, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2620,9 +2621,12 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param storeClosing
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection<HStoreFile> compactedfiles, boolean storeClosing)
-      throws IOException {
+  private void removeCompactedfiles(Collection<HStoreFile> compactedfiles, boolean storeClosing,
+      boolean evictOnClose) throws IOException {
     final List<HStoreFile> filesToRemove = new ArrayList<>(compactedfiles.size());
     final List<Long> storeFileSizes = new ArrayList<>(compactedfiles.size());
 for (final HStoreFile file : compactedfiles) {
@@ -2662,8 +2666,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
-file.closeStreamReaders(true);
+r.close(evictOnClose);
+file.closeStreamReaders(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch branch-2.2 updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new aebe6df  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles
aebe6df is described below

commit aebe6df3fced79de501b1822c77cc0517f854fbb
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles

Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/HStore.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index bb17b43..711771e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -628,7 +628,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   for (HStoreFile storeFile : results) {
 if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
   LOG.warn("Clearing the compacted storefile {} from this store", 
storeFile);
-  storeFile.getReader().close(true);
+  storeFile.getReader().close(storeFile.getCacheConf() != null ?
+  storeFile.getCacheConf().shouldEvictOnClose() : true);
   filesToRemove.add(storeFile);
 }
   }
@@ -960,7 +961,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles);
+removeCompactedfiles(compactedfiles, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2650,7 +2652,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles);
+removeCompactedfiles(copyCompactedfiles, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2660,8 +2662,10 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection compactedfiles)
+  private void removeCompactedfiles(Collection compactedfiles, 
boolean evictOnClose)
   throws IOException {
 final List filesToRemove = new 
ArrayList<>(compactedfiles.size());
 final List storeFileSizes = new ArrayList<>(compactedfiles.size());
@@ -2686,7 +2690,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
+r.close(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch branch-2.3 updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles (#1881)

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new b907648  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles (#1881)
b907648 is described below

commit b90764837268fe16233e7956af5f7f6e2d4ff952
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles (#1881)

Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/HStore.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 8acd5e2..b94de5a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -636,7 +636,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   for (HStoreFile storeFile : results) {
 if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
   LOG.warn("Clearing the compacted storefile {} from {}", storeFile, 
this);
-  storeFile.getReader().close(true);
+  storeFile.getReader().close(storeFile.getCacheConf() != null ?
+storeFile.getCacheConf().shouldEvictOnClose() : true);
   filesToRemove.add(storeFile);
 }
   }
@@ -970,7 +971,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles);
+removeCompactedfiles(compactedfiles, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2719,7 +2721,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles);
+removeCompactedfiles(copyCompactedfiles, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2729,8 +2731,10 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection compactedfiles)
+  private void removeCompactedfiles(Collection compactedfiles, 
boolean evictOnClose)
   throws IOException {
 final List filesToRemove = new 
ArrayList<>(compactedfiles.size());
 final List storeFileSizes = new ArrayList<>(compactedfiles.size());
@@ -2755,7 +2759,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
+r.close(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch branch-2 updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles (#1881)

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 043a9e8  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles (#1881)
043a9e8 is described below

commit 043a9e862f59db5e70d208aa51a9c680ea0c3993
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles (#1881)

Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/HStore.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 8acd5e2..b94de5a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -636,7 +636,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   for (HStoreFile storeFile : results) {
 if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
   LOG.warn("Clearing the compacted storefile {} from {}", storeFile, 
this);
-  storeFile.getReader().close(true);
+  storeFile.getReader().close(storeFile.getCacheConf() != null ?
+storeFile.getCacheConf().shouldEvictOnClose() : true);
   filesToRemove.add(storeFile);
 }
   }
@@ -970,7 +971,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles);
+removeCompactedfiles(compactedfiles, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2719,7 +2721,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles);
+removeCompactedfiles(copyCompactedfiles, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2729,8 +2731,10 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection compactedfiles)
+  private void removeCompactedfiles(Collection compactedfiles, 
boolean evictOnClose)
   throws IOException {
 final List filesToRemove = new 
ArrayList<>(compactedfiles.size());
 final List storeFileSizes = new ArrayList<>(compactedfiles.size());
@@ -2755,7 +2759,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
+r.close(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch master updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles (#1881)

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 6b7a93d  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles (#1881)
6b7a93d is described below

commit 6b7a93d10ce4abcf140afbdc8c8f7ca670399541
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles (#1881)

Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/HStore.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b35727e..3fbad6b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -622,7 +622,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   for (HStoreFile storeFile : results) {
 if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
   LOG.warn("Clearing the compacted storefile {} from {}", storeFile, 
this);
-  storeFile.getReader().close(true);
+  storeFile.getReader().close(storeFile.getCacheConf() != null ?
+storeFile.getCacheConf().shouldEvictOnClose() : true);
   filesToRemove.add(storeFile);
 }
   }
@@ -956,7 +957,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles);
+removeCompactedfiles(compactedfiles, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2705,7 +2707,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles);
+removeCompactedfiles(copyCompactedfiles, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2715,8 +2717,10 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection compactedfiles)
+  private void removeCompactedfiles(Collection compactedfiles, 
boolean evictOnClose)
   throws IOException {
 final List filesToRemove = new 
ArrayList<>(compactedfiles.size());
 final List storeFileSizes = new ArrayList<>(compactedfiles.size());
@@ -2741,7 +2745,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
+r.close(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch master updated: HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing compacted files and closing the storefiles (#1881)

2020-06-12 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 6b7a93d  HBASE-24529 hbase.rs.evictblocksonclose is not honored when 
removing compacted files and closing the storefiles (#1881)
6b7a93d is described below

commit 6b7a93d10ce4abcf140afbdc8c8f7ca670399541
Author: Toshihiro Suzuki 
AuthorDate: Fri Jun 12 18:59:03 2020 +0900

HBASE-24529 hbase.rs.evictblocksonclose is not honored when removing 
compacted files and closing the storefiles (#1881)

Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/HStore.java  | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b35727e..3fbad6b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -622,7 +622,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   for (HStoreFile storeFile : results) {
 if (compactedStoreFiles.contains(storeFile.getPath().getName())) {
   LOG.warn("Clearing the compacted storefile {} from {}", storeFile, 
this);
-  storeFile.getReader().close(true);
+  storeFile.getReader().close(storeFile.getCacheConf() != null ?
+storeFile.getCacheConf().shouldEvictOnClose() : true);
   filesToRemove.add(storeFile);
 }
   }
@@ -956,7 +957,8 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   storeEngine.getStoreFileManager().clearCompactedFiles();
   // clear the compacted files
   if (CollectionUtils.isNotEmpty(compactedfiles)) {
-removeCompactedfiles(compactedfiles);
+removeCompactedfiles(compactedfiles, cacheConf != null ?
+  cacheConf.shouldEvictOnClose() : true);
   }
   if (!result.isEmpty()) {
 // initialize the thread pool for closing store files in parallel.
@@ -2705,7 +2707,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 lock.readLock().unlock();
   }
   if (CollectionUtils.isNotEmpty(copyCompactedfiles)) {
-removeCompactedfiles(copyCompactedfiles);
+removeCompactedfiles(copyCompactedfiles, true);
   }
 } finally {
   archiveLock.unlock();
@@ -2715,8 +2717,10 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
   /**
* Archives and removes the compacted files
* @param compactedfiles The compacted files in this store that are not 
active in reads
+   * @param evictOnClose true if blocks should be evicted from the cache when 
an HFile reader is
+   *   closed, false if not
*/
-  private void removeCompactedfiles(Collection compactedfiles)
+  private void removeCompactedfiles(Collection compactedfiles, 
boolean evictOnClose)
   throws IOException {
 final List filesToRemove = new 
ArrayList<>(compactedfiles.size());
 final List storeFileSizes = new ArrayList<>(compactedfiles.size());
@@ -2741,7 +2745,7 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation,
 LOG.trace("Closing and archiving the file {}", file);
 // Copy the file size before closing the reader
 final long length = r.length();
-r.close(true);
+r.close(evictOnClose);
 // Just close and return
 filesToRemove.add(file);
 // Only add the length if we successfully added the file to 
`filesToRemove`



[hbase] branch branch-2.1 updated: HBASE-24515 batch Increment/Append fails when retrying the RPC

2020-06-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 1f5acc1  HBASE-24515 batch Increment/Append fails when retrying the RPC
1f5acc1 is described below

commit 1f5acc14f853d611056b08baf9ee6253a31a2770
Author: Toshihiro Suzuki 
AuthorDate: Mon Jun 8 09:51:21 2020 +0900

HBASE-24515 batch Increment/Append fails when retrying the RPC

Signed-off-by: Viraj Jasani 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 54 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 31 +++--
 .../hadoop/hbase/client/TestFromClientSide.java| 41 
 .../hbase/client/TestIncrementsFromClientSide.java | 41 
 4 files changed, 109 insertions(+), 58 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index d230117..7b328ee 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -909,60 +909,6 @@ public final class ProtobufUtil {
 throw new IOException("Unknown mutation type " + type);
   }
 
-  /**
-   * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert.
-   * @param cellScanner
-   * @return the converted client get.
-   * @throws IOException
-   */
-  public static Get toGet(final MutationProto proto, final CellScanner 
cellScanner)
-  throws IOException {
-MutationType type = proto.getMutateType();
-assert type == MutationType.INCREMENT || type == MutationType.APPEND : 
type.name();
-byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
-Get get = null;
-int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0;
-if (cellCount > 0) {
-  // The proto has metadata only and the data is separate to be found in 
the cellScanner.
-  if (cellScanner == null) {
-throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
no cellScanner: "
-+ TextFormat.shortDebugString(proto));
-  }
-  for (int i = 0; i < cellCount; i++) {
-if (!cellScanner.advance()) {
-  throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
at index " + i
-  + " no cell returned: " + TextFormat.shortDebugString(proto));
-}
-Cell cell = cellScanner.current();
-if (get == null) {
-  get = new Get(CellUtil.cloneRow(cell));
-}
-get.addColumn(CellUtil.cloneFamily(cell), 
CellUtil.cloneQualifier(cell));
-  }
-} else {
-  get = new Get(row);
-  for (ColumnValue column : proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
-for (QualifierValue qv : column.getQualifierValueList()) {
-  byte[] qualifier = qv.getQualifier().toByteArray();
-  if (!qv.hasValue()) {
-throw new DoNotRetryIOException("Missing required field: qualifier 
value");
-  }
-  get.addColumn(family, qualifier);
-}
-  }
-}
-if (proto.hasTimeRange()) {
-  TimeRange timeRange = toTimeRange(proto.getTimeRange());
-  get.setTimeRange(timeRange.getMin(), timeRange.getMax());
-}
-for (NameBytesPair attribute : proto.getAttributeList()) {
-  get.setAttribute(attribute.getName(), 
attribute.getValue().toByteArray());
-}
-return get;
-  }
-
   public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) {
 switch (readType) {
   case DEFAULT:
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2c2fb2e..05b5959 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -682,8 +682,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.append(append, nonceGroup, nonce);
 } else {
   // convert duplicate append to get
-  List results = region.get(ProtobufUtil.toGet(mutation, 
cellScanner), false,
-  nonceGroup, nonce);
+  List results = region.get(toGet(append), false, nonceGroup, 
nonce);
   r = Result.create(results);
 }
 success = true;
@@ -734,8 +733,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.increment(increment, nonceGroup, nonce);
  

[hbase] branch branch-2.2 updated: HBASE-24515 batch Increment/Append fails when retrying the RPC

2020-06-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new c3ed22b  HBASE-24515 batch Increment/Append fails when retrying the RPC
c3ed22b is described below

commit c3ed22bf74c7ec16fcb63ef7ea09465561c39727
Author: Toshihiro Suzuki 
AuthorDate: Mon Jun 8 09:51:21 2020 +0900

HBASE-24515 batch Increment/Append fails when retrying the RPC

Signed-off-by: Viraj Jasani 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 54 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 31 +++--
 .../hadoop/hbase/client/TestFromClientSide.java| 41 
 .../hbase/client/TestIncrementsFromClientSide.java | 41 
 4 files changed, 109 insertions(+), 58 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index c3996c5..97977b4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -910,60 +910,6 @@ public final class ProtobufUtil {
 throw new IOException("Unknown mutation type " + type);
   }
 
-  /**
-   * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert.
-   * @param cellScanner
-   * @return the converted client get.
-   * @throws IOException
-   */
-  public static Get toGet(final MutationProto proto, final CellScanner 
cellScanner)
-  throws IOException {
-MutationType type = proto.getMutateType();
-assert type == MutationType.INCREMENT || type == MutationType.APPEND : 
type.name();
-byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
-Get get = null;
-int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0;
-if (cellCount > 0) {
-  // The proto has metadata only and the data is separate to be found in 
the cellScanner.
-  if (cellScanner == null) {
-throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
no cellScanner: "
-+ TextFormat.shortDebugString(proto));
-  }
-  for (int i = 0; i < cellCount; i++) {
-if (!cellScanner.advance()) {
-  throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
at index " + i
-  + " no cell returned: " + TextFormat.shortDebugString(proto));
-}
-Cell cell = cellScanner.current();
-if (get == null) {
-  get = new Get(CellUtil.cloneRow(cell));
-}
-get.addColumn(CellUtil.cloneFamily(cell), 
CellUtil.cloneQualifier(cell));
-  }
-} else {
-  get = new Get(row);
-  for (ColumnValue column : proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
-for (QualifierValue qv : column.getQualifierValueList()) {
-  byte[] qualifier = qv.getQualifier().toByteArray();
-  if (!qv.hasValue()) {
-throw new DoNotRetryIOException("Missing required field: qualifier 
value");
-  }
-  get.addColumn(family, qualifier);
-}
-  }
-}
-if (proto.hasTimeRange()) {
-  TimeRange timeRange = toTimeRange(proto.getTimeRange());
-  get.setTimeRange(timeRange.getMin(), timeRange.getMax());
-}
-for (NameBytesPair attribute : proto.getAttributeList()) {
-  get.setAttribute(attribute.getName(), 
attribute.getValue().toByteArray());
-}
-return get;
-  }
-
   public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) {
 switch (readType) {
   case DEFAULT:
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 44bee45..9a02831 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -690,8 +690,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.append(append, nonceGroup, nonce);
 } else {
   // convert duplicate append to get
-  List results = region.get(ProtobufUtil.toGet(mutation, 
cellScanner), false,
-  nonceGroup, nonce);
+  List results = region.get(toGet(append), false, nonceGroup, 
nonce);
   r = Result.create(results);
 }
 success = true;
@@ -742,8 +741,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.increment(increment, nonceGroup, nonce);
  

[hbase] branch branch-2.3 updated: HBASE-24515 batch Increment/Append fails when retrying the RPC

2020-06-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new a30d3e1  HBASE-24515 batch Increment/Append fails when retrying the RPC
a30d3e1 is described below

commit a30d3e1a55fe866a1d81c782a2eff0490225d079
Author: Toshihiro Suzuki 
AuthorDate: Mon Jun 8 09:51:21 2020 +0900

HBASE-24515 batch Increment/Append fails when retrying the RPC

Signed-off-by: Viraj Jasani 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 54 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 31 +++--
 .../hadoop/hbase/client/TestFromClientSide.java| 41 
 .../hbase/client/TestIncrementsFromClientSide.java | 41 
 4 files changed, 109 insertions(+), 58 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 2c6..09db446 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -925,60 +925,6 @@ public final class ProtobufUtil {
 throw new IOException("Unknown mutation type " + type);
   }
 
-  /**
-   * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert.
-   * @param cellScanner
-   * @return the converted client get.
-   * @throws IOException
-   */
-  public static Get toGet(final MutationProto proto, final CellScanner 
cellScanner)
-  throws IOException {
-MutationType type = proto.getMutateType();
-assert type == MutationType.INCREMENT || type == MutationType.APPEND : 
type.name();
-byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
-Get get = null;
-int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0;
-if (cellCount > 0) {
-  // The proto has metadata only and the data is separate to be found in 
the cellScanner.
-  if (cellScanner == null) {
-throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
no cellScanner: "
-+ TextFormat.shortDebugString(proto));
-  }
-  for (int i = 0; i < cellCount; i++) {
-if (!cellScanner.advance()) {
-  throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
at index " + i
-  + " no cell returned: " + TextFormat.shortDebugString(proto));
-}
-Cell cell = cellScanner.current();
-if (get == null) {
-  get = new Get(CellUtil.cloneRow(cell));
-}
-get.addColumn(CellUtil.cloneFamily(cell), 
CellUtil.cloneQualifier(cell));
-  }
-} else {
-  get = new Get(row);
-  for (ColumnValue column : proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
-for (QualifierValue qv : column.getQualifierValueList()) {
-  byte[] qualifier = qv.getQualifier().toByteArray();
-  if (!qv.hasValue()) {
-throw new DoNotRetryIOException("Missing required field: qualifier 
value");
-  }
-  get.addColumn(family, qualifier);
-}
-  }
-}
-if (proto.hasTimeRange()) {
-  TimeRange timeRange = toTimeRange(proto.getTimeRange());
-  get.setTimeRange(timeRange.getMin(), timeRange.getMax());
-}
-for (NameBytesPair attribute : proto.getAttributeList()) {
-  get.setAttribute(attribute.getName(), 
attribute.getValue().toByteArray());
-}
-return get;
-  }
-
   public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) {
 switch (readType) {
   case DEFAULT:
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 8f57a37..8cab3fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -720,8 +720,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.append(append, nonceGroup, nonce);
 } else {
   // convert duplicate append to get
-  List results = region.get(ProtobufUtil.toGet(mutation, 
cellScanner), false,
-  nonceGroup, nonce);
+  List results = region.get(toGet(append), false, nonceGroup, 
nonce);
   r = Result.create(results);
 }
 success = true;
@@ -767,8 +766,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.increment(increment, nonceGroup, nonce);
  

[hbase] branch branch-2 updated: HBASE-24515 batch Increment/Append fails when retrying the RPC

2020-06-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 467efa5  HBASE-24515 batch Increment/Append fails when retrying the RPC
467efa5 is described below

commit 467efa573c9c7402d739df4dc1026e8efe3fbc71
Author: Toshihiro Suzuki 
AuthorDate: Mon Jun 8 09:51:21 2020 +0900

HBASE-24515 batch Increment/Append fails when retrying the RPC

Signed-off-by: Viraj Jasani 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 54 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 31 +++--
 .../hadoop/hbase/client/TestFromClientSide.java| 41 
 .../hbase/client/TestIncrementsFromClientSide.java | 41 
 4 files changed, 109 insertions(+), 58 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 2c6..09db446 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -925,60 +925,6 @@ public final class ProtobufUtil {
 throw new IOException("Unknown mutation type " + type);
   }
 
-  /**
-   * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert.
-   * @param cellScanner
-   * @return the converted client get.
-   * @throws IOException
-   */
-  public static Get toGet(final MutationProto proto, final CellScanner 
cellScanner)
-  throws IOException {
-MutationType type = proto.getMutateType();
-assert type == MutationType.INCREMENT || type == MutationType.APPEND : 
type.name();
-byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
-Get get = null;
-int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0;
-if (cellCount > 0) {
-  // The proto has metadata only and the data is separate to be found in 
the cellScanner.
-  if (cellScanner == null) {
-throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
no cellScanner: "
-+ TextFormat.shortDebugString(proto));
-  }
-  for (int i = 0; i < cellCount; i++) {
-if (!cellScanner.advance()) {
-  throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
at index " + i
-  + " no cell returned: " + TextFormat.shortDebugString(proto));
-}
-Cell cell = cellScanner.current();
-if (get == null) {
-  get = new Get(CellUtil.cloneRow(cell));
-}
-get.addColumn(CellUtil.cloneFamily(cell), 
CellUtil.cloneQualifier(cell));
-  }
-} else {
-  get = new Get(row);
-  for (ColumnValue column : proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
-for (QualifierValue qv : column.getQualifierValueList()) {
-  byte[] qualifier = qv.getQualifier().toByteArray();
-  if (!qv.hasValue()) {
-throw new DoNotRetryIOException("Missing required field: qualifier 
value");
-  }
-  get.addColumn(family, qualifier);
-}
-  }
-}
-if (proto.hasTimeRange()) {
-  TimeRange timeRange = toTimeRange(proto.getTimeRange());
-  get.setTimeRange(timeRange.getMin(), timeRange.getMax());
-}
-for (NameBytesPair attribute : proto.getAttributeList()) {
-  get.setAttribute(attribute.getName(), 
attribute.getValue().toByteArray());
-}
-return get;
-  }
-
   public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) {
 switch (readType) {
   case DEFAULT:
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 8f57a37..8cab3fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -720,8 +720,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.append(append, nonceGroup, nonce);
 } else {
   // convert duplicate append to get
-  List results = region.get(ProtobufUtil.toGet(mutation, 
cellScanner), false,
-  nonceGroup, nonce);
+  List results = region.get(toGet(append), false, nonceGroup, 
nonce);
   r = Result.create(results);
 }
 success = true;
@@ -767,8 +766,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.increment(increment, nonceGroup, nonce);
  

[hbase] branch master updated: HBASE-24515 batch Increment/Append fails when retrying the RPC (#1864)

2020-06-07 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 022dd96  HBASE-24515 batch Increment/Append fails when retrying the 
RPC (#1864)
022dd96 is described below

commit 022dd9687f66bd37b0f597393f018dcae7679dfc
Author: Toshihiro Suzuki 
AuthorDate: Mon Jun 8 09:51:21 2020 +0900

HBASE-24515 batch Increment/Append fails when retrying the RPC (#1864)

Signed-off-by: Viraj Jasani 
---
 .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 54 --
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 31 +--
 .../hadoop/hbase/client/TestFromClientSide.java| 64 +++---
 .../hbase/client/TestIncrementsFromClientSide.java | 56 +--
 4 files changed, 136 insertions(+), 69 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index fcdd18b..4de8778 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -970,60 +970,6 @@ public final class ProtobufUtil {
 throw new IOException("Unknown mutation type " + type);
   }
 
-  /**
-   * Convert a protocol buffer Mutate to a Get.
-   * @param proto the protocol buffer Mutate to convert.
-   * @param cellScanner
-   * @return the converted client get.
-   * @throws IOException
-   */
-  public static Get toGet(final MutationProto proto, final CellScanner 
cellScanner)
-  throws IOException {
-MutationType type = proto.getMutateType();
-assert type == MutationType.INCREMENT || type == MutationType.APPEND : 
type.name();
-byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
-Get get = null;
-int cellCount = proto.hasAssociatedCellCount() ? 
proto.getAssociatedCellCount() : 0;
-if (cellCount > 0) {
-  // The proto has metadata only and the data is separate to be found in 
the cellScanner.
-  if (cellScanner == null) {
-throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
no cellScanner: "
-+ TextFormat.shortDebugString(proto));
-  }
-  for (int i = 0; i < cellCount; i++) {
-if (!cellScanner.advance()) {
-  throw new DoNotRetryIOException("Cell count of " + cellCount + " but 
at index " + i
-  + " no cell returned: " + TextFormat.shortDebugString(proto));
-}
-Cell cell = cellScanner.current();
-if (get == null) {
-  get = new Get(CellUtil.cloneRow(cell));
-}
-get.addColumn(CellUtil.cloneFamily(cell), 
CellUtil.cloneQualifier(cell));
-  }
-} else {
-  get = new Get(row);
-  for (ColumnValue column : proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
-for (QualifierValue qv : column.getQualifierValueList()) {
-  byte[] qualifier = qv.getQualifier().toByteArray();
-  if (!qv.hasValue()) {
-throw new DoNotRetryIOException("Missing required field: qualifier 
value");
-  }
-  get.addColumn(family, qualifier);
-}
-  }
-}
-if (proto.hasTimeRange()) {
-  TimeRange timeRange = toTimeRange(proto.getTimeRange());
-  get.setTimeRange(timeRange.getMin(), timeRange.getMax());
-}
-for (NameBytesPair attribute : proto.getAttributeList()) {
-  get.setAttribute(attribute.getName(), 
attribute.getValue().toByteArray());
-}
-return get;
-  }
-
   public static ClientProtos.Scan.ReadType toReadType(Scan.ReadType readType) {
 switch (readType) {
   case DEFAULT:
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index cdb34fd..1890a4d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -724,8 +724,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.append(append, nonceGroup, nonce);
 } else {
   // convert duplicate append to get
-  List results = region.get(ProtobufUtil.toGet(mutation, 
cellScanner), false,
-  nonceGroup, nonce);
+  List results = region.get(toGet(append), false, nonceGroup, 
nonce);
   r = Result.create(results);
 }
 success = true;
@@ -771,8 +770,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   r = region.increment(increment, nonceGroup

[hbase] branch branch-2.3 updated: HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)

2020-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.3 by this push:
 new f5e9b3b  HBASE-24030 Add necessary validations to 
HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)
f5e9b3b is described below

commit f5e9b3b6f10f7c6a23aa4cce67613d6e79a0fa5e
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 22 11:55:52 2020 +0900

HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and 
HRegion.checkAndRowMutate() (#1315)

Signed-off-by: Viraj Jasani 
Signed-off-by: Jan Hentschel 
---
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 16 -
 .../hadoop/hbase/regionserver/TestHRegion.java | 73 ++
 2 files changed, 86 insertions(+), 3 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c58a591..74b3a91 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -4180,7 +4181,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   @Override
   public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, 
CompareOperator op,
 ByteArrayComparable comparator, TimeRange timeRange, Mutation mutation) 
throws IOException {
-checkMutationType(mutation, row);
 return doCheckAndRowMutate(row, family, qualifier, op, comparator, null, 
timeRange, null,
   mutation);
   }
@@ -4216,6 +4216,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // need these commented out checks.
 // if (rowMutations == null && mutation == null) throw new 
DoNotRetryIOException("Both null");
 // if (rowMutations != null && mutation != null) throw new 
DoNotRetryIOException("Both set");
+if (mutation != null) {
+  checkMutationType(mutation);
+  checkRow(mutation, row);
+} else {
+  checkRow(rowMutations, row);
+}
 checkReadOnly();
 // TODO, add check for value length also move this check to the client
 checkResources();
@@ -4331,13 +4337,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
-  private void checkMutationType(final Mutation mutation, final byte [] row)
+  private void checkMutationType(final Mutation mutation)
   throws DoNotRetryIOException {
 boolean isPut = mutation instanceof Put;
 if (!isPut && !(mutation instanceof Delete)) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action must be 
Put or Delete");
 }
-if (!Bytes.equals(row, mutation.getRow())) {
+  }
+
+  private void checkRow(final Row action, final byte[] row)
+throws DoNotRetryIOException {
+if (!Bytes.equals(row, action.getRow())) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action's getRow 
must match");
 }
   }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index f58b82a..69da92d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -2272,6 +2273,78 @@ public class TestHRegion {
 assertTrue(region.get(new Get(row).addColumn(FAMILY, 
Bytes.toBytes("A"))).isEmpty());
   }
 
+  @Test
+  public void testCheckAndMutate_wrongMutationType() throws Throwable {
+// Setting up region
+this.region = initHRegion(tableName, method, CONF, fam1);
+
+try {
+  region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new 
BinaryComparator(value1),
+new Increment(row).addColumn(fam1, qual1, 1));

[hbase] branch branch-2 updated: HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)

2020-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 5104aa8  HBASE-24030 Add necessary validations to 
HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)
5104aa8 is described below

commit 5104aa80fa5ee5b7a7a75945b0c8f1db648a2912
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 22 11:55:52 2020 +0900

HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and 
HRegion.checkAndRowMutate() (#1315)

Signed-off-by: Viraj Jasani 
Signed-off-by: Jan Hentschel 
---
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 16 -
 .../hadoop/hbase/regionserver/TestHRegion.java | 73 ++
 2 files changed, 86 insertions(+), 3 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c58a591..74b3a91 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -4180,7 +4181,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   @Override
   public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, 
CompareOperator op,
 ByteArrayComparable comparator, TimeRange timeRange, Mutation mutation) 
throws IOException {
-checkMutationType(mutation, row);
 return doCheckAndRowMutate(row, family, qualifier, op, comparator, null, 
timeRange, null,
   mutation);
   }
@@ -4216,6 +4216,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // need these commented out checks.
 // if (rowMutations == null && mutation == null) throw new 
DoNotRetryIOException("Both null");
 // if (rowMutations != null && mutation != null) throw new 
DoNotRetryIOException("Both set");
+if (mutation != null) {
+  checkMutationType(mutation);
+  checkRow(mutation, row);
+} else {
+  checkRow(rowMutations, row);
+}
 checkReadOnly();
 // TODO, add check for value length also move this check to the client
 checkResources();
@@ -4331,13 +4337,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
-  private void checkMutationType(final Mutation mutation, final byte [] row)
+  private void checkMutationType(final Mutation mutation)
   throws DoNotRetryIOException {
 boolean isPut = mutation instanceof Put;
 if (!isPut && !(mutation instanceof Delete)) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action must be 
Put or Delete");
 }
-if (!Bytes.equals(row, mutation.getRow())) {
+  }
+
+  private void checkRow(final Row action, final byte[] row)
+throws DoNotRetryIOException {
+if (!Bytes.equals(row, action.getRow())) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action's getRow 
must match");
 }
   }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index f58b82a..69da92d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -2272,6 +2273,78 @@ public class TestHRegion {
 assertTrue(region.get(new Get(row).addColumn(FAMILY, 
Bytes.toBytes("A"))).isEmpty());
   }
 
+  @Test
+  public void testCheckAndMutate_wrongMutationType() throws Throwable {
+// Setting up region
+this.region = initHRegion(tableName, method, CONF, fam1);
+
+try {
+  region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new 
BinaryComparator(value1),
+new Increment(row).addColumn(fam1, qual1, 1));
+  fail("

[hbase] branch master updated: HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)

2020-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 9b4e75a  HBASE-24030 Add necessary validations to 
HRegion.checkAndMutate() and HRegion.checkAndRowMutate() (#1315)
9b4e75a is described below

commit 9b4e75a7f6b876e00c6360dd69adb13d3200b1f2
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 22 11:55:52 2020 +0900

HBASE-24030 Add necessary validations to HRegion.checkAndMutate() and 
HRegion.checkAndRowMutate() (#1315)

Signed-off-by: Viraj Jasani 
Signed-off-by: Jan Hentschel 
---
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 16 -
 .../hadoop/hbase/regionserver/TestHRegion.java | 73 ++
 2 files changed, 86 insertions(+), 3 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index faa3a06..d13cc62 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -115,6 +115,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -4207,7 +4208,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   @Override
   public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, 
CompareOperator op,
 ByteArrayComparable comparator, TimeRange timeRange, Mutation mutation) 
throws IOException {
-checkMutationType(mutation, row);
 return doCheckAndRowMutate(row, family, qualifier, op, comparator, null, 
timeRange, null,
   mutation);
   }
@@ -4243,6 +4243,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // need these commented out checks.
 // if (rowMutations == null && mutation == null) throw new 
DoNotRetryIOException("Both null");
 // if (rowMutations != null && mutation != null) throw new 
DoNotRetryIOException("Both set");
+if (mutation != null) {
+  checkMutationType(mutation);
+  checkRow(mutation, row);
+} else {
+  checkRow(rowMutations, row);
+}
 checkReadOnly();
 // TODO, add check for value length also move this check to the client
 checkResources();
@@ -4358,13 +4364,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
-  private void checkMutationType(final Mutation mutation, final byte [] row)
+  private void checkMutationType(final Mutation mutation)
   throws DoNotRetryIOException {
 boolean isPut = mutation instanceof Put;
 if (!isPut && !(mutation instanceof Delete)) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action must be 
Put or Delete");
 }
-if (!Bytes.equals(row, mutation.getRow())) {
+  }
+
+  private void checkRow(final Row action, final byte[] row)
+throws DoNotRetryIOException {
+if (!Bytes.equals(row, action.getRow())) {
   throw new org.apache.hadoop.hbase.DoNotRetryIOException("Action's getRow 
must match");
 }
   }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 3720773..423208e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -2268,6 +2269,78 @@ public class TestHRegion {
 assertTrue(region.get(new Get(row).addColumn(FAMILY, 
Bytes.toBytes("A"))).isEmpty());
   }
 
+  @Test
+  public void testCheckAndMutate_wrongMutationType() throws Throwable {
+// Setting up region
+this.region = initHRegion(tableName, method, CONF, fam1);
+
+try {
+  region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new 
BinaryComparator(value1),
+new Increment(row).addColumn(fam1, qual1, 1));

[hbase] branch master updated: HBASE-24031 TestHRegion.testCheckAndMutate_WithFilters is flaky (#1316)

2020-03-21 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new b53fa3c  HBASE-24031 TestHRegion.testCheckAndMutate_WithFilters is 
flaky (#1316)
b53fa3c is described below

commit b53fa3c51fe9e27d0ad9f5232ff249f8832106f8
Author: Toshihiro Suzuki 
AuthorDate: Sun Mar 22 05:04:16 2020 +0900

HBASE-24031 TestHRegion.testCheckAndMutate_WithFilters is flaky (#1316)

Signed-off-by: Viraj Jasani 
---
 .../test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 05962e5..3720773 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -2210,12 +2210,12 @@ public class TestHRegion {
   ),
   new RowMutations(row)
 .add((Mutation) new Put(row)
-  .addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))
+  .addColumn(FAMILY, Bytes.toBytes("E"), Bytes.toBytes("e")))
 .add((Mutation) new Delete(row).addColumns(FAMILY, 
Bytes.toBytes("A"))));
 assertTrue(ok);
 
-result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D")));
-assertEquals("d", Bytes.toString(result.getValue(FAMILY, 
Bytes.toBytes("D";
+result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("E")));
+assertEquals("e", Bytes.toString(result.getValue(FAMILY, 
Bytes.toBytes("E";
 
 assertTrue(region.get(new Get(row).addColumn(FAMILY, 
Bytes.toBytes("A"))).isEmpty());
   }



[hbase] branch branch-2 updated: HBASE-23146 Support CheckAndMutate with multiple conditions (#1209)

2020-03-03 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new c3edceb  HBASE-23146 Support CheckAndMutate with multiple conditions 
(#1209)
c3edceb is described below

commit c3edceb6aef1c5a0bae6eeafeb74900895963a88
Author: Toshihiro Suzuki 
AuthorDate: Wed Mar 4 15:08:31 2020 +0900

HBASE-23146 Support CheckAndMutate with multiple conditions (#1209)

Signed-off-by: Duo Zhang 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |  55 ++
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |  31 
 .../org/apache/hadoop/hbase/client/HTable.java | 123 ++---
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  87 +++--
 .../java/org/apache/hadoop/hbase/client/Table.java |  48 +
 .../hbase/shaded/protobuf/RequestConverter.java|  89 -
 .../src/main/protobuf/Client.proto |   9 +-
 hbase-protocol/src/main/protobuf/Client.proto  |   9 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java |  34 ++--
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 137 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  79 ++--
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 111 +++
 .../apache/hadoop/hbase/regionserver/Region.java   |  53 ++
 .../hbase/regionserver/RegionCoprocessorHost.java  | 151 ++-
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 203 +
 .../hadoop/hbase/client/TestCheckAndMutate.java| 190 +++
 .../hbase/client/TestMalformedCellFromClient.java  |   5 +-
 .../hbase/coprocessor/SimpleRegionObserver.java|  97 --
 .../coprocessor/TestRegionObserverInterface.java   |  51 +-
 .../hadoop/hbase/regionserver/RegionAsTable.java   |   6 +
 .../hadoop/hbase/regionserver/TestHRegion.java | 123 +
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   6 +
 22 files changed, 1493 insertions(+), 204 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index bfcc187..e10f1f82 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -29,6 +29,7 @@ import java.util.function.Function;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -290,6 +291,60 @@ public interface AsyncTable<C extends ScanResultConsumerBase> {
   }
 
   /**
+   * Atomically checks if a row matches the specified filter. If it does, it 
adds the
+   * Put/Delete/RowMutations.
+   * 
+   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct 
your request and then
+   * execute it. This is a fluent style API, the code is like:
+   *
+   * 
+   * 
+   * table.checkAndMutate(row, filter).thenPut(put)
+   * .thenAccept(succ -> {
+   *   if (succ) {
+   * System.out.println("Check and put succeeded");
+   *   } else {
+   * System.out.println("Check and put failed");
+   *   }
+   * });
+   * 
+   * 
+   */
+  CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter);
+
+  /**
+   * A helper class for sending checkAndMutate request with a filter.
+   */
+  interface CheckAndMutateWithFilterBuilder {
+
+/**
+ * @param timeRange time range to check.
+ */
+CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);
+
+/**
+ * @param put data to put if check succeeds
+ * @return {@code true} if the new put was executed, {@code false} 
otherwise. The return value
+ * will be wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenPut(Put put);
+
+/**
+ * @param delete data to delete if check succeeds
+ * @return {@code true} if the new delete was executed, {@code false} 
otherwise. The return
+ * value will be wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenDelete(Delete delete);
+
+/**
+ * @param mutation mutations to perform if check succeeds
+ * @return true if the new mutation was executed, false otherwise. The 
return value will be
+ * wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenMutate(RowMutations mutation);
+  }
+
+  /**
* Performs multiple mutations atomically on a single row. Currently {@link 
Put} and
* {@link Delete} are supported.
* @param mutation object that specifies the set of mutations to perform 
atomica

[hbase] branch master updated: HBASE-23146 Support CheckAndMutate with multiple conditions (#1114)

2020-02-25 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new ecbed33  HBASE-23146 Support CheckAndMutate with multiple conditions 
(#1114)
ecbed33 is described below

commit ecbed33092ace031ad260026c7676e6c5886c267
Author: Toshihiro Suzuki 
AuthorDate: Wed Feb 26 08:09:04 2020 +0900

HBASE-23146 Support CheckAndMutate with multiple conditions (#1114)

Signed-off-by: Duo Zhang 
---
 .../org/apache/hadoop/hbase/client/AsyncTable.java |  55 ++
 .../apache/hadoop/hbase/client/AsyncTableImpl.java |  31 
 .../hadoop/hbase/client/RawAsyncTableImpl.java |  87 +++--
 .../java/org/apache/hadoop/hbase/client/Table.java |  48 +
 .../hadoop/hbase/client/TableOverAsyncTable.java   | 105 ++-
 .../hbase/shaded/protobuf/RequestConverter.java|  89 -
 .../src/main/protobuf/Client.proto |   9 +-
 hbase-protocol/src/main/protobuf/Client.proto  |   9 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java |   7 +
 .../hadoop/hbase/coprocessor/RegionObserver.java   | 137 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  81 +---
 .../hadoop/hbase/regionserver/RSRpcServices.java   | 111 +++
 .../apache/hadoop/hbase/regionserver/Region.java   |  53 ++
 .../hbase/regionserver/RegionCoprocessorHost.java  | 151 ++-
 .../hadoop/hbase/client/DummyAsyncTable.java   |   6 +
 .../apache/hadoop/hbase/client/TestAsyncTable.java | 203 +
 .../hadoop/hbase/client/TestCheckAndMutate.java| 190 +++
 .../hbase/client/TestMalformedCellFromClient.java  |   5 +-
 .../hbase/coprocessor/SimpleRegionObserver.java|  97 --
 .../coprocessor/TestRegionObserverInterface.java   |  51 +-
 .../hadoop/hbase/regionserver/RegionAsTable.java   |   6 +
 .../hadoop/hbase/regionserver/TestHRegion.java | 123 +
 .../hadoop/hbase/thrift2/client/ThriftTable.java   |   6 +
 23 files changed, 1454 insertions(+), 206 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index bfcc187..e10f1f82 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -29,6 +29,7 @@ import java.util.function.Function;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -290,6 +291,60 @@ public interface AsyncTable<C extends ScanResultConsumerBase> {
   }
 
   /**
+   * Atomically checks if a row matches the specified filter. If it does, it 
adds the
+   * Put/Delete/RowMutations.
+   * 
+   * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct 
your request and then
+   * execute it. This is a fluent style API, the code is like:
+   *
+   * 
+   * 
+   * table.checkAndMutate(row, filter).thenPut(put)
+   * .thenAccept(succ -> {
+   *   if (succ) {
+   * System.out.println("Check and put succeeded");
+   *   } else {
+   * System.out.println("Check and put failed");
+   *   }
+   * });
+   * 
+   * 
+   */
+  CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter);
+
+  /**
+   * A helper class for sending checkAndMutate request with a filter.
+   */
+  interface CheckAndMutateWithFilterBuilder {
+
+/**
+ * @param timeRange time range to check.
+ */
+CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange);
+
+/**
+ * @param put data to put if check succeeds
+ * @return {@code true} if the new put was executed, {@code false} 
otherwise. The return value
+ * will be wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenPut(Put put);
+
+/**
+ * @param delete data to delete if check succeeds
+ * @return {@code true} if the new delete was executed, {@code false} 
otherwise. The return
+ * value will be wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenDelete(Delete delete);
+
+/**
+ * @param mutation mutations to perform if check succeeds
+ * @return true if the new mutation was executed, false otherwise. The 
return value will be
+ * wrapped by a {@link CompletableFuture}.
+ */
+CompletableFuture<Boolean> thenMutate(RowMutations mutation);
+  }
+
+  /**
* Performs multiple mutations atomically on a single row. Currently {@link 
Put} and
* {@link Delete} are supported.
* @param mutation object

[hbase] branch branch-2.1 updated: HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

2020-01-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 5598d26  HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)
5598d26 is described below

commit 5598d26a4c8aa9db4989eebcc314fd5285228672
Author: Toshihiro Suzuki 
AuthorDate: Sat Jan 11 21:06:07 2020 +0900

HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

Signed-off-by: stack 
---
 bin/hbase   |  5 -
 conf/log4j-hbtop.properties |  2 +-
 .../java/org/apache/hadoop/hbase/hbtop/RecordFilter.java|  3 +++
 .../hadoop/hbase/hbtop/mode/RequestCountPerSecond.java  | 13 +++--
 .../hadoop/hbase/hbtop/screen/field/FieldScreenView.java|  4 ++--
 .../hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java   |  4 ++--
 .../hadoop/hbase/hbtop/{RecordTest.java => TestRecord.java} |  4 ++--
 .../hbtop/{RecordFilterTest.java => TestRecordFilter.java}  |  4 ++--
 .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java  |  4 ++--
 .../field/{FieldValueTest.java => TestFieldValue.java}  |  4 ++--
 .../hbtop/mode/{ModeTestBase.java => TestModeBase.java} |  2 +-
 .../mode/{NamespaceModeTest.java => TestNamespaceMode.java} |  4 ++--
 .../hbtop/mode/{RegionModeTest.java => TestRegionMode.java} |  4 ++--
 ...{RegionServerModeTest.java => TestRegionServerMode.java} |  4 ++--
 ...untPerSecondTest.java => TestRequestCountPerSecond.java} |  4 ++--
 .../hbtop/mode/{TableModeTest.java => TestTableMode.java}   |  4 ++--
 ...reenPresenterTest.java => TestFieldScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestHelpScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestModeScreenPresenter.java} |  4 ++--
 ...rTest.java => TestFilterDisplayModeScreenPresenter.java} |  4 ++--
 ...PresenterTest.java => TestInputModeScreenPresenter.java} |  4 ++--
 ...esenterTest.java => TestMessageModeScreenPresenter.java} |  4 ++--
 .../hbtop/screen/top/{PagingTest.java => TestPaging.java}   |  4 ++--
 .../{TopScreenModelTest.java => TestTopScreenModel.java}|  4 ++--
 ...ScreenPresenterTest.java => TestTopScreenPresenter.java} |  4 ++--
 .../terminal/{CursorTest.java => impl/TestCursor.java}  | 10 ++
 .../terminal/{KeyPressTest.java => impl/TestKeyPress.java}  | 10 ++
 .../TestTerminalPrinter.java}   | 11 +++
 28 files changed, 75 insertions(+), 61 deletions(-)

diff --git a/bin/hbase b/bin/hbase
index d16d3f4..b962fc2 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -637,7 +637,10 @@ elif [ "$COMMAND" = "hbtop" ] ; then
 done
   fi
 
-  HBASE_OPTS="${HBASE_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  if [ -f "${HBASE_HOME}/conf/log4j-hbtop.properties" ] ; then
+HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  fi
+  HBASE_OPTS="${HBASE_OPTS} ${HBASE_HBTOP_OPTS}"
 else
   CLASS=$COMMAND
 fi
diff --git a/conf/log4j-hbtop.properties b/conf/log4j-hbtop.properties
index 831ee18..4d68d79 100644
--- a/conf/log4j-hbtop.properties
+++ b/conf/log4j-hbtop.properties
@@ -24,4 +24,4 @@ log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: 
%m%n
 
 # ZooKeeper will still put stuff at WARN
-log4j.logger.org.apache.zookeeper=ERROR
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=ERROR
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
index aaef965..dfe01d7 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
@@ -55,6 +55,9 @@ public final class RecordFilter {
 return parse(filterString, Arrays.asList(Field.values()), ignoreCase);
   }
 
+  /*
+   * Parse a filter string and build a RecordFilter instance.
+   */
public static RecordFilter parse(String filterString, List<Field> fields, 
boolean ignoreCase) {
 int index = 0;
 
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
index 508cf82..d546070 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
@@ -41,13 +41,14 @@ public class RequestCountPerSecond {
previousFilteredReadRequestCount = filteredReadRequestCount;

[hbase] branch branch-2.2 updated: HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

2020-01-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 279c0fb  HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)
279c0fb is described below

commit 279c0fba0030260f127c87a6fd0f402f12acd51d
Author: Toshihiro Suzuki 
AuthorDate: Sat Jan 11 21:06:07 2020 +0900

HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

Signed-off-by: stack 
---
 bin/hbase   |  5 -
 conf/log4j-hbtop.properties |  2 +-
 .../java/org/apache/hadoop/hbase/hbtop/RecordFilter.java|  3 +++
 .../hadoop/hbase/hbtop/mode/RequestCountPerSecond.java  | 13 +++--
 .../hadoop/hbase/hbtop/screen/field/FieldScreenView.java|  4 ++--
 .../hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java   |  4 ++--
 .../hadoop/hbase/hbtop/{RecordTest.java => TestRecord.java} |  4 ++--
 .../hbtop/{RecordFilterTest.java => TestRecordFilter.java}  |  4 ++--
 .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java  |  4 ++--
 .../field/{FieldValueTest.java => TestFieldValue.java}  |  4 ++--
 .../hbtop/mode/{ModeTestBase.java => TestModeBase.java} |  2 +-
 .../mode/{NamespaceModeTest.java => TestNamespaceMode.java} |  4 ++--
 .../hbtop/mode/{RegionModeTest.java => TestRegionMode.java} |  4 ++--
 ...{RegionServerModeTest.java => TestRegionServerMode.java} |  4 ++--
 ...untPerSecondTest.java => TestRequestCountPerSecond.java} |  4 ++--
 .../hbtop/mode/{TableModeTest.java => TestTableMode.java}   |  4 ++--
 ...reenPresenterTest.java => TestFieldScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestHelpScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestModeScreenPresenter.java} |  4 ++--
 ...rTest.java => TestFilterDisplayModeScreenPresenter.java} |  4 ++--
 ...PresenterTest.java => TestInputModeScreenPresenter.java} |  4 ++--
 ...esenterTest.java => TestMessageModeScreenPresenter.java} |  4 ++--
 .../hbtop/screen/top/{PagingTest.java => TestPaging.java}   |  4 ++--
 .../{TopScreenModelTest.java => TestTopScreenModel.java}|  4 ++--
 ...ScreenPresenterTest.java => TestTopScreenPresenter.java} |  4 ++--
 .../terminal/{CursorTest.java => impl/TestCursor.java}  | 10 ++
 .../terminal/{KeyPressTest.java => impl/TestKeyPress.java}  | 10 ++
 .../TestTerminalPrinter.java}   | 11 +++
 28 files changed, 75 insertions(+), 61 deletions(-)

diff --git a/bin/hbase b/bin/hbase
index bdd2b11..3a297dd 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -637,7 +637,10 @@ elif [ "$COMMAND" = "hbtop" ] ; then
 done
   fi
 
-  HBASE_OPTS="${HBASE_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  if [ -f "${HBASE_HOME}/conf/log4j-hbtop.properties" ] ; then
+HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  fi
+  HBASE_OPTS="${HBASE_OPTS} ${HBASE_HBTOP_OPTS}"
 else
   CLASS=$COMMAND
 fi
diff --git a/conf/log4j-hbtop.properties b/conf/log4j-hbtop.properties
index 831ee18..4d68d79 100644
--- a/conf/log4j-hbtop.properties
+++ b/conf/log4j-hbtop.properties
@@ -24,4 +24,4 @@ log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: 
%m%n
 
 # ZooKeeper will still put stuff at WARN
-log4j.logger.org.apache.zookeeper=ERROR
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=ERROR
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
index aaef965..dfe01d7 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
@@ -55,6 +55,9 @@ public final class RecordFilter {
 return parse(filterString, Arrays.asList(Field.values()), ignoreCase);
   }
 
+  /*
+   * Parse a filter string and build a RecordFilter instance.
+   */
public static RecordFilter parse(String filterString, List<Field> fields, 
boolean ignoreCase) {
 int index = 0;
 
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
index 508cf82..d546070 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
@@ -41,13 +41,14 @@ public class RequestCountPerSecond {
   previousFilteredReadRequestCount = fil

[hbase] branch branch-2 updated: HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

2020-01-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 1ad28a6  HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)
1ad28a6 is described below

commit 1ad28a6f403b2a0d76d4bb5f6325fcbde4bc2db8
Author: Toshihiro Suzuki 
AuthorDate: Sat Jan 11 21:06:07 2020 +0900

HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

Signed-off-by: stack 
---
 bin/hbase   |  5 -
 conf/log4j-hbtop.properties |  2 +-
 .../java/org/apache/hadoop/hbase/hbtop/RecordFilter.java|  3 +++
 .../hadoop/hbase/hbtop/mode/RequestCountPerSecond.java  | 13 +++--
 .../hadoop/hbase/hbtop/screen/field/FieldScreenView.java|  4 ++--
 .../hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java   |  4 ++--
 .../hadoop/hbase/hbtop/{RecordTest.java => TestRecord.java} |  4 ++--
 .../hbtop/{RecordFilterTest.java => TestRecordFilter.java}  |  4 ++--
 .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java  |  4 ++--
 .../field/{FieldValueTest.java => TestFieldValue.java}  |  4 ++--
 .../hbtop/mode/{ClientModeTest.java => TestClientMode.java} |  4 ++--
 .../hbtop/mode/{ModeTestBase.java => TestModeBase.java} |  2 +-
 .../mode/{NamespaceModeTest.java => TestNamespaceMode.java} |  4 ++--
 .../hbtop/mode/{RegionModeTest.java => TestRegionMode.java} |  4 ++--
 ...{RegionServerModeTest.java => TestRegionServerMode.java} |  4 ++--
 ...untPerSecondTest.java => TestRequestCountPerSecond.java} |  4 ++--
 .../hbtop/mode/{TableModeTest.java => TestTableMode.java}   |  4 ++--
 .../hbtop/mode/{UserModeTest.java => TestUserMode.java} |  4 ++--
 ...reenPresenterTest.java => TestFieldScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestHelpScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestModeScreenPresenter.java} |  4 ++--
 ...rTest.java => TestFilterDisplayModeScreenPresenter.java} |  4 ++--
 ...PresenterTest.java => TestInputModeScreenPresenter.java} |  4 ++--
 ...esenterTest.java => TestMessageModeScreenPresenter.java} |  4 ++--
 .../hbtop/screen/top/{PagingTest.java => TestPaging.java}   |  4 ++--
 .../{TopScreenModelTest.java => TestTopScreenModel.java}|  4 ++--
 ...ScreenPresenterTest.java => TestTopScreenPresenter.java} |  4 ++--
 .../terminal/{CursorTest.java => impl/TestCursor.java}  | 10 ++
 .../terminal/{KeyPressTest.java => impl/TestKeyPress.java}  | 10 ++
 .../TestTerminalPrinter.java}   | 11 +++
 30 files changed, 79 insertions(+), 65 deletions(-)

diff --git a/bin/hbase b/bin/hbase
index e19fd1d..ab4913d 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -645,7 +645,10 @@ elif [ "$COMMAND" = "hbtop" ] ; then
 done
   fi
 
-  HBASE_OPTS="${HBASE_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  if [ -f "${HBASE_HOME}/conf/log4j-hbtop.properties" ] ; then
+HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  fi
+  HBASE_OPTS="${HBASE_OPTS} ${HBASE_HBTOP_OPTS}"
 else
   CLASS=$COMMAND
 fi
diff --git a/conf/log4j-hbtop.properties b/conf/log4j-hbtop.properties
index 831ee18..4d68d79 100644
--- a/conf/log4j-hbtop.properties
+++ b/conf/log4j-hbtop.properties
@@ -24,4 +24,4 @@ log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: 
%m%n
 
 # ZooKeeper will still put stuff at WARN
-log4j.logger.org.apache.zookeeper=ERROR
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=ERROR
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
index c7093dd..78adf7c 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
@@ -55,6 +55,9 @@ public final class RecordFilter {
 return parse(filterString, Arrays.asList(Field.values()), ignoreCase);
   }
 
+  /*
+   * Parse a filter string and build a RecordFilter instance.
+   */
   public static RecordFilter parse(String filterString, List fields, 
boolean ignoreCase) {
 int index = 0;
 
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
index 508cf82..d546070 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java

[hbase] branch master updated: HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

2020-01-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 79e799a  HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)
79e799a is described below

commit 79e799ab7bab14fa2ce239d98fffc2f23c14d22d
Author: Toshihiro Suzuki 
AuthorDate: Sat Jan 11 21:06:07 2020 +0900

HBASE-23165 [hbtop] Some modifications from HBASE-22988 (#987)

Signed-off-by: stack 
---
 bin/hbase   |  5 -
 conf/log4j-hbtop.properties |  2 +-
 .../java/org/apache/hadoop/hbase/hbtop/RecordFilter.java|  3 +++
 .../hadoop/hbase/hbtop/mode/RequestCountPerSecond.java  | 13 +++--
 .../hadoop/hbase/hbtop/screen/field/FieldScreenView.java|  4 ++--
 .../hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java   |  4 ++--
 .../hadoop/hbase/hbtop/{RecordTest.java => TestRecord.java} |  4 ++--
 .../hbtop/{RecordFilterTest.java => TestRecordFilter.java}  |  4 ++--
 .../test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java  |  4 ++--
 .../field/{FieldValueTest.java => TestFieldValue.java}  |  4 ++--
 .../hbtop/mode/{ClientModeTest.java => TestClientMode.java} |  4 ++--
 .../hbtop/mode/{ModeTestBase.java => TestModeBase.java} |  2 +-
 .../mode/{NamespaceModeTest.java => TestNamespaceMode.java} |  4 ++--
 .../hbtop/mode/{RegionModeTest.java => TestRegionMode.java} |  4 ++--
 ...{RegionServerModeTest.java => TestRegionServerMode.java} |  4 ++--
 ...untPerSecondTest.java => TestRequestCountPerSecond.java} |  4 ++--
 .../hbtop/mode/{TableModeTest.java => TestTableMode.java}   |  4 ++--
 .../hbtop/mode/{UserModeTest.java => TestUserMode.java} |  4 ++--
 ...reenPresenterTest.java => TestFieldScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestHelpScreenPresenter.java} |  4 ++--
 ...creenPresenterTest.java => TestModeScreenPresenter.java} |  4 ++--
 ...rTest.java => TestFilterDisplayModeScreenPresenter.java} |  4 ++--
 ...PresenterTest.java => TestInputModeScreenPresenter.java} |  4 ++--
 ...esenterTest.java => TestMessageModeScreenPresenter.java} |  4 ++--
 .../hbtop/screen/top/{PagingTest.java => TestPaging.java}   |  4 ++--
 .../{TopScreenModelTest.java => TestTopScreenModel.java}|  4 ++--
 ...ScreenPresenterTest.java => TestTopScreenPresenter.java} |  4 ++--
 .../terminal/{CursorTest.java => impl/TestCursor.java}  | 10 ++
 .../terminal/{KeyPressTest.java => impl/TestKeyPress.java}  | 10 ++
 .../TestTerminalPrinter.java}   | 11 +++
 30 files changed, 79 insertions(+), 65 deletions(-)

diff --git a/bin/hbase b/bin/hbase
index 8177898..8f1d0bb 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -673,7 +673,10 @@ elif [ "$COMMAND" = "hbtop" ] ; then
 done
   fi
 
-  HBASE_OPTS="${HBASE_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  if [ -f "${HBASE_HOME}/conf/log4j-hbtop.properties" ] ; then
+HBASE_HBTOP_OPTS="${HBASE_HBTOP_OPTS} 
-Dlog4j.configuration=file:${HBASE_HOME}/conf/log4j-hbtop.properties"
+  fi
+  HBASE_OPTS="${HBASE_OPTS} ${HBASE_HBTOP_OPTS}"
 else
   CLASS=$COMMAND
 fi
diff --git a/conf/log4j-hbtop.properties b/conf/log4j-hbtop.properties
index 831ee18..4d68d79 100644
--- a/conf/log4j-hbtop.properties
+++ b/conf/log4j-hbtop.properties
@@ -24,4 +24,4 @@ log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: 
%m%n
 
 # ZooKeeper will still put stuff at WARN
-log4j.logger.org.apache.zookeeper=ERROR
\ No newline at end of file
+log4j.logger.org.apache.zookeeper=ERROR
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
index c7093dd..78adf7c 100644
--- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
+++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java
@@ -55,6 +55,9 @@ public final class RecordFilter {
 return parse(filterString, Arrays.asList(Field.values()), ignoreCase);
   }
 
+  /*
+   * Parse a filter string and build a RecordFilter instance.
+   */
   public static RecordFilter parse(String filterString, List fields, 
boolean ignoreCase) {
 int index = 0;
 
diff --git 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
index 508cf82..d546070 100644
--- 
a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java
+++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java

[hbase] branch master updated: HBASE-23065 [hbtop] Top-N heavy hitter user and client drill downs

2019-12-16 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 29d1a97  HBASE-23065 [hbtop] Top-N heavy hitter user and client drill 
downs
29d1a97 is described below

commit 29d1a97482a1b84c2372c1ad93190431ef558454
Author: Ankit Singhal 
AuthorDate: Sun Nov 17 15:07:52 2019 -0800

HBASE-23065 [hbtop] Top-N heavy hitter user and client drill downs

Signed-off-by: Toshihiro Suzuki 
Signed-off-by: Josh Elser 
Signed-off-by: Andrew Purtell 
---
 .../org/apache/hadoop/hbase/ServerMetrics.java |   5 +
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  |  38 +++--
 .../java/org/apache/hadoop/hbase/UserMetrics.java  |  86 +++
 .../apache/hadoop/hbase/UserMetricsBuilder.java| 151 
 .../regionserver/MetricsUserAggregateSource.java   |   4 +
 .../hbase/regionserver/MetricsUserSource.java  |  37 +
 .../apache/hadoop/metrics2/MetricHistogram.java|   6 +
 .../MetricsUserAggregateSourceImpl.java|  10 +-
 .../hbase/regionserver/MetricsUserSourceImpl.java  |  72 +-
 .../hadoop/metrics2/lib/MutableHistogram.java  |   4 +
 .../hadoop/metrics2/lib/MutableRangeHistogram.java |   6 +-
 .../apache/hadoop/hbase/hbtop/RecordFilter.java|   4 +
 .../org/apache/hadoop/hbase/hbtop/field/Field.java |   6 +-
 .../hbase/hbtop/mode/ClientModeStrategy.java   | 157 +
 .../org/apache/hadoop/hbase/hbtop/mode/Mode.java   |  10 +-
 .../hadoop/hbase/hbtop/mode/ModeStrategy.java  |   3 +-
 .../hadoop/hbase/hbtop/mode/ModeStrategyUtils.java |  63 +
 .../hbase/hbtop/mode/NamespaceModeStrategy.java|  26 +---
 .../hbase/hbtop/mode/RegionModeStrategy.java   |  28 +++-
 .../hbase/hbtop/mode/RegionServerModeStrategy.java |  28 ++--
 .../hadoop/hbase/hbtop/mode/TableModeStrategy.java |  13 +-
 .../hadoop/hbase/hbtop/mode/UserModeStrategy.java  |  70 +
 .../hbase/hbtop/screen/top/TopScreenModel.java |  27 +++-
 .../hbase/hbtop/screen/top/TopScreenPresenter.java |   6 +-
 .../org/apache/hadoop/hbase/hbtop/TestUtils.java   | 101 -
 .../hadoop/hbase/hbtop/mode/ClientModeTest.java|  72 ++
 .../hadoop/hbase/hbtop/mode/ModeTestBase.java  |   6 +-
 .../hadoop/hbase/hbtop/mode/UserModeTest.java  |  70 +
 .../src/main/protobuf/ClusterStatus.proto  |  28 
 .../src/main/protobuf/ClusterStatus.proto  |  31 
 .../apache/hadoop/hbase/regionserver/HRegion.java  |   8 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  24 +++-
 .../hadoop/hbase/regionserver/MetricsRegion.java   |  11 +-
 .../hbase/regionserver/MetricsRegionServer.java|   2 +-
 .../hbase/regionserver/MetricsUserAggregate.java   |   9 ++
 .../regionserver/MetricsUserAggregateFactory.java  |  14 +-
 .../regionserver/MetricsUserAggregateImpl.java |  79 +--
 .../hadoop/hbase/TestClientClusterMetrics.java | 143 ++-
 .../hbase/master/TestRegionsRecoveryChore.java |   5 +
 .../hbase/regionserver/TestMetricsRegion.java  |   5 +-
 40 files changed, 1359 insertions(+), 109 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 391e62f..21fad92 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -94,6 +94,11 @@ public interface ServerMetrics {
   Map getRegionMetrics();
 
   /**
+   * @return metrics per user
+   */
+  Map getUserMetrics();
+
+  /**
* Return the RegionServer-level and Region-level coprocessors
* @return string set of loaded RegionServer-level and Region-level 
coprocessors
*/
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index b408f02..88e26f4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -77,6 +77,8 @@ public final class ServerMetricsBuilder {
 .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList()))
   .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream()
 
.map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()))
+.setUserMetrics(serverLoadPB.getUserLoadsList().stream()
+
.map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList()))
   .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream()
   
.map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList()))
   .setReplicationLoadSink(serverLoadPB.hasReplLoadSink()
@@ -100,19 +102,19

[hbase] branch branch-1.4 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new 5d2bd77  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file
5d2bd77 is described below

commit 5d2bd77725ded7aeb78f6ddb848d07f836780216
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 21:44:42 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file

Signed-off-by: Lijin Bin 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp  | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index 05d8783..0b7117c 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,16 +21,18 @@
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.FileSystem"
   import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
-  import="org.apache.hadoop.hbase.regionserver.StoreFile"
+  import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
   %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
+  FileSystem fs = FileSystem.get(conf);
 %>
 

[hbase] branch branch-1.4 updated: HBASE-23359 RS going down with NPE when splitting a region with compaction disabled in branch-1 (#899)

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1.4
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1.4 by this push:
 new c612a2e  HBASE-23359 RS going down with NPE when splitting a region 
with compaction disabled in branch-1 (#899)
c612a2e is described below

commit c612a2e3cc8118d60f4571a50ab4068bf1152478
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 21:24:03 2019 +0900

HBASE-23359 RS going down with NPE when splitting a region with compaction 
disabled in branch-1 (#899)

Signed-off-by: Balazs Meszaros 
Signed-off-by Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 09a3d06..2982e93 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -608,7 +608,9 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   if (runnable instanceof CompactionRunner) {
 CompactionRunner runner = (CompactionRunner)runnable;
 LOG.debug("Compaction Rejected: " + runner);
-runner.store.cancelRequestedCompaction(runner.compaction);
+if (runner.compaction != null) {
+  runner.store.cancelRequestedCompaction(runner.compaction);
+}
   }
 }
   }



[hbase] branch branch-2.1 updated: HBASE-23303 Add security headers to REST server/info page (#843)

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 0a3d212  HBASE-23303 Add security headers to REST server/info page 
(#843)
0a3d212 is described below

commit 0a3d21249f0ad72d84691bed51f30cac7bb9d41b
Author: Andor Molnár 
AuthorDate: Sun Dec 8 14:06:40 2019 +0100

HBASE-23303 Add security headers to REST server/info page (#843)

Signed-off-by: Toshihiro Suzuki 
Signed-off-by: Sean Busbey 
---
 .../hbase/http/ClickjackingPreventionFilter.java   |  47 +
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  11 ++-
 .../hadoop/hbase/http/SecurityHeadersFilter.java   |  81 +++
 .../hbase/http/TestSecurityHeadersFilter.java  | 106 
 .../org/apache/hadoop/hbase/rest/RESTServer.java   |  62 +++-
 .../hbase/rest/TestSecurityHeadersFilter.java  | 110 +
 6 files changed, 372 insertions(+), 45 deletions(-)

diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
index 9944d29..d2764f6 100644
--- 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.http;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -24,32 +28,37 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
 
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ClickjackingPreventionFilter implements Filter {
+  private FilterConfig filterConfig;
+  private static final String DEFAULT_XFRAMEOPTIONS = "DENY";
 
-private FilterConfig filterConfig;
-
-@Override
-public void init(FilterConfig filterConfig) throws ServletException {
-this.filterConfig = filterConfig;
-}
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+this.filterConfig = filterConfig;
+  }
 
-@Override
-public void doFilter(ServletRequest req, ServletResponse res,
- FilterChain chain)
-throws IOException, ServletException {
-HttpServletResponse httpRes = (HttpServletResponse) res;
-httpRes.addHeader("X-Frame-Options", 
filterConfig.getInitParameter("xframeoptions"));
-chain.doFilter(req, res);
-}
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse res, FilterChain 
chain)
+throws IOException, ServletException {
+HttpServletResponse httpRes = (HttpServletResponse) res;
+httpRes.addHeader("X-Frame-Options", 
filterConfig.getInitParameter("xframeoptions"));
+chain.doFilter(req, res);
+  }
 
-@Override
-public void destroy() {
-}
+  @Override
+  public void destroy() {
+  }
 
+  public static Map getDefaultParameters(Configuration conf) {
+Map params = new HashMap<>();
+params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode",
+DEFAULT_XFRAMEOPTIONS));
+return params;
+  }
 }
diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index a34b26c..7fba6a6 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -560,10 +560,15 @@ public class HttpServer implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
-Map params = new HashMap<>();
-params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode", "DENY"));
+
 addGlobalFilter("clickjackingprevention",
-ClickjackingPreventionFilter.class.getName(), params);
+ClickjackingPreventionFilter.class.getName(),
+ClickjackingPreventionFilter.getDefaultParameters(conf));
+
+addGlobalFilter("securityheaders",
+SecurityHeadersFilter.class.getName(),
+SecurityHeadersFilter.getDefaultParameters(conf));

[hbase] branch branch-2.2 updated: HBASE-23303 Add security headers to REST server/info page (#843)

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 5e8d71c  HBASE-23303 Add security headers to REST server/info page 
(#843)
5e8d71c is described below

commit 5e8d71ca1e2915cacf4951833fb8c5f15dd16fbe
Author: Andor Molnár 
AuthorDate: Sun Dec 8 14:06:40 2019 +0100

HBASE-23303 Add security headers to REST server/info page (#843)

Signed-off-by: Toshihiro Suzuki 
Signed-off-by: Sean Busbey 
---
 .../hbase/http/ClickjackingPreventionFilter.java   |  47 +
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  11 ++-
 .../hadoop/hbase/http/SecurityHeadersFilter.java   |  81 +++
 .../hbase/http/TestSecurityHeadersFilter.java  | 106 
 .../org/apache/hadoop/hbase/rest/RESTServer.java   |  62 +++-
 .../hbase/rest/TestSecurityHeadersFilter.java  | 110 +
 6 files changed, 372 insertions(+), 45 deletions(-)

diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
index 9944d29..d2764f6 100644
--- 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.http;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
@@ -24,32 +28,37 @@ import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
 
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ClickjackingPreventionFilter implements Filter {
+  private FilterConfig filterConfig;
+  private static final String DEFAULT_XFRAMEOPTIONS = "DENY";
 
-private FilterConfig filterConfig;
-
-@Override
-public void init(FilterConfig filterConfig) throws ServletException {
-this.filterConfig = filterConfig;
-}
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+this.filterConfig = filterConfig;
+  }
 
-@Override
-public void doFilter(ServletRequest req, ServletResponse res,
- FilterChain chain)
-throws IOException, ServletException {
-HttpServletResponse httpRes = (HttpServletResponse) res;
-httpRes.addHeader("X-Frame-Options", 
filterConfig.getInitParameter("xframeoptions"));
-chain.doFilter(req, res);
-}
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse res, FilterChain 
chain)
+throws IOException, ServletException {
+HttpServletResponse httpRes = (HttpServletResponse) res;
+httpRes.addHeader("X-Frame-Options", 
filterConfig.getInitParameter("xframeoptions"));
+chain.doFilter(req, res);
+  }
 
-@Override
-public void destroy() {
-}
+  @Override
+  public void destroy() {
+  }
 
+  public static Map getDefaultParameters(Configuration conf) {
+Map params = new HashMap<>();
+params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode",
+DEFAULT_XFRAMEOPTIONS));
+return params;
+  }
 }
diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index a34b26c..7fba6a6 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -560,10 +560,15 @@ public class HttpServer implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
-Map params = new HashMap<>();
-params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode", "DENY"));
+
 addGlobalFilter("clickjackingprevention",
-ClickjackingPreventionFilter.class.getName(), params);
+ClickjackingPreventionFilter.class.getName(),
+ClickjackingPreventionFilter.getDefaultParameters(conf));
+
+addGlobalFilter("securityheaders",
+SecurityHeadersFilter.class.getName(),
+SecurityHeadersFilter.getDefaultParameters(conf));

[hbase] branch branch-2 updated: HBASE-23303 Add security headers to REST server/info page (#843)

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 188742a  HBASE-23303 Add security headers to REST server/info page 
(#843)
188742a is described below

commit 188742a82febd30b1604356333a0cef463c446f2
Author: Andor Molnár 
AuthorDate: Sun Dec 8 14:06:40 2019 +0100

HBASE-23303 Add security headers to REST server/info page (#843)

Signed-off-by: Toshihiro Suzuki 
Signed-off-by: Sean Busbey 
---
 .../hbase/http/ClickjackingPreventionFilter.java   |  11 +++
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  11 ++-
 .../hadoop/hbase/http/SecurityHeadersFilter.java   |  81 +++
 .../hbase/http/TestSecurityHeadersFilter.java  | 106 
 .../org/apache/hadoop/hbase/rest/RESTServer.java   |  62 +++-
 .../hbase/rest/TestSecurityHeadersFilter.java  | 110 +
 6 files changed, 355 insertions(+), 26 deletions(-)

diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
index 7ce1301..0f0c715 100644
--- 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.http;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -27,6 +29,7 @@ import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 
 import org.apache.yetus.audience.InterfaceAudience;
@@ -34,6 +37,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ClickjackingPreventionFilter implements Filter {
   private FilterConfig filterConfig;
+  private static final String DEFAULT_XFRAMEOPTIONS = "DENY";
 
   @Override
   public void init(FilterConfig filterConfig) throws ServletException {
@@ -51,4 +55,11 @@ public class ClickjackingPreventionFilter implements Filter {
   @Override
   public void destroy() {
   }
+
+  public static Map getDefaultParameters(Configuration conf) {
+Map params = new HashMap<>();
+params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode",
+DEFAULT_XFRAMEOPTIONS));
+return params;
+  }
 }
diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index e96e057..661af4a 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -596,10 +596,15 @@ public class HttpServer implements FilterContainer {
 addDefaultApps(contexts, appDir, conf);
 
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
-Map params = new HashMap<>();
-params.put("xframeoptions", 
conf.get("hbase.http.filter.xframeoptions.mode", "DENY"));
+
 addGlobalFilter("clickjackingprevention",
-ClickjackingPreventionFilter.class.getName(), params);
+ClickjackingPreventionFilter.class.getName(),
+ClickjackingPreventionFilter.getDefaultParameters(conf));
+
+addGlobalFilter("securityheaders",
+SecurityHeadersFilter.class.getName(),
+SecurityHeadersFilter.getDefaultParameters(conf));
+
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
   conf = new Configuration(conf);
diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java
 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java
new file mode 100644
index 000..b83fef1
--- /dev/null
+++ 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software

[hbase] branch master updated (60d9430 -> 978546b)

2019-12-08 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 60d9430  HBASE-23309: Adding the flexibility to ChainWalEntryFilter to 
filter the whole entry if all cells get filtered (#837)
 add 978546b  HBASE-23303 Add security headers to REST server/info page 
(#843)

No new revisions were added by this update.

Summary of changes:
 .../hbase/http/ClickjackingPreventionFilter.java   |  11 +++
 .../org/apache/hadoop/hbase/http/HttpServer.java   |  11 ++-
 .../hadoop/hbase/http/SecurityHeadersFilter.java   |  81 +++
 .../hbase/http/TestSecurityHeadersFilter.java  | 106 
 .../org/apache/hadoop/hbase/rest/RESTServer.java   |  62 +++-
 .../hbase/rest/TestSecurityHeadersFilter.java  | 110 +
 6 files changed, 355 insertions(+), 26 deletions(-)
 create mode 100644 
hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java
 create mode 100644 
hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java
 create mode 100644 
hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java



[hbase] branch branch-1 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file

2019-12-04 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new ec55c2a  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file
ec55c2a is described below

commit ec55c2a5d976ae1b2f797854419146bbef2fccc2
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 21:44:42 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file

Signed-off-by: Lijin Bin 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp  | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index 05d8783..0b7117c 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,16 +21,18 @@
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.FileSystem"
   import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
-  import="org.apache.hadoop.hbase.regionserver.StoreFile"
+  import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
   %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
+  FileSystem fs = FileSystem.get(conf);
 %>
 

[hbase] branch branch-1 updated: HBASE-23359 RS going down with NPE when splitting a region with compaction disabled in branch-1 (#899)

2019-12-04 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-1 by this push:
 new 737eaa6  HBASE-23359 RS going down with NPE when splitting a region 
with compaction disabled in branch-1 (#899)
737eaa6 is described below

commit 737eaa635a8d6e81e5ebac3b31c81096e24d3635
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 21:24:03 2019 +0900

HBASE-23359 RS going down with NPE when splitting a region with compaction 
disabled in branch-1 (#899)

Signed-off-by: Balazs Meszaros 
Signed-off-by: Anoop Sam John 
---
 .../java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index ed4bd0d..f3ff124 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -653,7 +653,9 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   if (runnable instanceof CompactionRunner) {
 CompactionRunner runner = (CompactionRunner)runnable;
 LOG.debug("Compaction Rejected: " + runner);
-runner.store.cancelRequestedCompaction(runner.compaction);
+if (runner.compaction != null) {
+  runner.store.cancelRequestedCompaction(runner.compaction);
+}
   }
 }
   }



[hbase] branch branch-2.2 updated: HBASE-22529 Add sanity check for in-memory compaction policy

2019-12-04 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 1804e35  HBASE-22529 Add sanity check for in-memory compaction policy
1804e35 is described below

commit 1804e35f6f327cefd605b39d956a19a10ed838ab
Author: Junegunn Choi 
AuthorDate: Wed Dec 4 13:55:53 2019 +0900

HBASE-22529 Add sanity check for in-memory compaction policy

Signed-off-by: Toshihiro Suzuki 
---
 .../java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java  | 7 +++
 .../org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java | 5 +
 2 files changed, 12 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index a2ee1ff..3852a41 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -188,6 +188,13 @@ public final class TableDescriptorChecker {
 "  must be greater than zero.";
 warnOrThrowExceptionForFailure(logWarn, message, null);
   }
+
+  // check in-memory compaction
+  try {
+hcd.getInMemoryCompaction();
+  } catch (IllegalArgumentException e) {
+warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
+  }
 }
   }
 
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
index e7bc8b3..e55f3ff 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
@@ -151,6 +151,11 @@ public class TestIllegalTableDescriptor {
 hcd.setScope(0);
 checkTableIsLegal(htd);
 
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, 
"INVALID");
+checkTableIsIllegal(htd);
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, "NONE");
+checkTableIsLegal(htd);
+
 try {
   hcd.setDFSReplication((short) -1);
   fail("Illegal value for setDFSReplication did not throw");



[hbase] branch branch-2 updated: HBASE-22529 Add sanity check for in-memory compaction policy

2019-12-04 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 40e1c9e  HBASE-22529 Add sanity check for in-memory compaction policy
40e1c9e is described below

commit 40e1c9e31031eec5f909e52fbee19bcab7ccefc4
Author: Junegunn Choi 
AuthorDate: Wed Dec 4 13:55:53 2019 +0900

HBASE-22529 Add sanity check for in-memory compaction policy

Signed-off-by: Toshihiro Suzuki 
---
 .../java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java  | 7 +++
 .../org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java | 5 +
 2 files changed, 12 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index a2ee1ff..3852a41 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -188,6 +188,13 @@ public final class TableDescriptorChecker {
 "  must be greater than zero.";
 warnOrThrowExceptionForFailure(logWarn, message, null);
   }
+
+  // check in-memory compaction
+  try {
+hcd.getInMemoryCompaction();
+  } catch (IllegalArgumentException e) {
+warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
+  }
 }
   }
 
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
index e7bc8b3..e55f3ff 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
@@ -151,6 +151,11 @@ public class TestIllegalTableDescriptor {
 hcd.setScope(0);
 checkTableIsLegal(htd);
 
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, 
"INVALID");
+checkTableIsIllegal(htd);
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, "NONE");
+checkTableIsLegal(htd);
+
 try {
   hcd.setDFSReplication((short) -1);
   fail("Illegal value for setDFSReplication did not throw");



[hbase] branch master updated: HBASE-22529 Add sanity check for in-memory compaction policy

2019-12-04 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 5e34a59  HBASE-22529 Add sanity check for in-memory compaction policy
5e34a59 is described below

commit 5e34a59d90e56be190a855fe68f2c85f5deb2ce2
Author: Junegunn Choi 
AuthorDate: Wed Dec 4 13:55:53 2019 +0900

HBASE-22529 Add sanity check for in-memory compaction policy

Signed-off-by: Toshihiro Suzuki 
---
 .../java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java  | 7 +++
 .../org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java | 5 +
 2 files changed, 12 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index a2ee1ff..3852a41 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -188,6 +188,13 @@ public final class TableDescriptorChecker {
 "  must be greater than zero.";
 warnOrThrowExceptionForFailure(logWarn, message, null);
   }
+
+  // check in-memory compaction
+  try {
+hcd.getInMemoryCompaction();
+  } catch (IllegalArgumentException e) {
+warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
+  }
 }
   }
 
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
index e7bc8b3..e55f3ff 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
@@ -151,6 +151,11 @@ public class TestIllegalTableDescriptor {
 hcd.setScope(0);
 checkTableIsLegal(htd);
 
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, 
"INVALID");
+checkTableIsIllegal(htd);
+hcd.setValue(ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION, "NONE");
+checkTableIsLegal(htd);
+
 try {
   hcd.setDFSReplication((short) -1);
   fail("Illegal value for setDFSReplication did not throw");



[hbase] branch branch-2.1 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file (addendum)

2019-12-03 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 75764fa  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (addendum)
75764fa is described below

commit 75764fa2a9dbb3c9769d7e37ef6479e71e2fb754
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 09:26:15 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file (addendum)

Signed-off-by: Sean Busbey 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index b538cb7..5c0a4e1 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -54,7 +54,7 @@
  printer.setConf(conf);
  String[] options = {"-s"};
  printer.parseOptions(options);
- StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), 
true);
+ StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile));
  printer.processFile(sfi.getFileStatus().getPath(), true);
  String text = byteStream.toString();%>
  <%=



[hbase] branch branch-2.2 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file (addendum)

2019-12-03 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 3b0d223  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (addendum)
3b0d223 is described below

commit 3b0d22375d375abf8208e74b290250db3dfc23d3
Author: Toshihiro Suzuki 
AuthorDate: Wed Dec 4 09:26:15 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file (addendum)

Signed-off-by: Sean Busbey 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index b538cb7..5c0a4e1 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -54,7 +54,7 @@
  printer.setConf(conf);
  String[] options = {"-s"};
  printer.parseOptions(options);
- StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), 
true);
+ StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile));
  printer.processFile(sfi.getFileStatus().getPath(), true);
  String text = byteStream.toString();%>
  <%=



[hbase] branch branch-2.1 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file (#888)

2019-12-02 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 6ce03dd  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (#888)
6ce03dd is described below

commit 6ce03ddad58bd87b4c9233a184355be4c5adcef7
Author: Toshihiro Suzuki 
AuthorDate: Tue Dec 3 15:15:40 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file (#888)

Signed-off-by: Lijin Bin 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index c1de41c..b538cb7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,14 +21,17 @@
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.FileSystem"
   import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
+  import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
 %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
+  FileSystem fs = FileSystem.get(conf);
   pageContext.setAttribute("pageTitle", "HBase RegionServer: " + 
rs.getServerName());
 %>
 
@@ -51,7 +54,8 @@
  printer.setConf(conf);
  String[] options = {"-s"};
  printer.parseOptions(options);
- printer.processFile(new Path(storeFile), true);
+ StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), 
true);
+ printer.processFile(sfi.getFileStatus().getPath(), true);
  String text = byteStream.toString();%>
  <%=
text



[hbase] branch branch-2.2 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file (#888)

2019-12-02 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 20a3c3f  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (#888)
20a3c3f is described below

commit 20a3c3f5f903a829a58f6a306399781782dc9375
Author: Toshihiro Suzuki 
AuthorDate: Tue Dec 3 15:15:40 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file (#888)

Signed-off-by: Lijin Bin 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index c1de41c..b538cb7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,14 +21,17 @@
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.FileSystem"
   import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
+  import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
 %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
+  FileSystem fs = FileSystem.get(conf);
   pageContext.setAttribute("pageTitle", "HBase RegionServer: " + 
rs.getServerName());
 %>
 
@@ -51,7 +54,8 @@
  printer.setConf(conf);
  String[] options = {"-s"};
  printer.parseOptions(options);
- printer.processFile(new Path(storeFile), true);
+ StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), 
true);
+ printer.processFile(sfi.getFileStatus().getPath(), true);
  String text = byteStream.toString();%>
  <%=
text



[hbase] branch branch-2 updated: HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile is a reference file (#888)

2019-12-02 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new e558fc9  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (#888)
e558fc9 is described below

commit e558fc9cd8a13d50b5277e95033ab1f5a831a43d
Author: Toshihiro Suzuki 
AuthorDate: Tue Dec 3 15:15:40 2019 +0900

HBASE-22096 /storeFile.jsp shows CorruptHFileException when the storeFile 
is a reference file (#888)

Signed-off-by: Lijin Bin 
---
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index c1de41c..b538cb7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -21,14 +21,17 @@
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.FileSystem"
   import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
+  import="org.apache.hadoop.hbase.regionserver.StoreFileInfo"
 %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
+  FileSystem fs = FileSystem.get(conf);
   pageContext.setAttribute("pageTitle", "HBase RegionServer: " + 
rs.getServerName());
 %>
 
@@ -51,7 +54,8 @@
  printer.setConf(conf);
  String[] options = {"-s"};
  printer.parseOptions(options);
- printer.processFile(new Path(storeFile), true);
+ StoreFileInfo sfi = new StoreFileInfo(conf, fs, new Path(storeFile), 
true);
+ printer.processFile(sfi.getFileStatus().getPath(), true);
  String text = byteStream.toString();%>
  <%=
text



[hbase] branch master updated (27cfe1b -> 0f166ed)

2019-12-02 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git.


from 27cfe1b  HBASE-23345 Table need to replication unless all of cfs are 
excluded (#881)
 add 0f166ed  HBASE-22096 /storeFile.jsp shows CorruptHFileException when 
the storeFile is a reference file (#888)

No new revisions were added by this update.

Summary of changes:
 .../src/main/resources/hbase-webapps/regionserver/storeFile.jsp | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)



[hbase] branch branch-2.1 updated: HBASE-23115 Unit change for StoreFileSize and MemStoreSize (#710)

2019-10-11 Thread brfrn169
This is an automated email from the ASF dual-hosted git repository.

brfrn169 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new ad8d2a5  HBASE-23115 Unit change for StoreFileSize and MemStoreSize 
(#710)
ad8d2a5 is described below

commit ad8d2a5ce2f831d23151bf048e367bafba30e738
Author: Karthik Palanisamy 
AuthorDate: Fri Oct 11 07:04:43 2019 -0700

HBASE-23115 Unit change for StoreFileSize and MemStoreSize (#710)

Signed-off-by: Toshihiro Suzuki 
---
 .../hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon | 61 ++-
 .../hbase/tmpl/master/RegionServerListTmpl.jamon   | 65 ++--
 .../hbase/tmpl/regionserver/RegionListTmpl.jamon   | 52 
 .../resources/hbase-webapps/master/rsgroup.jsp | 69 --
 .../main/resources/hbase-webapps/master/table.jsp  | 56 ++
 5 files changed, 233 insertions(+), 70 deletions(-)

diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
index 41fe487..61ddf09 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RSGroupListTmpl.jamon
@@ -42,7 +42,6 @@ ServerManager serverManager;
 <%java>
 List groups = 
RSGroupTableAccessor.getAllRSGroupInfo(master.getConnection());
 
-
 <%if (groups != null && groups.size() > 0)%>
 
 <%java>
@@ -170,6 +169,10 @@ if (master.getServerManager() != null) {
 
 
 <%java>
+final String ZEROMB = "0 MB";
+String usedHeapStr = ZEROMB;
+String maxHeapStr = ZEROMB;
+String memstoreSizeStr = ZEROMB;
 for (RSGroupInfo rsGroupInfo: rsGroupInfos) {
   String rsGroupName = rsGroupInfo.getName();
   long usedHeap = 0;
@@ -184,15 +187,25 @@ if (master.getServerManager() != null) {
 rm -> rm.getMemStoreSize().get(Size.Unit.MEGABYTE)).sum();
 }
   }
+
+  if (usedHeap > 0) {
+usedHeapStr = TraditionalBinaryPrefix.long2String(usedHeap
+  * TraditionalBinaryPrefix.MEGA.value, 
"B", 1);
+  }
+  if (maxHeap > 0) {
+maxHeapStr = TraditionalBinaryPrefix.long2String(maxHeap
+  * TraditionalBinaryPrefix.MEGA.value, 
"B", 1);
+  }
+  if (memstoreSize > 0) {
+memstoreSizeStr = TraditionalBinaryPrefix.long2String(memstoreSize
+  * TraditionalBinaryPrefix.MEGA.value, 
"B", 1);
+  }
 
 
 <& rsGroupLink; rsGroupName=rsGroupName; &>
-<% TraditionalBinaryPrefix.long2String(usedHeap
-* TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
-<% TraditionalBinaryPrefix.long2String(maxHeap
-* TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
-<% TraditionalBinaryPrefix.long2String(memstoreSize
-* TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
+<% usedHeapStr %>
+<% maxHeapStr %>
+<% memstoreSizeStr %>
 
 
 <%java>
@@ -259,6 +272,12 @@ if (master.getServerManager() != null) {
 Bloom Size
 
 <%java>
+final String ZEROKB = "0 KB";
+final String ZEROMB = "0 MB";
+String uncompressedStorefileSizeStr = ZEROMB;
+String storefileSizeStr = ZEROMB;
+String indexSizeStr = ZEROKB;
+String bloomSizeStr = ZEROKB;
 for (RSGroupInfo rsGroupInfo: rsGroupInfos) {
   String rsGroupName = rsGroupInfo.getName();
   int numStores = 0;
@@ -282,19 +301,31 @@ if (master.getServerManager() != null) {
   count++;
 }
   }
+   if (uncompressedStorefileSize > 0) {
+  uncompressedStorefileSizeStr = TraditionalBinaryPrefix.
+  long2String(uncompressedStorefileSize * 
TraditionalBinaryPrefix.MEGA.value, "B", 1);
+   }
+   if (storefileSize > 0) {
+   storefileSizeStr = TraditionalBinaryPrefix.
+   long2String(storefileSize * TraditionalBinaryPrefix.MEGA.value, 
"B", 1);
+   }
+   if (indexSize > 0) {
+  indexSizeStr = TraditionalBinaryPrefix.
+  long2String(indexSize * TraditionalBinaryPrefix.KILO.value, "B", 1);
+   }
+   if (bloomSize > 0) {
+   bloomSizeStr = TraditionalBinaryPrefix.
+   long2String(bloomSize * TraditionalBinaryPrefix.KILO.value, "B", 1);
+   }
 
 
 <& rsGroupLink; rsGroupName=rsGroupName; &>
 <% numStores %>
 <% numStorefiles %>
-<% TraditionalBinaryPrefix.long2String(
-uncompressedStorefileSize * TraditionalBinaryPrefix.MEGA.value, &

  1   2   >