hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 7a054d3e6 -> e9c332c7f


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9c332c7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9c332c7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9c332c7

Branch: refs/heads/branch-1.3
Commit: e9c332c7f50c8be0603c6f068485a47771afa58f
Parents: 7a054d3
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:22:49 2017 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9c332c7/.gitignore
--
diff --git a/.gitignore b/.gitignore
index b9c6fb2..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,8 @@ hbase-*/test
 *.ipr
 patchprocess/
 dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 a3e32c49b -> a0a17765c


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0a17765
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0a17765
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0a17765

Branch: refs/heads/branch-1.2
Commit: a0a17765c98d8899bb504baed9593997678f1f5e
Parents: a3e32c4
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:23:34 2017 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0a17765/.gitignore
--
diff --git a/.gitignore b/.gitignore
index b9c6fb2..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,8 @@ hbase-*/test
 *.ipr
 patchprocess/
 dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 dbe3907ec -> 0ca69fae3


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ca69fae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ca69fae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ca69fae

Branch: refs/heads/branch-1.4
Commit: 0ca69fae3874c4104a357662dc22f669dbdd369c
Parents: dbe3907
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:22:01 2017 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ca69fae/.gitignore
--
diff --git a/.gitignore b/.gitignore
index b9c6fb2..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,8 @@ hbase-*/test
 *.ipr
 patchprocess/
 dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 64d2ebf9f -> c3bf558b6


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c3bf558b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c3bf558b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c3bf558b

Branch: refs/heads/branch-1
Commit: c3bf558b69684f699aa95500f270d706f4994271
Parents: 64d2ebf
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:21:40 2017 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c3bf558b/.gitignore
--
diff --git a/.gitignore b/.gitignore
index b9c6fb2..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,8 @@ hbase-*/test
 *.ipr
 patchprocess/
 dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 0e85a880f -> 3317b8738


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3317b873
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3317b873
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3317b873

Branch: refs/heads/master
Commit: 3317b8738d9028b935fc0cc0e645372d1027a5a7
Parents: 0e85a88
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:24:35 2017 +0800

--
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3317b873/.gitignore
--
diff --git a/.gitignore b/.gitignore
index 405edc0..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,5 @@ dependency-reduced-pom.xml
 link_report/
 linklint-*.zip
 linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19637 Add .checkstyle to gitignore

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9c9beb79e -> fbe633f14


HBASE-19637 Add .checkstyle to gitignore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fbe633f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fbe633f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fbe633f1

Branch: refs/heads/branch-2
Commit: fbe633f14a41637bba8dbfb80129b5c5996a6e95
Parents: 9c9beb7
Author: zhangduo 
Authored: Wed Dec 27 11:19:02 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 11:24:58 2017 +0800

--
 .gitignore | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fbe633f1/.gitignore
--
diff --git a/.gitignore b/.gitignore
index b9c6fb2..0fce7d4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,8 @@ hbase-*/test
 *.ipr
 patchprocess/
 dependency-reduced-pom.xml
+link_report/
+linklint-*.zip
+linklint/
+.checkstyle
+**/.checkstyle



hbase git commit: HBASE-19615 CompositeImmutableSegment ArrayList Instead of LinkedList

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 0e7fff582 -> 0e85a880f


HBASE-19615 CompositeImmutableSegment ArrayList Instead of LinkedList

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e85a880
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e85a880
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e85a880

Branch: refs/heads/master
Commit: 0e85a880fb174b7fa221e24bc1f6ea32a98acc62
Parents: 0e7fff5
Author: BELUGA BEHR 
Authored: Wed Dec 27 09:59:38 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 10:04:53 2017 +0800

--
 .../hadoop/hbase/regionserver/CompositeImmutableSegment.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e85a880/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 5e8a8b3..bf9ff13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.SortedSet;
 
@@ -56,7 +55,7 @@ public class CompositeImmutableSegment extends 
ImmutableSegment {
   @VisibleForTesting
   @Override
   public List getAllSegments() {
-return new LinkedList<>(segments);
+return new ArrayList<>(segments);
   }
 
   @Override



hbase git commit: HBASE-19615 CompositeImmutableSegment ArrayList Instead of LinkedList

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f0781d49e -> 9c9beb79e


HBASE-19615 CompositeImmutableSegment ArrayList Instead of LinkedList

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c9beb79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c9beb79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c9beb79

Branch: refs/heads/branch-2
Commit: 9c9beb79ef9060dd162c44257e41bfe77063fdd7
Parents: f0781d4
Author: BELUGA BEHR 
Authored: Wed Dec 27 09:59:38 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 10:00:51 2017 +0800

--
 .../hadoop/hbase/regionserver/CompositeImmutableSegment.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c9beb79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 5e8a8b3..bf9ff13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.SortedSet;
 
@@ -56,7 +55,7 @@ public class CompositeImmutableSegment extends 
ImmutableSegment {
   @VisibleForTesting
   @Override
   public List getAllSegments() {
-return new LinkedList<>(segments);
+return new ArrayList<>(segments);
   }
 
   @Override



hbase git commit: HBASE-19624 TestIOFencing hangs

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 5a561e088 -> 0e7fff582


HBASE-19624 TestIOFencing hangs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e7fff58
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e7fff58
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e7fff58

Branch: refs/heads/master
Commit: 0e7fff58292122512a4aa444a53c71608e9a5bce
Parents: 5a561e0
Author: Chia-Ping Tsai 
Authored: Wed Dec 27 09:33:53 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 09:33:53 2017 +0800

--
 .../java/org/apache/hadoop/hbase/regionserver/CompactSplit.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e7fff58/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index e143511..28fc1a3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -397,6 +397,7 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
 }
   } catch (InterruptedException ie) {
 LOG.warn("Interrupted waiting for " + name + " to finish...");
+t.shutdownNow();
   }
 }
   }



[07/17] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/427bce7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/427bce7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/427bce7c

Branch: refs/heads/HBASE-19397
Commit: 427bce7c14b0c33d5df9696b13a9f3f247517c4d
Parents: 5f202cc
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |   3 +-
 .../src/main/protobuf/MasterProcedure.proto |  21 +++-
 .../src/main/protobuf/RegionServerStatus.proto  |   3 +-
 .../src/main/protobuf/Replication.proto |   5 +
 .../replication/ReplicationPeersZKImpl.java |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 100 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/MasterServices.java |  26 +++--
 .../assignment/RegionTransitionProcedure.java   |  11 +-
 .../master/procedure/MasterProcedureEnv.java|   5 +
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/replication/AddPeerProcedure.java|  97 ++
 .../replication/DisablePeerProcedure.java   |  70 +
 .../master/replication/EnablePeerProcedure.java |  69 +
 .../master/replication/ModifyPeerProcedure.java |  97 +++---
 .../master/replication/RefreshPeerCallable.java |  67 -
 .../replication/RefreshPeerProcedure.java   |  28 --
 .../master/replication/RemovePeerProcedure.java |  69 +
 .../master/replication/ReplicationManager.java  |  76 +++---
 .../replication/UpdatePeerConfigProcedure.java  |  92 +
 .../hbase/regionserver/HRegionServer.java   |   6 +-
 .../regionserver/RefreshPeerCallable.java   |  70 +
 .../hbase/master/MockNoopMasterServices.java|  23 +++--
 .../replication/DummyModifyPeerProcedure.java   |  13 ++-
 24 files changed, 736 insertions(+), 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/427bce7c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index e9a6906..1235b33 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcher

[09/17] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2017-12-26 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d21d4378
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d21d4378
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d21d4378

Branch: refs/heads/HBASE-19397
Commit: d21d4378cccdb28af6b2f5c9e27dc05c619d65f7
Parents: 7e27d59
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d21d4378/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 700b363..9f71bab 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch 

[17/17] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure

2017-12-26 Thread zhangduo
HBASE-19592 Add UTs to test retry on update zk failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/350f5d12
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/350f5d12
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/350f5d12

Branch: refs/heads/HBASE-19397
Commit: 350f5d126072c1c52251fbd4542b42f905a9561e
Parents: 621ab2c
Author: zhangduo 
Authored: Tue Dec 26 20:39:00 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:43 2017 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 .../TestReplicationProcedureRetry.java  | 200 +++
 2 files changed, 202 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/350f5d12/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index b78cbce..f4ccce8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
-public final class ReplicationPeerManager {
+public class ReplicationPeerManager {
 
   private final ReplicationPeerStorage peerStorage;
 
@@ -61,8 +61,7 @@ public final class ReplicationPeerManager {
 
   private final ConcurrentMap peers;
 
-  private ReplicationPeerManager(ReplicationPeerStorage peerStorage,
-  ReplicationQueueStorage queueStorage,
+  ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
   ConcurrentMap peers) {
 this.peerStorage = peerStorage;
 this.queueStorage = queueStorage;

http://git-wip-us.apache.org/repos/asf/hbase/blob/350f5d12/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
new file mode 100644
index 000..ab35b46
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+
+/**
+ * 

[02/17] hbase git commit: HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics

2017-12-26 Thread zhangduo
HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a561e08
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a561e08
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a561e08

Branch: refs/heads/HBASE-19397
Commit: 5a561e0882313fae5376e8d6b77f26d9f5efe7c6
Parents: 80c7e4e
Author: Chia-Ping Tsai 
Authored: Tue Dec 26 20:53:02 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 07:05:16 2017 +0800

--
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |  3 ++-
 .../org/apache/hadoop/hbase/ServerMetrics.java |  5 +++--
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  | 17 ++---
 .../coprocessor/TestCoprocessorTableEndpoint.java  |  2 +-
 4 files changed, 16 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 2a56e57..7509a85 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.hbase;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
@@ -403,7 +404,7 @@ public class ServerLoad implements ServerMetrics {
   }
 
   @Override
-  public List getCoprocessorNames() {
+  public Set getCoprocessorNames() {
 return metrics.getCoprocessorNames();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 1ef3126..f33e978 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -73,9 +74,9 @@ public interface ServerMetrics {
 
   /**
* Return the RegionServer-level and Region-level coprocessors
-   * @return string list of loaded RegionServer-level and Region-level 
coprocessors
+   * @return string set of loaded RegionServer-level and Region-level 
coprocessors
*/
-  List getCoprocessorNames();
+  Set getCoprocessorNames();
 
   /**
* @return the timestamp (server side) of generating this metrics

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index e501c43..2d71f80 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
@@ -73,7 +76,7 @@ public final class ServerMetricsBuilder {
 .build();
   }
 
-  public static List toCoprocessor(List 
names) {
+  public static List toCoprocessor(Collection 
names) {
 return names.stream()
 .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
 .collect(Collectors.toList());
@@ -116,7 +119,7 @@ public final class ServerMetricsBuilder {
   @Nullable
   private ReplicationLoadSink sink = null;
   private final Map regionStatus = new 

[13/17] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2017-12-26 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e889a959
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e889a959
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e889a959

Branch: refs/heads/HBASE-19397
Commit: e889a959b809a8fcbb1e5c44926618b58b6bd992
Parents: f4366ea
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e889a959/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e889a959/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[12/17] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2017-12-26 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca9765fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca9765fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca9765fc

Branch: refs/heads/HBASE-19397
Commit: ca9765fcb75ee035e0d35377c8dc9a08ac8170cf
Parents: 3885f32
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  62 ++-
 .../hbase/master/MockNoopMasterServices.java|  10 +-
 .../hbase/master/TestMasterNoCluster.java   |   4 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1749 insertions(+), 311 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca9765fc/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static  List nullToEmpty(List list) {
+return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca9765fc/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index ab22199..4e3cea0 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -121,6 +121,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca9765fc/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain 

[11/17] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2017-12-26 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/ca9765fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List peers = new ArrayList<>();
-List peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : 

[05/17] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2017-12-26 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7defbdc2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7defbdc2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7defbdc2

Branch: refs/heads/HBASE-19397
Commit: 7defbdc23a727d3541e774b11a21ffcd8632655e
Parents: e889a95
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 170 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 217 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7defbdc2/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 01df2bd..da231e6 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -330,7 +329,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -351,9 +349,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7defbdc2/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing store
-   * directly and 

[03/17] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2017-12-26 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/621ab2cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/621ab2cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/621ab2cc

Branch: refs/heads/HBASE-19397
Commit: 621ab2cc3dffbb7086d73d22d050d78a1de959e5
Parents: 7defbdc
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/621ab2cc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need clean 

[14/17] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2017-12-26 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a138510
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a138510
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a138510

Branch: refs/heads/HBASE-19397
Commit: 2a1385109af6b99192125252f200f675cfa87a67
Parents: 3ac9109
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a138510/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2a138510/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2a138510/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
--

[16/17] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2017-12-26 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4366eab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4366eab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4366eab

Branch: refs/heads/HBASE-19397
Commit: f4366eab71ff9030d7fa9a171129fb67b9bac203
Parents: ca9765f
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  24 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 176 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 148 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 108 ++
 .../master/ReplicationLogCleaner.java   |  35 +-
 .../regionserver/DumpReplicationQueues.java |  77 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  29 +-
 .../cleaner/TestReplicationHFileCleaner.java|  58 +--
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 227 ---
 .../TestReplicationSourceManagerZkImpl.java |  84 ++---
 25 files changed, 907 insertions(+), 1323 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4366eab/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- final 
ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/f4366eab/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 8e2c5f4..f2e5647 100644
--- 

[04/17] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3885f322
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3885f322
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3885f322

Branch: refs/heads/HBASE-19397
Commit: 3885f322733a02bdc6b93cf792cf18d4702bff13
Parents: 2a13851
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 180 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   9 +-
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |  10 +
 .../regionserver/ReplicationSource.java |   9 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../replication/TestReplicationAdmin.java   |   2 +-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  51 -
 .../TestReplicationSourceManager.java   |  32 ++-
 22 files changed, 533 insertions(+), 472 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3885f322/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 267dc7a..d5285dc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -199,7 +201,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -224,10 +226,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3885f322/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 17b1141..8954d04 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -282,7 +284,7 @@ 

[08/17] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e27d59d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e27d59d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e27d59d

Branch: refs/heads/HBASE-19397
Commit: 7e27d59d01bdd79415745055c67332c71f534bbd
Parents: 427bce7
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e27d59d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index ff2722e..cf8e198 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2463,7 +2463,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2474,7 +2474,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2482,6 +2482,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  default Future addReplicationPeerAsync(String peerId, 
ReplicationPeerConfig peerConfig)
+  throws IOException {
+return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig 
peerConfig,
+  boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2489,6 +2520,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow 

[06/17] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2017-12-26 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f202cc7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f202cc7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f202cc7

Branch: refs/heads/HBASE-19397
Commit: 5f202cc75d5b5e3b642a3f9b52865d971f57f4e9
Parents: 5a561e0
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  33 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 224 +--
 .../procedure/PeerProcedureInterface.java   |  34 +++
 .../master/procedure/RSProcedureDispatcher.java |  90 
 .../master/replication/ModifyPeerProcedure.java | 127 +++
 .../master/replication/RefreshPeerCallable.java |  67 ++
 .../replication/RefreshPeerProcedure.java   | 197 
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  69 +-
 .../hbase/regionserver/RSRpcServices.java   |  56 +++--
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 +++
 .../security/access/TestAccessController.java   |   6 +-
 24 files changed, 1109 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f202cc7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5f202cc7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 54f2b08..e9a6906 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the {@code 
reportProcedureDone}
+ * method.
+ * @param error the error message
+ */
+void remoteOperationFailed(TEnv 

[10/17] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2017-12-26 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ac91094
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ac91094
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ac91094

Branch: refs/heads/HBASE-19397
Commit: 3ac91094bb37902103f10ee57f8ab6ca12b911cd
Parents: d21d437
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Wed Dec 27 09:40:34 2017 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac91094/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index dd85f5c..5f4665c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -389,6 +389,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -1041,7 +1048,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac91094/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 0291165..fd77e1f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -905,6 +905,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -927,22 +948,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static void 

[15/17] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2017-12-26 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/f4366eab/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 93b8649..1faaae3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -48,17 +49,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +305,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
   public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, 
Set peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List liveRegionServers = 
replicationTracker.getListOfRegionServers();
+Set liveRegionServers = new 
HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String queueId : queueIds) {
-  ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
-  List wals = queuesClient.getLogsInQueue(regionserver, 
queueId);
-  if 

[01/17] hbase git commit: HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder [Forced Update!]

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397 aa3bc1d9b -> 350f5d126 (forced update)


HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80c7e4ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80c7e4ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80c7e4ea

Branch: refs/heads/HBASE-19397
Commit: 80c7e4ea7994186ab1876bb53bd7239d2559a481
Parents: 7ce1943
Author: Guanghao Zhang 
Authored: Tue Dec 26 20:56:56 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Dec 26 21:55:49 2017 +0800

--
 .../hadoop/hbase/replication/ReplicationPeersZKImpl.java | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80c7e4ea/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 2f6d52c..289d2aa 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -363,9 +363,14 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 }
 // Update existingConfig's peer config and peer data with the new values, 
but don't touch config
 // or data that weren't explicitly changed
-ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(newConfig);
-builder.putAllConfiguration(existingConfig.getConfiguration());
-builder.putAllPeerData(existingConfig.getPeerData());
+ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(existingConfig);
+builder.putAllConfiguration(newConfig.getConfiguration())
+.putAllPeerData(newConfig.getPeerData())
+.setReplicateAllUserTables(newConfig.replicateAllUserTables())
+
.setNamespaces(newConfig.getNamespaces()).setTableCFsMap(newConfig.getTableCFsMap())
+.setExcludeNamespaces(newConfig.getExcludeNamespaces())
+.setExcludeTableCFsMap(newConfig.getExcludeTableCFsMap())
+.setBandwidth(newConfig.getBandwidth());
 
 try {
   ZKUtil.setData(this.zookeeper, getPeerNode(id),



hbase git commit: HBASE-19624 TestIOFencing hangs

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b6781c9bc -> f0781d49e


HBASE-19624 TestIOFencing hangs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0781d49
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0781d49
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0781d49

Branch: refs/heads/branch-2
Commit: f0781d49e4c1d1996f980c214c2217a71c3f056c
Parents: b6781c9
Author: Chia-Ping Tsai 
Authored: Wed Dec 27 09:33:53 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 09:38:17 2017 +0800

--
 .../java/org/apache/hadoop/hbase/regionserver/CompactSplit.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f0781d49/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index e143511..28fc1a3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -397,6 +397,7 @@ public class CompactSplit implements CompactionRequester, 
PropagatingConfigurati
 }
   } catch (InterruptedException ie) {
 LOG.warn("Interrupted waiting for " + name + " to finish...");
+t.shutdownNow();
   }
 }
   }



hbase git commit: HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 80c7e4ea7 -> 5a561e088


HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a561e08
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a561e08
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a561e08

Branch: refs/heads/master
Commit: 5a561e0882313fae5376e8d6b77f26d9f5efe7c6
Parents: 80c7e4e
Author: Chia-Ping Tsai 
Authored: Tue Dec 26 20:53:02 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 07:05:16 2017 +0800

--
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |  3 ++-
 .../org/apache/hadoop/hbase/ServerMetrics.java |  5 +++--
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  | 17 ++---
 .../coprocessor/TestCoprocessorTableEndpoint.java  |  2 +-
 4 files changed, 16 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 2a56e57..7509a85 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.hbase;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
@@ -403,7 +404,7 @@ public class ServerLoad implements ServerMetrics {
   }
 
   @Override
-  public List getCoprocessorNames() {
+  public Set getCoprocessorNames() {
 return metrics.getCoprocessorNames();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 1ef3126..f33e978 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -73,9 +74,9 @@ public interface ServerMetrics {
 
   /**
* Return the RegionServer-level and Region-level coprocessors
-   * @return string list of loaded RegionServer-level and Region-level 
coprocessors
+   * @return string set of loaded RegionServer-level and Region-level 
coprocessors
*/
-  List getCoprocessorNames();
+  Set getCoprocessorNames();
 
   /**
* @return the timestamp (server side) of generating this metrics

http://git-wip-us.apache.org/repos/asf/hbase/blob/5a561e08/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index e501c43..2d71f80 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
@@ -73,7 +76,7 @@ public final class ServerMetricsBuilder {
 .build();
   }
 
-  public static List toCoprocessor(List 
names) {
+  public static List toCoprocessor(Collection 
names) {
 return names.stream()
 .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
 .collect(Collectors.toList());
@@ -116,7 +119,7 @@ public final class ServerMetricsBuilder {
   @Nullable
   private ReplicationLoadSink sink = null;
   private 

hbase git commit: HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics

2017-12-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 920005a2d -> b6781c9bc


HBASE-19496 (addendum) don't store the duplicate cp names in ServerMetrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6781c9b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6781c9b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6781c9b

Branch: refs/heads/branch-2
Commit: b6781c9bcc7a17ff2c60d9ae5495ce65db704849
Parents: 920005a
Author: Chia-Ping Tsai 
Authored: Tue Dec 26 20:53:02 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Dec 27 07:08:31 2017 +0800

--
 .../java/org/apache/hadoop/hbase/ServerLoad.java   |  3 ++-
 .../org/apache/hadoop/hbase/ServerMetrics.java |  5 +++--
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  | 17 ++---
 .../coprocessor/TestCoprocessorTableEndpoint.java  |  2 +-
 4 files changed, 16 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6781c9b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 2a56e57..7509a85 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -23,6 +23,7 @@ package org.apache.hadoop.hbase;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
@@ -403,7 +404,7 @@ public class ServerLoad implements ServerMetrics {
   }
 
   @Override
-  public List getCoprocessorNames() {
+  public Set getCoprocessorNames() {
 return metrics.getCoprocessorNames();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6781c9b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 1ef3126..f33e978 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -73,9 +74,9 @@ public interface ServerMetrics {
 
   /**
* Return the RegionServer-level and Region-level coprocessors
-   * @return string list of loaded RegionServer-level and Region-level 
coprocessors
+   * @return string set of loaded RegionServer-level and Region-level 
coprocessors
*/
-  List getCoprocessorNames();
+  Set getCoprocessorNames();
 
   /**
* @return the timestamp (server side) of generating this metrics

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6781c9b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index e501c43..2d71f80 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -18,10 +18,13 @@
 package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
@@ -73,7 +76,7 @@ public final class ServerMetricsBuilder {
 .build();
   }
 
-  public static List toCoprocessor(List 
names) {
+  public static List toCoprocessor(Collection 
names) {
 return names.stream()
 .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build())
 .collect(Collectors.toList());
@@ -116,7 +119,7 @@ public final class ServerMetricsBuilder {
   @Nullable
   private ReplicationLoadSink sink = null;
   

[48/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html 
b/apidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
index 6bff1a3..366cbf9 100644
--- 
a/apidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
+++ 
b/apidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var methods = 
{"i0":6,"i1":18,"i2":18,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -117,7 +117,7 @@ public interface 
-All MethodsInstance MethodsAbstract Methods
+All MethodsInstance MethodsAbstract MethodsDefault Methods
 
 Modifier and Type
 Method and Description
@@ -127,47 +127,57 @@ public interface build()
 
 
-ReplicationPeerConfigBuilder
-setBandwidth(longbandwidth)
+default ReplicationPeerConfigBuilder
+putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
 
 
-ReplicationPeerConfigBuilder
-setClusterKey(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclusterKey)
-Set the clusterKey which is the concatenation of the slave 
cluster's:
- 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
-
+default ReplicationPeerConfigBuilder
+putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
 
 
 ReplicationPeerConfigBuilder
-setConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+putConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
 
 
 ReplicationPeerConfigBuilder
-setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+putPeerData(byte[]key,
+   byte[]value)
 
 
 ReplicationPeerConfigBuilder
-setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCFsMap)
+setBandwidth(longbandwidth)
 
 
 ReplicationPeerConfigBuilder
-setNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+setClusterKey(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclusterKey)
+Set the clusterKey which is the concatenation of the slave 
cluster's:
+ 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
+
 
 
 ReplicationPeerConfigBuilder
-setPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or 

[30/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/Put.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Put.html 
b/devapidocs/org/apache/hadoop/hbase/client/Put.html
index 437ad94..699a55b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Put.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Put
+public class Put
 extends Mutation
 implements HeapSize, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableRow
 Used to perform Put operations for a single row.
@@ -251,7 +251,7 @@ implements 
 
 Put
-add(Cellkv)
+add(Cellcell)
 Add the specified KeyValue to this Put operation.
 
 
@@ -442,7 +442,7 @@ implements 
 
 Put
-publicPut(byte[]row)
+publicPut(byte[]row)
 Create a Put operation for the specified row.
 
 Parameters:
@@ -456,7 +456,7 @@ implements 
 
 Put
-publicPut(byte[]row,
+publicPut(byte[]row,
longts)
 Create a Put operation for the specified row, using a given 
timestamp.
 
@@ -472,7 +472,7 @@ implements 
 
 Put
-publicPut(byte[]rowArray,
+publicPut(byte[]rowArray,
introwOffset,
introwLength)
 We make a copy of the passed in row key to keep local.
@@ -490,7 +490,7 @@ implements 
 
 Put
-publicPut(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferrow,
+publicPut(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferrow,
longts)
 
 Parameters:
@@ -505,7 +505,7 @@ implements 
 
 Put
-publicPut(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferrow)
+publicPut(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferrow)
 
 Parameters:
 row - row key; we make a copy of what we are passed to keep 
local.
@@ -518,7 +518,7 @@ implements 
 
 Put
-publicPut(byte[]rowArray,
+publicPut(byte[]rowArray,
introwOffset,
introwLength,
longts)
@@ -538,7 +538,7 @@ implements 
 
 Put
-publicPut(byte[]row,
+publicPut(byte[]row,
booleanrowIsImmutable)
 Create a Put operation for an immutable row key.
 
@@ -556,7 +556,7 @@ implements 
 
 Put
-publicPut(byte[]row,
+publicPut(byte[]row,
longts,
booleanrowIsImmutable)
 Create a Put operation for an immutable row key, using a 
given timestamp.
@@ -576,7 +576,7 @@ implements 
 
 Put
-publicPut(PutputToCopy)
+publicPut(PutputToCopy)
 Copy constructor.  Creates a Put operation cloned from the 
specified Put.
 
 Parameters:
@@ -590,7 +590,7 @@ implements 
 
 Put
-publicPut(byte[]row,
+publicPut(byte[]row,
longts,
http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellfamilyMap)
 Construct the Put with user defined data. NOTED:
@@ -618,7 +618,7 @@ implements 
 
 addColumn
-publicPutaddColumn(byte[]family,
+publicPutaddColumn(byte[]family,
  byte[]qualifier,
  byte[]value)
 Add the specified column and value to this Put 
operation.
@@ -639,7 +639,7 @@ implements 
 addImmutable
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicPutaddImmutable(byte[]family,
+publicPutaddImmutable(byte[]family,
 byte[]qualifier,
 byte[]value)
 Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0.
@@ -655,7 +655,7 @@ public
 
 addColumn
-publicPutaddColumn(byte[]family,
+publicPutaddColumn(byte[]family,
  byte[]qualifier,
  longts,
  byte[]value)
@@ -679,7 +679,7 @@ public
 addImmutable
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicPutaddImmutable(byte[]family,
+publicPutaddImmutable(byte[]family,
 byte[]qualifier,
 longts,
 byte[]value)
@@ -696,7 +696,7 @@ public
 
 addColumn
-publicPutaddColumn(byte[]family,
+publicPutaddColumn(byte[]family,
  http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferqualifier,
 

[37/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 7185dea..2ecc07f 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3471
+3466
 0
 0
-19198
+19162
 
 Files
 
@@ -1717,7 +1717,7 @@
 org/apache/hadoop/hbase/client/Mutation.java
 0
 0
-23
+24
 
 org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java
 0
@@ -9129,16 +9129,6 @@
 0
 1
 
-org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
-0
-0
-2
-
-org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
-0
-0
-7
-
 org/apache/hadoop/hbase/replication/TestMasterReplication.java
 0
 0
@@ -9204,25 +9194,15 @@
 0
 2
 
-org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
-0
-0
-25
-
 org/apache/hadoop/hbase/replication/TestReplicationSyncUpTool.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java
 0
 0
 1
-
-org/apache/hadoop/hbase/replication/TestReplicationTableBase.java
-0
-0
-2
 
 org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
 0
@@ -11124,746 +11104,741 @@
 0
 6
 
-org/apache/hadoop/hbase/util/IterableUtils.java
-0
-0
-1
-
 org/apache/hadoop/hbase/util/JRubyFormat.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/JSONBean.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/util/JSONMetricUtil.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/JVM.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/util/JVMClusterUtil.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/util/JenkinsHash.java
 0
 0
 39
-
+
 org/apache/hadoop/hbase/util/JsonMapper.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/JvmPauseMonitor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/util/KeyLocker.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/LoadTestTool.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/util/MD5Hash.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/util/Methods.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MockServer.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/ModifyRegionUtils.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/util/MultiHConnection.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MultiThreadedAction.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/util/MultiThreadedReader.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
 0
 0
 19
-
+
 org/apache/hadoop/hbase/util/MultiThreadedWriter.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MunkresAssignment.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/MurmurHash3.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/Order.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/util/OrderedBytes.java
 0
 0
 44
-
+
 org/apache/hadoop/hbase/util/Pair.java
 0
 0
 16
-
+
 org/apache/hadoop/hbase/util/PairOfSameType.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/util/PoolMap.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/util/PositionedByteRange.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/PrettyPrinter.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/ProtoUtil.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/util/RedundantKVGenerator.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/ReflectionUtils.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/RegionMover.java
 0
 0
 35
-
+
 org/apache/hadoop/hbase/util/RegionSplitCalculator.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/util/RegionSplitter.java
 0
 0
 13
-
+
 org/apache/hadoop/hbase/util/RetryCounter.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/RollingStatCalculator.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/util/RowBloomContext.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/ServerCommandLine.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/util/ShutdownHookManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/SimpleByteRange.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/util/SimplePositionedByteRange.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/util/Sleeper.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/util/StealJobQueue.java
 0
 0
 2
-
+

hbase-site git commit: INFRA-10751 Empty commit

2017-12-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b618ac405 -> 4cddebd1e


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4cddebd1
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4cddebd1
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4cddebd1

Branch: refs/heads/asf-site
Commit: 4cddebd1e045c54e80a152218da915afba1ff0ed
Parents: b618ac4
Author: jenkins 
Authored: Tue Dec 26 15:21:03 2017 +
Committer: jenkins 
Committed: Tue Dec 26 15:21:03 2017 +

--

--




[25/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 0bd71b3..896f633 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface
 Implements the master RPC services.
@@ -779,7 +779,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -788,7 +788,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 master
-private finalHMaster master
+private finalHMaster master
 
 
 
@@ -805,7 +805,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 MasterRpcServices
-publicMasterRpcServices(HMasterm)
+publicMasterRpcServices(HMasterm)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -827,7 +827,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createConfigurationSubset
-privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuildercreateConfigurationSubset()
+privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuildercreateConfigurationSubset()
 
 Returns:
 Subset of configuration to pass initializing regionservers: e.g.
@@ -841,7 +841,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 addConfig
-privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuilderaddConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builderresp,
+privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuilderaddConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builderresp,

  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey)
 
 
@@ -851,7 +851,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createRpcServer
-protectedRpcServerInterfacecreateRpcServer(Serverserver,
+protectedRpcServerInterfacecreateRpcServer(Serverserver,
  
org.apache.hadoop.conf.Configurationconf,
  RpcSchedulerFactoryrpcSchedulerFactory,
  http://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddressbindAddress,
@@ -871,7 +871,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createPriority
-protectedPriorityFunctioncreatePriority()
+protectedPriorityFunctioncreatePriority()
 
 Overrides:
 createPriorityin
 classRSRpcServices
@@ -884,7 +884,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 switchBalancer
-booleanswitchBalancer(booleanb,
+booleanswitchBalancer(booleanb,
MasterRpcServices.BalanceSwitchModemode)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Assigns balancer switch according to BalanceSwitchMode
@@ -905,7 +905,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 synchronousBalanceSwitch
-booleansynchronousBalanceSwitch(booleanb)
+booleansynchronousBalanceSwitch(booleanb)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -919,7 +919,7 @@ implements 

[32/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index c0bc51d..158e274 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -533,6 +533,8 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Add the specified KeyValue to this operation.
 
+Overrides:
+addin
 classMutation
 Parameters:
 cell - individual Cell
 Returns:
@@ -548,7 +550,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumn
-publicIncrementaddColumn(byte[]family,
+publicIncrementaddColumn(byte[]family,
byte[]qualifier,
longamount)
 Increment the column from the specific family with the 
specified qualifier
@@ -571,7 +573,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getTimeRange
-publicTimeRangegetTimeRange()
+publicTimeRangegetTimeRange()
 Gets the TimeRange used for this increment.
 
 Returns:
@@ -585,7 +587,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setTimeRange
-publicIncrementsetTimeRange(longminStamp,
+publicIncrementsetTimeRange(longminStamp,
   longmaxStamp)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Sets the TimeRange to be used on the Get for this increment.
@@ -615,7 +617,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setTimestamp
-publicIncrementsetTimestamp(longtimestamp)
+publicIncrementsetTimestamp(longtimestamp)
 Description copied from 
class:Mutation
 Set the timestamp of the delete.
 
@@ -630,7 +632,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setReturnResults
-publicIncrementsetReturnResults(booleanreturnResults)
+publicIncrementsetReturnResults(booleanreturnResults)
 
 Overrides:
 setReturnResultsin
 classMutation
@@ -647,7 +649,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 isReturnResults
-publicbooleanisReturnResults()
+publicbooleanisReturnResults()
 
 Overrides:
 isReturnResultsin
 classMutation
@@ -662,7 +664,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 numFamilies
-publicintnumFamilies()
+publicintnumFamilies()
 Method for retrieving the number of families to increment 
from
 
 Overrides:
@@ -678,7 +680,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 hasFamilies
-publicbooleanhasFamilies()
+publicbooleanhasFamilies()
 Method for checking if any families have been inserted into 
this Increment
 
 Returns:
@@ -692,7 +694,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 getFamilyMapOfLongs
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonggetFamilyMapOfLongs()
 Before 0.95, when you called Increment#getFamilyMap(), you 
got back
  a map of families to a list of Longs.  Now, Mutation.getFamilyCellMap()
 returns
  families by list of Cells.  This method has been added so you can have the
@@ -711,7 +713,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 Description copied from 
class:Operation
 Produces a string representation of this Operation. It 
defaults to a JSON
  representation, but falls back to a string representation of the
@@ -730,7 +732,7 @@ implements 

[40/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
index 86fc15e..d02bcdf 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
@@ -38,2586 +38,2590 @@
 030import java.nio.ByteBuffer;
 031import 
java.nio.charset.StandardCharsets;
 032import java.security.SecureRandom;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Comparator;
-036import java.util.Iterator;
-037import java.util.List;
-038
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CellComparator;
-041import 
org.apache.hadoop.hbase.KeyValue;
-042import 
org.apache.hadoop.io.RawComparator;
-043import 
org.apache.hadoop.io.WritableComparator;
-044import 
org.apache.hadoop.io.WritableUtils;
-045import 
org.apache.yetus.audience.InterfaceAudience;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051
-052import com.google.protobuf.ByteString;
-053
-054import sun.misc.Unsafe;
-055
-056/**
-057 * Utility class that handles byte 
arrays, conversions to/from other types,
-058 * comparisons, hash code generation, 
manufacturing keys for HashMaps or
-059 * HashSets, and can be used as key in 
maps or trees.
-060 */
-061@SuppressWarnings("restriction")
-062@InterfaceAudience.Public
-063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-064
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-065justification="It has been like this 
forever")
-066public class Bytes implements 
ComparableBytes {
-067
-068  // Using the charset canonical name for 
String/byte[] conversions is much
-069  // more efficient due to use of cached 
encoders/decoders.
-070  private static final String UTF8_CSN = 
StandardCharsets.UTF_8.name();
-071
-072  //HConstants.EMPTY_BYTE_ARRAY should be 
updated if this changed
-073  private static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(Bytes.class);
-076
-077  /**
-078   * Size of boolean in bytes
-079   */
-080  public static final int SIZEOF_BOOLEAN 
= Byte.SIZE / Byte.SIZE;
-081
-082  /**
-083   * Size of byte in bytes
-084   */
-085  public static final int SIZEOF_BYTE = 
SIZEOF_BOOLEAN;
-086
-087  /**
-088   * Size of char in bytes
-089   */
-090  public static final int SIZEOF_CHAR = 
Character.SIZE / Byte.SIZE;
-091
-092  /**
-093   * Size of double in bytes
-094   */
-095  public static final int SIZEOF_DOUBLE = 
Double.SIZE / Byte.SIZE;
-096
-097  /**
-098   * Size of float in bytes
-099   */
-100  public static final int SIZEOF_FLOAT = 
Float.SIZE / Byte.SIZE;
-101
-102  /**
-103   * Size of int in bytes
-104   */
-105  public static final int SIZEOF_INT = 
Integer.SIZE / Byte.SIZE;
-106
-107  /**
-108   * Size of long in bytes
-109   */
-110  public static final int SIZEOF_LONG = 
Long.SIZE / Byte.SIZE;
-111
-112  /**
-113   * Size of short in bytes
-114   */
-115  public static final int SIZEOF_SHORT = 
Short.SIZE / Byte.SIZE;
-116
-117  /**
-118   * Mask to apply to a long to reveal 
the lower int only. Use like this:
-119   * int i = (int)(0xL ^ 
some_long_value);
-120   */
-121  public static final long 
MASK_FOR_LOWER_INT_IN_LONG = 0xL;
-122
-123  /**
-124   * Estimate of size cost to pay beyond 
payload in jvm for instance of byte [].
-125   * Estimate based on study of jhat and 
jprofiler numbers.
-126   */
-127  // JHat says BU is 56 bytes.
-128  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
-129  public static final int 
ESTIMATED_HEAP_TAX = 16;
-130
-131  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-132
-133  /**
-134   * Returns length of the byte array, 
returning 0 if the array is null.
-135   * Useful for calculating sizes.
-136   * @param b byte array, which can be 
null
-137   * @return 0 if b is null, otherwise 
returns length
-138   */
-139  final public static int len(byte[] b) 
{
-140return b == null ? 0 : b.length;
-141  }
-142
-143  private byte[] bytes;
-144  private int offset;
-145  private int length;
-146
-147  /**
-148   * Create a zero-size sequence.
-149   */
-150  public Bytes() {
-151super();
-152  }
-153
-154  /**
-155   * Create a Bytes using the byte array 
as the initial value.
-156   * @param bytes This array becomes the 
backing storage for the object.
-157   */
-158  public Bytes(byte[] bytes) {
-159this(bytes, 

[39/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
index 86fc15e..d02bcdf 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
@@ -38,2586 +38,2590 @@
 030import java.nio.ByteBuffer;
 031import 
java.nio.charset.StandardCharsets;
 032import java.security.SecureRandom;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Comparator;
-036import java.util.Iterator;
-037import java.util.List;
-038
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CellComparator;
-041import 
org.apache.hadoop.hbase.KeyValue;
-042import 
org.apache.hadoop.io.RawComparator;
-043import 
org.apache.hadoop.io.WritableComparator;
-044import 
org.apache.hadoop.io.WritableUtils;
-045import 
org.apache.yetus.audience.InterfaceAudience;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051
-052import com.google.protobuf.ByteString;
-053
-054import sun.misc.Unsafe;
-055
-056/**
-057 * Utility class that handles byte 
arrays, conversions to/from other types,
-058 * comparisons, hash code generation, 
manufacturing keys for HashMaps or
-059 * HashSets, and can be used as key in 
maps or trees.
-060 */
-061@SuppressWarnings("restriction")
-062@InterfaceAudience.Public
-063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-064
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-065justification="It has been like this 
forever")
-066public class Bytes implements 
ComparableBytes {
-067
-068  // Using the charset canonical name for 
String/byte[] conversions is much
-069  // more efficient due to use of cached 
encoders/decoders.
-070  private static final String UTF8_CSN = 
StandardCharsets.UTF_8.name();
-071
-072  //HConstants.EMPTY_BYTE_ARRAY should be 
updated if this changed
-073  private static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(Bytes.class);
-076
-077  /**
-078   * Size of boolean in bytes
-079   */
-080  public static final int SIZEOF_BOOLEAN 
= Byte.SIZE / Byte.SIZE;
-081
-082  /**
-083   * Size of byte in bytes
-084   */
-085  public static final int SIZEOF_BYTE = 
SIZEOF_BOOLEAN;
-086
-087  /**
-088   * Size of char in bytes
-089   */
-090  public static final int SIZEOF_CHAR = 
Character.SIZE / Byte.SIZE;
-091
-092  /**
-093   * Size of double in bytes
-094   */
-095  public static final int SIZEOF_DOUBLE = 
Double.SIZE / Byte.SIZE;
-096
-097  /**
-098   * Size of float in bytes
-099   */
-100  public static final int SIZEOF_FLOAT = 
Float.SIZE / Byte.SIZE;
-101
-102  /**
-103   * Size of int in bytes
-104   */
-105  public static final int SIZEOF_INT = 
Integer.SIZE / Byte.SIZE;
-106
-107  /**
-108   * Size of long in bytes
-109   */
-110  public static final int SIZEOF_LONG = 
Long.SIZE / Byte.SIZE;
-111
-112  /**
-113   * Size of short in bytes
-114   */
-115  public static final int SIZEOF_SHORT = 
Short.SIZE / Byte.SIZE;
-116
-117  /**
-118   * Mask to apply to a long to reveal 
the lower int only. Use like this:
-119   * int i = (int)(0xL ^ 
some_long_value);
-120   */
-121  public static final long 
MASK_FOR_LOWER_INT_IN_LONG = 0xL;
-122
-123  /**
-124   * Estimate of size cost to pay beyond 
payload in jvm for instance of byte [].
-125   * Estimate based on study of jhat and 
jprofiler numbers.
-126   */
-127  // JHat says BU is 56 bytes.
-128  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
-129  public static final int 
ESTIMATED_HEAP_TAX = 16;
-130
-131  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-132
-133  /**
-134   * Returns length of the byte array, 
returning 0 if the array is null.
-135   * Useful for calculating sizes.
-136   * @param b byte array, which can be 
null
-137   * @return 0 if b is null, otherwise 
returns length
-138   */
-139  final public static int len(byte[] b) 
{
-140return b == null ? 0 : b.length;
-141  }
-142
-143  private byte[] bytes;
-144  private int offset;
-145  private int length;
-146
-147  /**
-148   * Create a zero-size sequence.
-149   */
-150  public Bytes() {
-151super();
-152  }
-153
-154  /**
-155   * Create a Bytes using the byte array 
as the initial value.
-156   * @param bytes This array becomes the 
backing storage for the object.
-157   */
-158  public Bytes(byte[] bytes) {
-159this(bytes, 0, bytes.length);
-160  }
-161
-162  /**
-163   * Set the new Bytes to the contents of 
the passed
-164  

[47/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/util/Bytes.html 
b/apidocs/org/apache/hadoop/hbase/util/Bytes.html
index 95054de..b5d7808 100644
--- a/apidocs/org/apache/hadoop/hbase/util/Bytes.html
+++ b/apidocs/org/apache/hadoop/hbase/util/Bytes.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Bytes
+public class Bytes
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableBytes
 Utility class that handles byte arrays, conversions to/from 
other types,
@@ -1204,7 +1204,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BOOLEAN
-public static finalint SIZEOF_BOOLEAN
+public static finalint SIZEOF_BOOLEAN
 Size of boolean in bytes
 
 See Also:
@@ -1218,7 +1218,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BYTE
-public static finalint SIZEOF_BYTE
+public static finalint SIZEOF_BYTE
 Size of byte in bytes
 
 See Also:
@@ -1232,7 +1232,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_CHAR
-public static finalint SIZEOF_CHAR
+public static finalint SIZEOF_CHAR
 Size of char in bytes
 
 See Also:
@@ -1246,7 +1246,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_DOUBLE
-public static finalint SIZEOF_DOUBLE
+public static finalint SIZEOF_DOUBLE
 Size of double in bytes
 
 See Also:
@@ -1260,7 +1260,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_FLOAT
-public static finalint SIZEOF_FLOAT
+public static finalint SIZEOF_FLOAT
 Size of float in bytes
 
 See Also:
@@ -1274,7 +1274,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_INT
-public static finalint SIZEOF_INT
+public static finalint SIZEOF_INT
 Size of int in bytes
 
 See Also:
@@ -1288,7 +1288,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_LONG
-public static finalint SIZEOF_LONG
+public static finalint SIZEOF_LONG
 Size of long in bytes
 
 See Also:
@@ -1302,7 +1302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_SHORT
-public static finalint SIZEOF_SHORT
+public static finalint SIZEOF_SHORT
 Size of short in bytes
 
 See Also:
@@ -1316,7 +1316,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 MASK_FOR_LOWER_INT_IN_LONG
-public static finallong MASK_FOR_LOWER_INT_IN_LONG
+public static finallong MASK_FOR_LOWER_INT_IN_LONG
 Mask to apply to a long to reveal the lower int only. Use 
like this:
  int i = (int)(0xL ^ some_long_value);
 
@@ -1331,7 +1331,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 ESTIMATED_HEAP_TAX
-public static finalint ESTIMATED_HEAP_TAX
+public static finalint ESTIMATED_HEAP_TAX
 Estimate of size cost to pay beyond payload in jvm for 
instance of byte [].
  Estimate based on study of jhat and jprofiler numbers.
 
@@ -1346,7 +1346,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 BYTES_COMPARATOR
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">Comparatorbyte[] BYTES_COMPARATOR
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">Comparatorbyte[] BYTES_COMPARATOR
 Pass this to TreeMaps where byte [] are keys.
 
 
@@ -1356,7 +1356,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 BYTES_RAWCOMPARATOR
-public static finalorg.apache.hadoop.io.RawComparatorbyte[] 
BYTES_RAWCOMPARATOR
+public static finalorg.apache.hadoop.io.RawComparatorbyte[] 
BYTES_RAWCOMPARATOR
 Use comparing byte arrays, byte-by-byte
 
 
@@ -1374,7 +1374,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Bytes
-publicBytes()
+publicBytes()
 Create a zero-size sequence.
 
 
@@ -1384,7 +1384,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Bytes
-publicBytes(byte[]bytes)
+publicBytes(byte[]bytes)
 Create a Bytes using the byte array as the initial 
value.
 
 Parameters:
@@ -1398,7 +1398,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Bytes
-publicBytes(Bytesibw)
+publicBytes(Bytesibw)
 Set the new Bytes to the contents of the passed
  ibw.
 
@@ -1413,7 +1413,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Bytes

[07/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 9d49b9a..c36fdce 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -165,3380 +165,3375 @@
 157import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 158import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 159import 
org.apache.hadoop.hbase.replication.ReplicationException;
-160import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-161import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-162import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-163import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-164import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
-165import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-166import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-167import 
org.apache.hadoop.hbase.security.UserProvider;
-168import 
org.apache.hadoop.hbase.trace.TraceUtil;
-169import 
org.apache.hadoop.hbase.util.Addressing;
-170import 
org.apache.hadoop.hbase.util.Bytes;
-171import 
org.apache.hadoop.hbase.util.CompressionTest;
-172import 
org.apache.hadoop.hbase.util.EncryptionTest;
-173import 
org.apache.hadoop.hbase.util.FSUtils;
-174import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-175import 
org.apache.hadoop.hbase.util.HasThread;
-176import 
org.apache.hadoop.hbase.util.IdLock;
-177import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-178import 
org.apache.hadoop.hbase.util.Pair;
-179import 
org.apache.hadoop.hbase.util.Threads;
-180import 
org.apache.hadoop.hbase.util.VersionInfo;
-181import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-182import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-183import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-187import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-188import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-189import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-190import 
org.apache.yetus.audience.InterfaceAudience;
-191import 
org.apache.zookeeper.KeeperException;
-192import org.eclipse.jetty.server.Server;
-193import 
org.eclipse.jetty.server.ServerConnector;
-194import 
org.eclipse.jetty.servlet.ServletHolder;
-195import 
org.eclipse.jetty.webapp.WebAppContext;
-196import org.slf4j.Logger;
-197import org.slf4j.LoggerFactory;
-198
-199import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-201import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-202import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-203import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-204import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-205import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-206import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-207import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-208import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-209
-210/**
-211 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-212 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-213 * run the cluster.  All others park 
themselves in their constructor until
-214 * master or cluster shutdown or until 
the active master loses its lease in
-215 * zookeeper.  Thereafter, all running 
master jostle to take over master role.
-216 *
-217 * pThe Master can be asked 
shutdown the cluster. See {@link #shutdown()}.  In
-218 * this case it will tell all 
regionservers to go down and then wait on them
-219 * all reporting in that they are down.  
This master will then shut itself down.
+160import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+161import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+162import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
+163import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
+164import 
org.apache.hadoop.hbase.security.AccessDeniedException;
+165import 
org.apache.hadoop.hbase.security.UserProvider;
+166import 

[10/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
index 1f43c53..cc1277d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
@@ -34,501 +34,496 @@
 026import java.util.List;
 027import java.util.Map;
 028import java.util.Set;
-029import java.util.TreeMap;
-030import java.util.stream.Collectors;
-031
-032import 
org.apache.commons.lang3.StringUtils;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.CompoundConfiguration;
-035import 
org.apache.hadoop.hbase.HBaseConfiguration;
-036import 
org.apache.hadoop.hbase.TableName;
-037import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-038import 
org.apache.hadoop.hbase.replication.ReplicationException;
-039import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-040import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
-041import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-042import 
org.apache.hadoop.hbase.util.Bytes;
-043import 
org.apache.hadoop.hbase.util.Strings;
-044import 
org.apache.yetus.audience.InterfaceAudience;
-045import 
org.apache.yetus.audience.InterfaceStability;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-050import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-051import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-052import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-053import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-054import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-055
-056/**
-057 * Helper for TableCFs Operations.
-058 */
-059@InterfaceAudience.Private
-060@InterfaceStability.Stable
-061public final class 
ReplicationPeerConfigUtil {
-062
-063  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationPeerConfigUtil.class);
-064
-065  private ReplicationPeerConfigUtil() 
{}
-066
-067  public static String 
convertToString(SetString namespaces) {
-068if (namespaces == null) {
-069  return null;
-070}
-071return StringUtils.join(namespaces, 
';');
-072  }
-073
-074  /** convert map to TableCFs Object */
-075  public static 
ReplicationProtos.TableCF[] convert(
-076  MapTableName, ? extends 
CollectionString tableCfs) {
-077if (tableCfs == null) {
-078  return null;
-079}
-080ListReplicationProtos.TableCF 
tableCFList = new ArrayList(tableCfs.entrySet().size());
-081ReplicationProtos.TableCF.Builder 
tableCFBuilder =  ReplicationProtos.TableCF.newBuilder();
-082for (Map.EntryTableName, ? 
extends CollectionString entry : tableCfs.entrySet()) {
-083  tableCFBuilder.clear();
-084  
tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(entry.getKey()));
-085  CollectionString v = 
entry.getValue();
-086  if (v != null  
!v.isEmpty()) {
-087for (String value : 
entry.getValue()) {
-088  
tableCFBuilder.addFamilies(ByteString.copyFromUtf8(value));
-089}
-090  }
-091  
tableCFList.add(tableCFBuilder.build());
-092}
-093return tableCFList.toArray(new 
ReplicationProtos.TableCF[tableCFList.size()]);
-094  }
-095
-096  public static String 
convertToString(MapTableName, ? extends CollectionString 
tableCfs) {
-097if (tableCfs == null) {
-098  return null;
-099}
-100return convert(convert(tableCfs));
-101  }
-102
-103  /**
-104   *  Convert string to TableCFs 
Object.
-105   *  This is only for read TableCFs 
information from TableCF node.
-106   *  Input String Format: 
ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3.
-107   * */
-108  public static 
ReplicationProtos.TableCF[] convert(String tableCFsConfig) {
-109if (tableCFsConfig == null || 
tableCFsConfig.trim().length() == 0) {
-110  return null;
-111}
-112
-113ReplicationProtos.TableCF.Builder 
tableCFBuilder = ReplicationProtos.TableCF.newBuilder();
-114String[] tables = 
tableCFsConfig.split(";");
-115ListReplicationProtos.TableCF 
tableCFList = new ArrayList(tables.length);
-116
-117for (String tab : tables) {
-118  // 1 ignore empty table config
-119  tab = tab.trim();
-120  if (tab.length() == 0) {
-121continue;
-122  }
-123  // 2 split to "table" and 
"cf1,cf2"
-124  //   for each table: 
"table#cf1,cf2" or "table"
-125  String[] pair = 

[31/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Mutation.html 
b/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
index a5e9ff7..55b12f7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":9,"i41":9,"i42":9,"i43":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public abstract class Mutation
+public abstract class Mutation
 extends OperationWithAttributes
 implements Row, CellScannable, HeapSize
 
@@ -137,6 +137,25 @@ implements 
 
 
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+private static class
+Mutation.CellWrapper
+
+
+
+
 
 
 
@@ -241,39 +260,43 @@ implements Method and Description
 
 
+(package private) Mutation
+add(Cellcell)
+
+
 CellScanner
 cellScanner()
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 cellToStringMap(Cellc)
 
-
+
 (package private) static byte[]
 checkRow(byte[]row)
 
-
+
 (package private) static byte[]
 checkRow(byte[]row,
 intoffset,
 intlength)
 
-
+
 (package private) static void
 checkRow(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferrow)
 
-
+
 int
 compareTo(Rowd)
 
-
+
 (package private) KeyValue
 createPutKeyValue(byte[]family,
  byte[]qualifier,
  longts,
  byte[]value)
 
-
+
 (package private) KeyValue
 createPutKeyValue(byte[]family,
  byte[]qualifier,
@@ -283,7 +306,7 @@ implements Create a KeyValue with this objects row key and the Put 
identifier.
 
 
-
+
 (package private) KeyValue
 createPutKeyValue(byte[]family,
  http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferqualifier,
@@ -291,75 +314,75 @@ implements http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffervalue,
  Tag[]tags)
 
-
+
 protected long
 extraHeapSize()
 Subclasses should override this method to add the heap size 
of their own fields.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell
 get(byte[]family,
byte[]qualifier)
 Returns a list of all KeyValue objects with matching column 
family and qualifier.
 
 
-
+
 byte[]
 getACL()
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell
 getCellList(byte[]family)
 Creates an empty list if one doesn't exist for the given 
column family
  or else it returns the associated list of Cell objects.
 
 
-
+
 CellVisibility
 getCellVisibility()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true;
 title="class or interface in java.util">UUID
 getClusterIds()
 
-
+
 Durability
 getDurability()
 Get 

[41/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
index 86fc15e..d02bcdf 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
@@ -38,2586 +38,2590 @@
 030import java.nio.ByteBuffer;
 031import 
java.nio.charset.StandardCharsets;
 032import java.security.SecureRandom;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Comparator;
-036import java.util.Iterator;
-037import java.util.List;
-038
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CellComparator;
-041import 
org.apache.hadoop.hbase.KeyValue;
-042import 
org.apache.hadoop.io.RawComparator;
-043import 
org.apache.hadoop.io.WritableComparator;
-044import 
org.apache.hadoop.io.WritableUtils;
-045import 
org.apache.yetus.audience.InterfaceAudience;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051
-052import com.google.protobuf.ByteString;
-053
-054import sun.misc.Unsafe;
-055
-056/**
-057 * Utility class that handles byte 
arrays, conversions to/from other types,
-058 * comparisons, hash code generation, 
manufacturing keys for HashMaps or
-059 * HashSets, and can be used as key in 
maps or trees.
-060 */
-061@SuppressWarnings("restriction")
-062@InterfaceAudience.Public
-063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-064
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-065justification="It has been like this 
forever")
-066public class Bytes implements 
ComparableBytes {
-067
-068  // Using the charset canonical name for 
String/byte[] conversions is much
-069  // more efficient due to use of cached 
encoders/decoders.
-070  private static final String UTF8_CSN = 
StandardCharsets.UTF_8.name();
-071
-072  //HConstants.EMPTY_BYTE_ARRAY should be 
updated if this changed
-073  private static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(Bytes.class);
-076
-077  /**
-078   * Size of boolean in bytes
-079   */
-080  public static final int SIZEOF_BOOLEAN 
= Byte.SIZE / Byte.SIZE;
-081
-082  /**
-083   * Size of byte in bytes
-084   */
-085  public static final int SIZEOF_BYTE = 
SIZEOF_BOOLEAN;
-086
-087  /**
-088   * Size of char in bytes
-089   */
-090  public static final int SIZEOF_CHAR = 
Character.SIZE / Byte.SIZE;
-091
-092  /**
-093   * Size of double in bytes
-094   */
-095  public static final int SIZEOF_DOUBLE = 
Double.SIZE / Byte.SIZE;
-096
-097  /**
-098   * Size of float in bytes
-099   */
-100  public static final int SIZEOF_FLOAT = 
Float.SIZE / Byte.SIZE;
-101
-102  /**
-103   * Size of int in bytes
-104   */
-105  public static final int SIZEOF_INT = 
Integer.SIZE / Byte.SIZE;
-106
-107  /**
-108   * Size of long in bytes
-109   */
-110  public static final int SIZEOF_LONG = 
Long.SIZE / Byte.SIZE;
-111
-112  /**
-113   * Size of short in bytes
-114   */
-115  public static final int SIZEOF_SHORT = 
Short.SIZE / Byte.SIZE;
-116
-117  /**
-118   * Mask to apply to a long to reveal 
the lower int only. Use like this:
-119   * int i = (int)(0xL ^ 
some_long_value);
-120   */
-121  public static final long 
MASK_FOR_LOWER_INT_IN_LONG = 0xL;
-122
-123  /**
-124   * Estimate of size cost to pay beyond 
payload in jvm for instance of byte [].
-125   * Estimate based on study of jhat and 
jprofiler numbers.
-126   */
-127  // JHat says BU is 56 bytes.
-128  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
-129  public static final int 
ESTIMATED_HEAP_TAX = 16;
-130
-131  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-132
-133  /**
-134   * Returns length of the byte array, 
returning 0 if the array is null.
-135   * Useful for calculating sizes.
-136   * @param b byte array, which can be 
null
-137   * @return 0 if b is null, otherwise 
returns length
-138   */
-139  final public static int len(byte[] b) 
{
-140return b == null ? 0 : b.length;
-141  }
-142
-143  private byte[] bytes;
-144  private int offset;
-145  private int length;
-146
-147  /**
-148   * Create a zero-size sequence.
-149   */
-150  public Bytes() {
-151super();
-152  }
-153
-154  /**
-155   * Create a Bytes using the byte array 
as the initial value.
-156   * @param bytes This array becomes the 
backing storage for the object.
-157   */
-158  public Bytes(byte[] bytes) {
-159this(bytes, 

[43/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 38865a3..8b6f080 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -26,746 +26,954 @@
 018
 019package org.apache.hadoop.hbase.client;
 020
-021import java.io.IOException;
-022import java.nio.ByteBuffer;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.HashMap;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.NavigableMap;
-029import java.util.TreeMap;
-030import java.util.UUID;
-031import java.util.stream.Collectors;
-032import org.apache.hadoop.hbase.Cell;
-033import 
org.apache.hadoop.hbase.CellScannable;
-034import 
org.apache.hadoop.hbase.CellScanner;
-035import 
org.apache.hadoop.hbase.CellUtil;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.KeyValue;
-038import 
org.apache.hadoop.hbase.PrivateCellUtil;
-039import org.apache.hadoop.hbase.Tag;
-040import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-041import 
org.apache.hadoop.hbase.io.HeapSize;
-042import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-043import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-044import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
-045import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
-046import 
org.apache.hadoop.hbase.security.access.Permission;
-047import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-048import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
-049import 
org.apache.hadoop.hbase.util.Bytes;
-050import 
org.apache.hadoop.hbase.util.ClassSize;
-051import 
org.apache.yetus.audience.InterfaceAudience;
-052
-053import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-054import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-055import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-056import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
-057import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
-058import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
+021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+022
+023import java.io.IOException;
+024import java.nio.ByteBuffer;
+025import java.util.ArrayList;
+026import java.util.Arrays;
+027import java.util.HashMap;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import java.util.Optional;
+033import java.util.TreeMap;
+034import java.util.UUID;
+035import java.util.stream.Collectors;
+036import 
org.apache.hadoop.hbase.ArrayBackedTag;
+037import org.apache.hadoop.hbase.Cell;
+038import 
org.apache.hadoop.hbase.CellScannable;
+039import 
org.apache.hadoop.hbase.CellScanner;
+040import 
org.apache.hadoop.hbase.CellUtil;
+041import 
org.apache.hadoop.hbase.ExtendedCell;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.KeyValue;
+044import 
org.apache.hadoop.hbase.PrivateCellUtil;
+045import org.apache.hadoop.hbase.RawCell;
+046import org.apache.hadoop.hbase.Tag;
+047import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+048import 
org.apache.hadoop.hbase.io.HeapSize;
+049import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+050import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+051import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
+052import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
+053import 
org.apache.hadoop.hbase.security.access.Permission;
+054import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
+055import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+056import 
org.apache.hadoop.hbase.util.Bytes;
+057import 
org.apache.hadoop.hbase.util.ClassSize;
+058import 
org.apache.yetus.audience.InterfaceAudience;
 059
-060@InterfaceAudience.Public
-061public abstract class Mutation extends 
OperationWithAttributes implements Row, CellScannable,
-062HeapSize {
-063  public static final long 
MUTATION_OVERHEAD = ClassSize.align(
-064  // This
-065  ClassSize.OBJECT +
-066  // row + 
OperationWithAttributes.attributes
-067  2 * ClassSize.REFERENCE +
-068  // Timestamp
-069  1 * Bytes.SIZEOF_LONG +
-070  // durability
-071  ClassSize.REFERENCE +
-072  // familyMap
-073  ClassSize.REFERENCE +
-074  // familyMap
-075  ClassSize.TREEMAP +
-076  // priority
-077  ClassSize.INTEGER
-078  );
-079
-080  /**
-081   * The attribute 

[33/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/Append.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Append.html 
b/devapidocs/org/apache/hadoop/hbase/client/Append.html
index 4a72663..b085e40 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Append.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Append.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Append
+public class Append
 extends Mutation
 Performs Append operations on a single row.
  
@@ -163,6 +163,10 @@ extends HEAP_OVERHEAD
 
 
+private static org.slf4j.Logger
+LOG
+
+
 private TimeRange
 tr
 
@@ -393,13 +397,22 @@ extends 
 
 Field Detail
+
+
+
+
+
+LOG
+private static finalorg.slf4j.Logger LOG
+
+
 
 
 
 
 
 HEAP_OVERHEAD
-private static finallong HEAP_OVERHEAD
+private static finallong HEAP_OVERHEAD
 
 
 
@@ -408,7 +421,7 @@ extends 
 
 tr
-privateTimeRange tr
+privateTimeRange tr
 
 
 
@@ -425,7 +438,7 @@ extends 
 
 Append
-publicAppend(byte[]row)
+publicAppend(byte[]row)
 Create a Append operation for the specified row.
  
  At least one column must be appended to.
@@ -441,7 +454,7 @@ extends 
 
 Append
-publicAppend(AppendappendToCopy)
+publicAppend(AppendappendToCopy)
 Copy constructor
 
 Parameters:
@@ -455,7 +468,7 @@ extends 
 
 Append
-publicAppend(byte[]rowArray,
+publicAppend(byte[]rowArray,
   introwOffset,
   introwLength)
 Create a Append operation for the specified row.
@@ -475,7 +488,7 @@ extends 
 
 Append
-publicAppend(byte[]row,
+publicAppend(byte[]row,
   longts,
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellfamilyMap)
 Construct the Append with user defined data. NOTED:
@@ -503,7 +516,7 @@ extends 
 
 setTimeRange
-publicAppendsetTimeRange(longminStamp,
+publicAppendsetTimeRange(longminStamp,
longmaxStamp)
 Sets the TimeRange to be used on the Get for this append.
  
@@ -530,7 +543,7 @@ extends 
 
 getTimeRange
-publicTimeRangegetTimeRange()
+publicTimeRangegetTimeRange()
 Gets the TimeRange used for this append.
 
 Returns:
@@ -544,7 +557,7 @@ extends 
 
 extraHeapSize
-protectedlongextraHeapSize()
+protectedlongextraHeapSize()
 Description copied from 
class:Mutation
 Subclasses should override this method to add the heap size 
of their own fields.
 
@@ -561,7 +574,7 @@ extends 
 
 setReturnResults
-publicAppendsetReturnResults(booleanreturnResults)
+publicAppendsetReturnResults(booleanreturnResults)
 
 Overrides:
 setReturnResultsin
 classMutation
@@ -578,7 +591,7 @@ extends 
 
 isReturnResults
-publicbooleanisReturnResults()
+publicbooleanisReturnResults()
 
 Overrides:
 isReturnResultsin
 classMutation
@@ -594,7 +607,7 @@ extends 
 add
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicAppendadd(byte[]family,
+publicAppendadd(byte[]family,
   byte[]qualifier,
   byte[]value)
 Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0.
@@ -616,7 +629,7 @@ public
 
 addColumn
-publicAppendaddColumn(byte[]family,
+publicAppendaddColumn(byte[]family,
 byte[]qualifier,
 byte[]value)
 Add the specified column and value to this Append 
operation.
@@ -636,9 +649,11 @@ public
 
 add
-publicAppendadd(Cellcell)
+publicAppendadd(Cellcell)
 Add column and value to this Append operation.
 
+Overrides:
+addin
 classMutation
 Parameters:
 cell - 
 Returns:
@@ -652,7 +667,7 @@ public
 
 setTimestamp
-publicAppendsetTimestamp(longtimestamp)
+publicAppendsetTimestamp(longtimestamp)
 Description copied from 
class:Mutation
 Set the timestamp of the delete.
 
@@ -667,7 +682,7 @@ public
 
 setAttribute
-publicAppendsetAttribute(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
+publicAppendsetAttribute(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
byte[]value)
 Description copied from 
interface:Attributes
 Sets an attribute.
@@ -690,7 +705,7 @@ public
 
 setId
-publicAppendsetId(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
+publicAppendsetId(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 Description copied from 
class:OperationWithAttributes
 This method 

[51/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/b618ac40
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/b618ac40
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/b618ac40

Branch: refs/heads/asf-site
Commit: b618ac405a915aa29b311e2b4357b03af5f8e0d8
Parents: 637fc86
Author: jenkins 
Authored: Tue Dec 26 15:20:42 2017 +
Committer: jenkins 
Committed: Tue Dec 26 15:20:42 2017 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|6 +-
 apidocs/index-all.html  |   12 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |4 +-
 .../org/apache/hadoop/hbase/client/Append.html  |   48 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |   58 +-
 .../apache/hadoop/hbase/client/Increment.html   |   48 +-
 .../apache/hadoop/hbase/client/Mutation.html|   80 +-
 apidocs/org/apache/hadoop/hbase/client/Put.html |   60 +-
 .../hadoop/hbase/client/class-use/Delete.html   |2 +-
 .../hadoop/hbase/client/class-use/Put.html  |2 +-
 .../ReplicationPeerConfigBuilder.html   |   90 +-
 .../class-use/ReplicationPeerConfigBuilder.html |   30 +-
 .../hbase/util/Bytes.ByteArrayComparator.html   |8 +-
 .../hbase/util/Bytes.RowEndKeyComparator.html   |8 +-
 apidocs/org/apache/hadoop/hbase/util/Bytes.html |  300 +-
 .../org/apache/hadoop/hbase/client/Append.html  |  451 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |  637 +-
 .../apache/hadoop/hbase/client/Increment.html   |  501 +-
 .../apache/hadoop/hbase/client/Mutation.html| 1644 ++--
 .../org/apache/hadoop/hbase/client/Put.html |  647 +-
 .../replication/ReplicationPeerConfig.html  |   10 +-
 .../ReplicationPeerConfigBuilder.html   |   40 +-
 .../hbase/util/Bytes.ByteArrayComparator.html   | 5150 ++--
 .../hbase/util/Bytes.RowEndKeyComparator.html   | 5150 ++--
 .../org/apache/hadoop/hbase/util/Bytes.html | 5150 ++--
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 7302 +-
 checkstyle.rss  |  104 +-
 coc.html|4 +-
 cygwin.html |4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/allclasses-frame.html|4 +-
 devapidocs/allclasses-noframe.html  |4 +-
 devapidocs/constant-values.html |6 +-
 devapidocs/index-all.html   |  212 +-
 devapidocs/org/apache/hadoop/hbase/Cell.html|2 +-
 .../org/apache/hadoop/hbase/ExtendedCell.html   |2 +-
 .../apache/hadoop/hbase/KeyValueTestUtil.html   |   30 +-
 devapidocs/org/apache/hadoop/hbase/RawCell.html |2 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hadoop/hbase/class-use/Abortable.html   |9 -
 .../org/apache/hadoop/hbase/class-use/Cell.html |   52 +-
 .../hadoop/hbase/class-use/ExtendedCell.html|   28 +-
 .../apache/hadoop/hbase/class-use/RawCell.html  |   28 +-
 .../org/apache/hadoop/hbase/class-use/Tag.html  |   17 +
 .../org/apache/hadoop/hbase/client/Append.html  |   67 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |   60 +-
 .../apache/hadoop/hbase/client/Increment.html   |   50 +-
 .../hbase/client/Mutation.CellWrapper.html  |  951 +++
 .../apache/hadoop/hbase/client/Mutation.html|  247 +-
 .../client/NoncedRegionServerCallable.html  |4 +-
 .../org/apache/hadoop/hbase/client/Put.html |   62 +-
 .../hadoop/hbase/client/class-use/Delete.html   |   32 +-
 .../hadoop/hbase/client/class-use/Get.html  |   32 +-
 .../client/class-use/Mutation.CellWrapper.html  |  125 +
 .../hadoop/hbase/client/class-use/Mutation.html |   20 +-
 .../hadoop/hbase/client/class-use/Put.html  |   34 +-
 .../hadoop/hbase/client/class-use/Result.html   |   22 -
 .../hbase/client/class-use/RowMutations.html|   29 +-
 .../hadoop/hbase/client/package-frame.html  |1 +
 .../hadoop/hbase/client/package-summary.html|  128 +-
 .../hadoop/hbase/client/package-tree.html   |   25 +-
 .../apache/hadoop/hbase/client/package-use.html |   22 +-
 .../replication/ReplicationPeerConfigUtil.html  |   44 +-
 .../hadoop/hbase/executor/package-tree.html |2 +-
 .../hadoop/hbase/filter/package-tree.html   |8 +-
 .../org/apache/hadoop/hbase/io/HeapSize.html|2 +-
 .../hadoop/hbase/io/class-use/HeapSize.html |4 +
 

[22/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
deleted file mode 100644
index 0aff120..000
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
+++ /dev/null
@@ -1,530 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-TableBasedReplicationQueuesClientImpl (Apache HBase 3.0.0-SNAPSHOT 
API)
-
-
-
-
-
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.replication
-Class 
TableBasedReplicationQueuesClientImpl
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.replication.ReplicationTableBase
-
-
-org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesClientImpl
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-ReplicationQueuesClient
-
-
-
-@InterfaceAudience.Private
-public class TableBasedReplicationQueuesClientImpl
-extends ReplicationTableBase
-implements ReplicationQueuesClient
-Implements the ReplicationQueuesClient interface on top of 
the Replication Table. It utilizes
- the ReplicationTableBase to access the Replication Table.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-
-
-
-Fields inherited from classorg.apache.hadoop.hbase.replication.ReplicationTableBase
-abortable,
 CF_QUEUE,
 COL_QUEUE_OWNER,
 COL_QUEUE_OWNER_HISTORY,
 conf,
 QUEUE_HISTORY_DELIMITER,
 REPLICATION_TABLE_NAME,
 ROW_KEY_DELIMITER
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TableBasedReplicationQueuesClientImpl(org.apache.hadoop.conf.Configurationconf,
- Abortableabortable)
-
-
-TableBasedReplicationQueuesClientImpl(ReplicationQueuesClientArgumentsargs)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-getAllPeersFromHFileRefsQueue()
-Get list of all peers from hfile reference queue.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-getAllQueues(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringserverName)
-Get a list of all queues for the specified region 
server.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-getAllWALs()
-Load all wals in all replication queues from ZK.
-
-
-
-int
-getHFileRefsNodeChangeVersion()
-Get the change version number of replication hfile 
references node.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-getListOfReplicators()
-Get a list of all region servers that have outstanding 
replication queues.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or 

[35/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index a10fb33..8082b30 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -983,6 +983,8 @@
 
 Add the pair to the container, grouped by the 
regionName
 
+add(Cell)
 - Method in class org.apache.hadoop.hbase.client.Mutation
+
 add(Cell)
 - Method in class org.apache.hadoop.hbase.client.Put
 
 Add the specified KeyValue to this Put operation.
@@ -1748,8 +1750,6 @@
 
 addHFileRefs(String,
 ListPairPath, Path) - Method in class 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl
 
-addHFileRefs(String,
 ListPairPath, Path) - Method in class 
org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
 addHFileRefsToQueue(TableName,
 byte[], ListPairPath, Path) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.Replication
 
 addHistogram(String,
 Histogram, MetricsRecordBuilder) - Method in class 
org.apache.hadoop.hbase.metrics.impl.HBaseMetrics2HadoopMetricsAdapter
@@ -1898,8 +1898,6 @@
 
 addLog(String,
 String) - Method in class org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl
 
-addLog(String,
 String) - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
 addLogFile(String,
 String) - Method in class org.apache.hadoop.hbase.snapshot.SnapshotInfo.SnapshotStats
 
 Add the specified log file to the stats
@@ -2026,8 +2024,6 @@
 
 addPeerToHFileRefs(String)
 - Method in class org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl
 
-addPeerToHFileRefs(String)
 - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
 addPrimaryAssignment(RegionInfo,
 ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
 
 addProperty(String,
 String) - Method in class org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel
@@ -4179,10 +4175,6 @@
 
 attempts
 - Variable in class org.apache.hadoop.hbase.util.RetryCounter
 
-attemptToClaimQueue(Result,
 String) - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
-Attempt to claim the given queue with a checkAndPut on the 
OWNER column.
-
 attemptToOwnTask(boolean,
 ZKWatcher, ServerName, String, int) - Static method in class 
org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination
 
 Try to own the task by transitioning the zk node data from 
UNASSIGNED to OWNED.
@@ -7112,8 +7104,6 @@
 
 Build the row key for the given queueId.
 
-buildQueueRowKey(String)
 - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
 buildRegionOpenInfoRequest(MasterProcedureEnv)
 - Method in class org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionOpenOperation
 
 buildRegionSpaceUseReportRequest(MapRegionInfo,
 Long) - Method in class org.apache.hadoop.hbase.regionserver.HRegionServer
@@ -9057,6 +9047,8 @@
 The unit of storage in HBase consisting of the following 
fields:
  
 
+cell
 - Variable in class org.apache.hadoop.hbase.client.Mutation.CellWrapper
+
 cell
 - Variable in class org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferCell
 
 cell
 - Variable in class org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyCell
@@ -9568,6 +9560,8 @@
 
 cellVisibilityExpr
 - Variable in class org.apache.hadoop.hbase.mapreduce.TsvImporterMapper
 
+CellWrapper(Cell)
 - Constructor for class org.apache.hadoop.hbase.client.Mutation.CellWrapper
+
 CellWritableComparable()
 - Constructor for class org.apache.hadoop.hbase.mapreduce.Import.CellWritableComparable
 
 CellWritableComparable(Cell)
 - Constructor for class org.apache.hadoop.hbase.mapreduce.Import.CellWritableComparable
@@ -10542,10 +10536,6 @@
 
 checkpoint
 - Variable in class org.apache.hadoop.hbase.mapreduce.SampleUploader.Uploader
 
-checkQueueExists(String)
 - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
-Check if the queue specified by queueId is stored in 
HBase
-
 checkQueuesDeleted(String)
 - Method in class org.apache.hadoop.hbase.replication.ReplicationPeersZKImpl
 
 checkQuota(int,
 int, int) - Method in class org.apache.hadoop.hbase.quotas.DefaultOperationQuota
@@ -11143,8 +11133,6 @@
 
 claimQueue(String,
 String) - Method in class org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl
 
-claimQueue(String,
 String) - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
 CLASS
 - Static variable in class org.apache.hadoop.hbase.util.CommonFSUtils.StreamCapabilities
 
 CLASS_LOADER
 - Static variable in exception org.apache.hadoop.hbase.ipc.RemoteWithExtrasException
@@ -12148,6 +12136,8 @@
  Use RawCell.cloneTags()
 
 
+cloneTags()
 - Method in class org.apache.hadoop.hbase.client.Mutation.CellWrapper
+
 cloneTags(Cell)
 - Static method 

[27/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 0c57dc7..061085c 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HMaster
+public class HMaster
 extends HRegionServer
 implements MasterServices
 HMaster is the "master server" for HBase. An HBase cluster 
has one active
@@ -1434,7 +1434,7 @@ implements 
 
 LOG
-private staticorg.slf4j.Logger LOG
+private staticorg.slf4j.Logger LOG
 
 
 
@@ -1443,7 +1443,7 @@ implements 
 
 MASTER
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
 
 See Also:
 Constant
 Field Values
@@ -1456,7 +1456,7 @@ implements 
 
 activeMasterManager
-private finalActiveMasterManager activeMasterManager
+private finalActiveMasterManager activeMasterManager
 
 
 
@@ -1465,7 +1465,7 @@ implements 
 
 regionServerTracker
-RegionServerTracker regionServerTracker
+RegionServerTracker regionServerTracker
 
 
 
@@ -1474,7 +1474,7 @@ implements 
 
 drainingServerTracker
-privateDrainingServerTracker drainingServerTracker
+privateDrainingServerTracker drainingServerTracker
 
 
 
@@ -1483,7 +1483,7 @@ implements 
 
 loadBalancerTracker
-LoadBalancerTracker loadBalancerTracker
+LoadBalancerTracker loadBalancerTracker
 
 
 
@@ -1492,7 +1492,7 @@ implements 
 
 splitOrMergeTracker
-privateSplitOrMergeTracker splitOrMergeTracker
+privateSplitOrMergeTracker splitOrMergeTracker
 
 
 
@@ -1501,7 +1501,7 @@ implements 
 
 regionNormalizerTracker
-privateRegionNormalizerTracker 
regionNormalizerTracker
+privateRegionNormalizerTracker 
regionNormalizerTracker
 
 
 
@@ -1510,7 +1510,7 @@ implements 
 
 maintenanceModeTracker
-privateMasterMaintenanceModeTracker maintenanceModeTracker
+privateMasterMaintenanceModeTracker maintenanceModeTracker
 
 
 
@@ -1519,7 +1519,7 @@ implements 
 
 clusterSchemaService
-privateClusterSchemaService clusterSchemaService
+privateClusterSchemaService clusterSchemaService
 
 
 
@@ -1528,7 +1528,7 @@ implements 
 
 HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
 
 See Also:
 Constant
 Field Values
@@ -1541,7 +1541,7 @@ implements 
 
 DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
-public static finalint DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
+public static finalint DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
 
 See Also:
 Constant
 Field Values
@@ -1554,7 +1554,7 @@ implements 
 
 metricsMaster
-finalMetricsMaster metricsMaster
+finalMetricsMaster metricsMaster
 
 
 
@@ -1563,7 +1563,7 @@ implements 
 
 fileSystemManager
-privateMasterFileSystem fileSystemManager
+privateMasterFileSystem fileSystemManager
 
 
 
@@ -1572,7 +1572,7 @@ implements 
 
 walManager
-privateMasterWalManager walManager
+privateMasterWalManager walManager
 
 
 
@@ -1581,7 +1581,7 @@ implements 
 
 serverManager
-private volatileServerManager serverManager
+private volatileServerManager serverManager
 
 
 
@@ -1590,7 +1590,7 @@ implements 
 
 assignmentManager
-privateAssignmentManager assignmentManager
+privateAssignmentManager assignmentManager
 
 
 
@@ -1599,7 +1599,7 @@ implements 
 
 replicationManager
-privateReplicationManager replicationManager
+privateReplicationManager replicationManager
 
 
 
@@ -1608,7 +1608,7 @@ implements 
 
 rsFatals
-MemoryBoundedLogMessageBuffer rsFatals
+MemoryBoundedLogMessageBuffer rsFatals
 
 
 
@@ -1617,7 +1617,7 @@ implements 
 
 activeMaster
-private volatileboolean activeMaster
+private volatileboolean activeMaster
 
 
 
@@ -1626,7 +1626,7 @@ implements 
 
 initialized
-private finalProcedureEvent initialized
+private finalProcedureEvent initialized
 
 
 
@@ -1635,7 +1635,7 @@ implements 
 
 serviceStarted
-volatileboolean serviceStarted
+volatileboolean serviceStarted
 
 
 
@@ -1644,7 +1644,7 @@ implements 
 
 serverCrashProcessingEnabled
-private finalProcedureEvent serverCrashProcessingEnabled
+private finalProcedureEvent serverCrashProcessingEnabled
 
 
 
@@ -1653,7 +1653,7 @@ implements 
 
 maxBlancingTime
-private finalint maxBlancingTime
+private 

[46/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
index 809287d..2fc6c0d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Append.html
@@ -25,12 +25,12 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import java.util.List;
-021import java.util.Map;
-022import java.util.NavigableMap;
-023import java.util.UUID;
-024import org.apache.hadoop.hbase.Cell;
-025import 
org.apache.hadoop.hbase.CellUtil;
+020import java.io.IOException;
+021import java.util.List;
+022import java.util.Map;
+023import java.util.NavigableMap;
+024import java.util.UUID;
+025import org.apache.hadoop.hbase.Cell;
 026import 
org.apache.hadoop.hbase.KeyValue;
 027import 
org.apache.hadoop.hbase.io.TimeRange;
 028import 
org.apache.hadoop.hbase.security.access.Permission;
@@ -38,225 +38,226 @@
 030import 
org.apache.hadoop.hbase.util.Bytes;
 031import 
org.apache.hadoop.hbase.util.ClassSize;
 032import 
org.apache.yetus.audience.InterfaceAudience;
-033
-034/**
-035 * Performs Append operations on a single 
row.
-036 * p
-037 * This operation ensures atomicty to 
readers. Appends are done
-038 * under a single row lock, so write 
operations to a row are synchronized, and
-039 * readers are guaranteed to see this 
operation fully completed.
-040 * p
-041 * To append to a set of columns of a 
row, instantiate an Append object with the
-042 * row to append to. At least one column 
to append must be specified using the
-043 * {@link #addColumn(byte[], byte[], 
byte[])} method.
-044 */
-045@InterfaceAudience.Public
-046public class Append extends Mutation {
-047  private static final long HEAP_OVERHEAD 
= ClassSize.REFERENCE + ClassSize.TIMERANGE;
-048  private TimeRange tr = new 
TimeRange();
-049
-050  /**
-051   * Sets the TimeRange to be used on the 
Get for this append.
-052   * p
-053   * This is useful for when you have 
counters that only last for specific
-054   * periods of time (ie. counters that 
are partitioned by time).  By setting
-055   * the range of valid times for this 
append, you can potentially gain
-056   * some performance with a more optimal 
Get operation.
-057   * Be careful adding the time range to 
this class as you will update the old cell if the
-058   * time range doesn't include the 
latest cells.
-059   * p
-060   * This range is used as [minStamp, 
maxStamp).
-061   * @param minStamp minimum timestamp 
value, inclusive
-062   * @param maxStamp maximum timestamp 
value, exclusive
-063   * @return this
-064   */
-065  public Append setTimeRange(long 
minStamp, long maxStamp) {
-066tr = new TimeRange(minStamp, 
maxStamp);
-067return this;
-068  }
-069
-070  /**
-071   * Gets the TimeRange used for this 
append.
-072   * @return TimeRange
-073   */
-074  public TimeRange getTimeRange() {
-075return this.tr;
-076  }
-077
-078  @Override
-079  protected long extraHeapSize(){
-080return HEAP_OVERHEAD;
-081  }
-082
-083  /**
-084   * @param returnResults
-085   *  True (default) if the 
append operation should return the results.
-086   *  A client that is not 
interested in the result can save network
-087   *  bandwidth setting this to 
false.
-088   */
-089  @Override
-090  public Append setReturnResults(boolean 
returnResults) {
-091
super.setReturnResults(returnResults);
-092return this;
-093  }
-094
-095  /**
-096   * @return current setting for 
returnResults
-097   */
-098  // This method makes public the 
superclasses's protected method.
-099  @Override
-100  public boolean isReturnResults() {
-101return super.isReturnResults();
-102  }
-103
-104  /**
-105   * Create a Append operation for the 
specified row.
-106   * p
-107   * At least one column must be appended 
to.
-108   * @param row row key; makes a local 
copy of passed in array.
-109   */
-110  public Append(byte[] row) {
-111this(row, 0, row.length);
-112  }
-113  /**
-114   * Copy constructor
-115   * @param appendToCopy append to copy
-116   */
-117  public Append(Append appendToCopy) {
-118super(appendToCopy);
-119this.tr = 
appendToCopy.getTimeRange();
-120  }
-121
-122  /** Create a Append operation for the 
specified row.
-123   * p
-124   * At least one column must be appended 
to.
-125   * @param rowArray Makes a copy out of 
this buffer.
-126   * @param rowOffset
-127   * @param rowLength
-128   */
-129  public Append(final byte [] rowArray, 
final int rowOffset, final int rowLength) {
-130checkRow(rowArray, rowOffset, 
rowLength);
-131this.row = Bytes.copy(rowArray, 
rowOffset, rowLength);
-132  }
-133
-134  /**
-135   * Construct the Append with user 
defined data. NOTED:
-136   * 1) all cells in the 

[36/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index f3f28a7..19938d7 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 3471,
- Errors: 19198,
+  File: 3466,
+ Errors: 19162,
  Warnings: 0,
  Infos: 0
   
@@ -1105,7 +1105,7 @@ under the License.
   0
 
 
-  23
+  24
 
   
   
@@ -3966,20 +3966,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.IterableUtils.java;>org/apache/hadoop/hbase/util/IterableUtils.java
-
-
-  0
-
-
-  0
-
-
-  1
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.coprocessor.BulkLoadObserver.java;>org/apache/hadoop/hbase/coprocessor/BulkLoadObserver.java
 
 
@@ -6682,20 +6668,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesClientImpl.java;>org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
-
-
-  0
-
-
-  0
-
-
-  2
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSourceImpl.java;>org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java
 
 
@@ -16608,20 +16580,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl.java;>org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
-
-
-  0
-
-
-  0
-
-
-  7
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.CoordinatedStateException.java;>org/apache/hadoop/hbase/CoordinatedStateException.java
 
 
@@ -22334,6 +22292,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.coprocessor.TestPassCustomCellViaRegionObserver.java;>org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.backup.BackupInfo.java;>org/apache/hadoop/hbase/backup/BackupInfo.java
 
 
@@ -29040,20 +29012,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.TestReplicationTableBase.java;>org/apache/hadoop/hbase/replication/TestReplicationTableBase.java
-
-
-  0
-
-
-  0
-
-
-  2
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.TestMasterFileSystem.java;>org/apache/hadoop/hbase/master/TestMasterFileSystem.java
 
 
@@ -31924,20 +31882,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.regionserver.TestTableBasedReplicationSourceManagerImpl.java;>org/apache/hadoop/hbase/replication/regionserver/TestTableBasedReplicationSourceManagerImpl.java
-
-
-  0
-
- 

[24/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 1cf45c7..487ea03 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -332,11 +332,11 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
-org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
 org.apache.hadoop.hbase.master.RegionState.State
-org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index d075171..20eb5a8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -208,8 +208,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
index d264c19..c43c370 100644
--- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
@@ -125,8 +125,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.monitoring.MonitoredTask.State
 org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
+org.apache.hadoop.hbase.monitoring.MonitoredTask.State
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index d791c07..c68325b 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -443,20 +443,20 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.CompareOperator
-org.apache.hadoop.hbase.Cell.DataType
 

[26/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 6b833fe..0f2f580 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static enum MasterRpcServices.BalanceSwitchMode
+static enum MasterRpcServices.BalanceSwitchMode
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumMasterRpcServices.BalanceSwitchMode
 
 
@@ -210,7 +210,7 @@ the order they are declared.
 
 
 SYNC
-public static finalMasterRpcServices.BalanceSwitchMode SYNC
+public static finalMasterRpcServices.BalanceSwitchMode SYNC
 
 
 
@@ -219,7 +219,7 @@ the order they are declared.
 
 
 ASYNC
-public static finalMasterRpcServices.BalanceSwitchMode ASYNC
+public static finalMasterRpcServices.BalanceSwitchMode ASYNC
 
 
 



[28/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html 
b/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
index ac67944..398945e 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/HeapSize.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-Append, BlockCacheKey, BucketCache, BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 
 ByteBufferChunkCell, ByteBufferKeyValue, CombinedBlockCache, Delete, HFileBlock, HFileBlockIndex.BlockIndexReader, HFileBlockIndex.ByteArrayKeyBlockIndexReader,
 HFileBlockIndex.CellBasedKeyBlockIndexReader,
 HFileContext, HMobStore, HRegion, HStore, InclusiveCombinedBlockCache, Increment, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, LruBlockCache, LruCachedBlock, LruCachedBlockQueue, MapReduceCell, Mutation, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, <
 a 
href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html"
 title="class in 
org.apache.hadoop.hbase">PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, 
PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, 
PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, Put, SizeCachedKeyValue, SizeCachedNoTagsKeyValue, WALEdit, WALSplitter.RegionEntryBuffer
+Append, BlockCacheKey, BucketCache, BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 
 ByteBufferChunkCell, ByteBufferKeyValue, CombinedBlockCache, Delete, HFileBlock, HFileBlockIndex.BlockIndexReader, HFileBlockIndex.ByteArrayKeyBlockIndexReader,
 HFileBlockIndex.CellBasedKeyBlockIndexReader,
 HFileContext, HMobStore, HRegion, HStore, InclusiveCombinedBlockCache, Increment, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, LruBlockCache, LruCachedBlock, LruCachedBlockQueue, MapReduceCell, Mutation, Mutation.CellWrapper, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html" 
 >title="class in org.apache.hadoop.hbase">PrivateCellUtil.EmptyCell, href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html"
 > title="class in 
 >org.apache.hadoop.hbase">PrivateCellUtil.FirstOnRowByteBufferCell, href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html"
 > title="class in org.apache.hadoop.hbase">PrivateCellUtil.FirstOnRowCell, 
 >href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html"
 > title="class in 
 >org.apache.hadoop.hbase">PrivateCellUtil.FirstOnRowColByteBufferCell, href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColCell.html"
 > title="class in 
 >org.apache.hadoop.hbase">PrivateCellUtil.FirstOnRowColCell, href="../../../../../org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html"
 > title="class in org.apache.hadoop.hbase">P
 rivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, 
PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, Put, SizeCachedKeyValue, SizeCachedNoTagsKeyValue, WALEdit, WALSplitter.RegionEntryBuffer
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
index c4899b8..4817a35 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
@@ -316,6 +316,10 @@
 Mutation
 
 
+private static class
+Mutation.CellWrapper
+
+
 class
 Put
 Used to perform Put operations for a single row.


[20/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
index 6ab6bf6..8762dbe 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
@@ -403,26 +403,16 @@
 
 
 void
-TableBasedReplicationQueuesImpl.addHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairorg.apache.hadoop.fs.Path,org.apache.hadoop.fs.Pathpairs)
-
-
-void
 ReplicationQueuesZKImpl.addHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairorg.apache.hadoop.fs.Path,org.apache.hadoop.fs.Pathpairs)
 
-
+
 void
 ReplicationQueues.addHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairorg.apache.hadoop.fs.Path,org.apache.hadoop.fs.Pathpairs)
 Add new hfile references to the queue.
 
 
-
-void
-TableBasedReplicationQueuesImpl.addLog(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringqueueId,
-  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringfilename)
-
 
 void
 ReplicationQueuesZKImpl.addLog(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringqueueId,
@@ -437,66 +427,57 @@
 
 
 void
-TableBasedReplicationQueuesImpl.addPeerToHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
-
-
-void
 ReplicationQueuesZKImpl.addPeerToHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 void
 ReplicationQueues.addPeerToHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Add a peer to hfile reference queue if peer does not 
exist.
 
 
-
+
 private void
 ReplicationPeersZKImpl.changePeerState(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid,

org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ReplicationState.Statestate)
 Update the state znode of a peer cluster.
 
 
-
+
 private void
 ReplicationPeersZKImpl.checkQueuesDeleted(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 boolean
 ReplicationPeersZKImpl.createAndAddPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Attempt to connect to a new remote slave cluster.
 
 
-
+
 private ReplicationPeerZKImpl
 ReplicationPeersZKImpl.createPeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Helper method to connect to a peer
 
 
-
+
 void
 ReplicationPeers.disablePeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Stop the replication to the specified remote slave 
cluster.
 
 
-
+
 void
 ReplicationPeersZKImpl.disablePeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 
-
+
 void
 ReplicationPeers.enablePeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Restart the replication to the specified remote slave 
cluster.
 
 
-
+
 void
 ReplicationPeersZKImpl.enablePeer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 
-
-long

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
index 0cbf18b..fc852d9 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Bytes
+public class Bytes
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableBytes
 Utility class that handles byte arrays, conversions to/from 
other types,
@@ -1293,7 +1293,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 UTF8_CSN
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UTF8_CSN
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UTF8_CSN
 
 
 
@@ -1302,7 +1302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 EMPTY_BYTE_ARRAY
-private static finalbyte[] EMPTY_BYTE_ARRAY
+private static finalbyte[] EMPTY_BYTE_ARRAY
 
 
 
@@ -1311,7 +1311,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -1320,7 +1320,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BOOLEAN
-public static finalint SIZEOF_BOOLEAN
+public static finalint SIZEOF_BOOLEAN
 Size of boolean in bytes
 
 See Also:
@@ -1334,7 +1334,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BYTE
-public static finalint SIZEOF_BYTE
+public static finalint SIZEOF_BYTE
 Size of byte in bytes
 
 See Also:
@@ -1348,7 +1348,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_CHAR
-public static finalint SIZEOF_CHAR
+public static finalint SIZEOF_CHAR
 Size of char in bytes
 
 See Also:
@@ -1362,7 +1362,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_DOUBLE
-public static finalint SIZEOF_DOUBLE
+public static finalint SIZEOF_DOUBLE
 Size of double in bytes
 
 See Also:
@@ -1376,7 +1376,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_FLOAT
-public static finalint SIZEOF_FLOAT
+public static finalint SIZEOF_FLOAT
 Size of float in bytes
 
 See Also:
@@ -1390,7 +1390,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_INT
-public static finalint SIZEOF_INT
+public static finalint SIZEOF_INT
 Size of int in bytes
 
 See Also:
@@ -1404,7 +1404,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_LONG
-public static finalint SIZEOF_LONG
+public static finalint SIZEOF_LONG
 Size of long in bytes
 
 See Also:
@@ -1418,7 +1418,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_SHORT
-public static finalint SIZEOF_SHORT
+public static finalint SIZEOF_SHORT
 Size of short in bytes
 
 See Also:
@@ -1432,7 +1432,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 MASK_FOR_LOWER_INT_IN_LONG
-public static finallong MASK_FOR_LOWER_INT_IN_LONG
+public static finallong MASK_FOR_LOWER_INT_IN_LONG
 Mask to apply to a long to reveal the lower int only. Use 
like this:
  int i = (int)(0xL ^ some_long_value);
 
@@ -1447,7 +1447,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 ESTIMATED_HEAP_TAX
-public static finalint ESTIMATED_HEAP_TAX
+public static finalint ESTIMATED_HEAP_TAX
 Estimate of size cost to pay beyond payload in jvm for 
instance of byte [].
  Estimate based on study of jhat and jprofiler numbers.
 
@@ -1462,7 +1462,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 UNSAFE_UNALIGNED
-private static finalboolean UNSAFE_UNALIGNED
+private static finalboolean UNSAFE_UNALIGNED
 
 
 
@@ -1471,7 +1471,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 bytes
-privatebyte[] bytes
+privatebyte[] bytes
 
 
 
@@ -1480,7 +1480,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 offset
-privateint offset
+privateint offset
 
 
 
@@ -1489,7 +1489,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 length
-privateint length
+privateint length
 
 
 
@@ -1498,7 +1498,7 @@ implements 

[29/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
index eb95695..24f54ef 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -1064,88 +1064,92 @@
 
 
 
+Mutation.CellWrapper
+
+
+
 NoncedRegionServerCallableT
 
 Implementations make an rpc call against a RegionService 
via a protobuf Service.
 
 
-
+
 NoOpRetryableCallerInterceptor
 
 Class that acts as a NoOpInterceptor.
 
 
-
+
 NoOpRetryingInterceptorContext
 
 
-
+
 Operation
 
 Superclass for any type that maps to a potentially 
application-level query.
 
 
-
+
 OperationWithAttributes
 
 
-
+
 PackagePrivateFieldAccessor
 
 A helper class used to access the package private field in 
o.a.h.h.client package.
 
 
-
+
 PerClientRandomNonceGenerator
 
 NonceGenerator implementation that uses client ID hash + 
random int as nonce group, and random
  numbers as nonces.
 
 
-
+
 PreemptiveFastFailInterceptor
 
 The concrete RetryingCallerInterceptor 
class that implements the preemptive fast fail
  feature.
 
 
-
+
 Put
 
 Used to perform Put operations for a single row.
 
 
-
+
 Query
 
 Base class for HBase read operations; e.g.
 
 
-
+
 QuotaStatusCalls
 
 Client class to wrap RPCs to HBase servers for space quota 
status information.
 
 
-
+
 RawAsyncHBaseAdmin
 
 The implementation of AsyncAdmin.
 
 
-
+
 RawAsyncTableImpl
 
 The implementation of RawAsyncTable.
 
 
-
+
 RegionAdminServiceCallableT
 
 Similar to RegionServerCallable but for the AdminService 
interface.
 
 
-
+
 RegionCoprocessorRpcChannel
 
 Provides clients with an RPC connection to call Coprocessor 
Endpoint
@@ -1153,103 +1157,103 @@
  against a given table region.
 
 
-
+
 RegionCoprocessorRpcChannelImpl
 
 The implementation of a region based coprocessor rpc 
channel.
 
 
-
+
 RegionCoprocessorServiceExec
 
 Represents a coprocessor service method execution against a 
single region.
 
 
-
+
 RegionInfoBuilder
 
 
-
+
 RegionInfoBuilder.MutableRegionInfo
 
 An implementation of RegionInfo that adds mutable methods 
so can build a RegionInfo instance.
 
 
-
+
 RegionInfoDisplay
 
 Utility used composing RegionInfo for 'display'; e.g.
 
 
-
+
 RegionLoadStats
 
 POJO representing region server load
 
 
-
+
 RegionReplicaUtil
 
 Utility methods which contain the logic for regions and 
replicas.
 
 
-
+
 RegionServerCallableT,S
 
 Implementations make a RPC call against a RegionService via 
a protobuf Service.
 
 
-
+
 RegionServerCoprocessorRpcChannelImpl
 
 The implementation of a region server based coprocessor rpc 
channel.
 
 
-
+
 RequestControllerFactory
 
 A factory class that constructs an RequestController.
 
 
-
+
 Result
 
 Single row result of a Get or Scan query.
 
 
-
+
 ResultBoundedCompletionServiceV
 
 A completion service for the RpcRetryingCallerFactory.
 
 
-
+
 ResultStatsUtil
 
 A Result with some statistics 
about the server/region status
 
 
-
+
 RetriesExhaustedException.ThrowableWithExtraContext
 
 Datastructure that allows adding more info around Throwable 
incident.
 
 
-
+
 RetryingCallerInterceptor
 
 This class is designed to fit into the RetryingCaller class 
which forms the
  central piece of intelligence for the client side retries for most 
calls.
 
 
-
+
 RetryingCallerInterceptorContext
 
 The context object used in the RpcRetryingCaller to enable
  RetryingCallerInterceptor to 
intercept calls.
 
 
-
+
 RetryingCallerInterceptorFactory
 
 Factory implementation to provide the ConnectionImplementation with
@@ -1257,186 +1261,186 @@
  to intercept the RpcRetryingCaller during the 
course of their calls.
 
 
-
+
 RetryingTimeTracker
 
 Tracks the amount of time remaining for an operation.
 
 
-
+
 ReversedClientScanner
 
 A reversed client scanner which support backward 
scanning
 
 
-
+
 ReversedScannerCallable
 
 A reversed ScannerCallable which supports backward 
scanning.
 
 
-
+
 RowMutations
 
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 RpcRetryingCallableV
 
 A RetryingCallable for RPC connection operations.
 
 
-
+
 RpcRetryingCallerFactory
 
 Factory to create an RpcRetryingCaller
 
 
-
+
 RpcRetryingCallerImplT
 
 Runs an rpc'ing RetryingCallable.
 
 
-
+
 RpcRetryingCallerWithReadReplicas
 
 Caller that goes to replica if the primary region does no 
answer within a configurable
  timeout.
 
 
-
+
 Scan
 
 Used to perform Scan operations.
 
 
-
+
 ScannerCallable
 
 Scanner operations such as create, next, etc.
 
 
-
+
 ScannerCallableWithReplicas
 
 This class has the logic for handling scanners for regions 
with and without replicas.
 
 
-
+
 SecureBulkLoadClient
 
 Client proxy for SecureBulkLoadProtocol
 
 
-
+
 ServerStatisticTracker
 

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 637fc8695 -> b618ac405


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
index 86fc15e..d02bcdf 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
@@ -38,2586 +38,2590 @@
 030import java.nio.ByteBuffer;
 031import 
java.nio.charset.StandardCharsets;
 032import java.security.SecureRandom;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Comparator;
-036import java.util.Iterator;
-037import java.util.List;
-038
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CellComparator;
-041import 
org.apache.hadoop.hbase.KeyValue;
-042import 
org.apache.hadoop.io.RawComparator;
-043import 
org.apache.hadoop.io.WritableComparator;
-044import 
org.apache.hadoop.io.WritableUtils;
-045import 
org.apache.yetus.audience.InterfaceAudience;
-046import org.slf4j.Logger;
-047import org.slf4j.LoggerFactory;
-048
-049import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051
-052import com.google.protobuf.ByteString;
-053
-054import sun.misc.Unsafe;
-055
-056/**
-057 * Utility class that handles byte 
arrays, conversions to/from other types,
-058 * comparisons, hash code generation, 
manufacturing keys for HashMaps or
-059 * HashSets, and can be used as key in 
maps or trees.
-060 */
-061@SuppressWarnings("restriction")
-062@InterfaceAudience.Public
-063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
-064
value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
-065justification="It has been like this 
forever")
-066public class Bytes implements 
ComparableBytes {
-067
-068  // Using the charset canonical name for 
String/byte[] conversions is much
-069  // more efficient due to use of cached 
encoders/decoders.
-070  private static final String UTF8_CSN = 
StandardCharsets.UTF_8.name();
-071
-072  //HConstants.EMPTY_BYTE_ARRAY should be 
updated if this changed
-073  private static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(Bytes.class);
-076
-077  /**
-078   * Size of boolean in bytes
-079   */
-080  public static final int SIZEOF_BOOLEAN 
= Byte.SIZE / Byte.SIZE;
-081
-082  /**
-083   * Size of byte in bytes
-084   */
-085  public static final int SIZEOF_BYTE = 
SIZEOF_BOOLEAN;
-086
-087  /**
-088   * Size of char in bytes
-089   */
-090  public static final int SIZEOF_CHAR = 
Character.SIZE / Byte.SIZE;
-091
-092  /**
-093   * Size of double in bytes
-094   */
-095  public static final int SIZEOF_DOUBLE = 
Double.SIZE / Byte.SIZE;
-096
-097  /**
-098   * Size of float in bytes
-099   */
-100  public static final int SIZEOF_FLOAT = 
Float.SIZE / Byte.SIZE;
-101
-102  /**
-103   * Size of int in bytes
-104   */
-105  public static final int SIZEOF_INT = 
Integer.SIZE / Byte.SIZE;
-106
-107  /**
-108   * Size of long in bytes
-109   */
-110  public static final int SIZEOF_LONG = 
Long.SIZE / Byte.SIZE;
-111
-112  /**
-113   * Size of short in bytes
-114   */
-115  public static final int SIZEOF_SHORT = 
Short.SIZE / Byte.SIZE;
-116
-117  /**
-118   * Mask to apply to a long to reveal 
the lower int only. Use like this:
-119   * int i = (int)(0xL ^ 
some_long_value);
-120   */
-121  public static final long 
MASK_FOR_LOWER_INT_IN_LONG = 0xL;
-122
-123  /**
-124   * Estimate of size cost to pay beyond 
payload in jvm for instance of byte [].
-125   * Estimate based on study of jhat and 
jprofiler numbers.
-126   */
-127  // JHat says BU is 56 bytes.
-128  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
-129  public static final int 
ESTIMATED_HEAP_TAX = 16;
-130
-131  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-132
-133  /**
-134   * Returns length of the byte array, 
returning 0 if the array is null.
-135   * Useful for calculating sizes.
-136   * @param b byte array, which can be 
null
-137   * @return 0 if b is null, otherwise 
returns length
-138   */
-139  final public static int len(byte[] b) 
{
-140return b == null ? 0 : b.length;
-141  }
-142
-143  private byte[] bytes;
-144  private int offset;
-145  private int length;
-146
-147  /**
-148   * Create a zero-size sequence.
-149   */
-150  public Bytes() {
-151super();
-152  }
-153
-154  /**
-155   * Create a Bytes using the byte array 
as the initial value.
-156   * @param bytes This array becomes 

[50/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Delete.html 
b/apidocs/org/apache/hadoop/hbase/client/Delete.html
index f8b71c7..6aafb41 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Delete.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Delete.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Delete
+public class Delete
 extends Mutation
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableRow
 Used to perform Delete operations on a single row.
@@ -254,7 +254,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-add(Cellkv)
+add(Cellcell)
 Add an existing delete marker to this Delete object.
 
 
@@ -441,7 +441,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(byte[]row)
+publicDelete(byte[]row)
 Create a Delete operation for the specified row.
  
  If no further operations are done, this will delete everything
@@ -461,7 +461,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(byte[]row,
+publicDelete(byte[]row,
   longtimestamp)
 Create a Delete operation for the specified row and 
timestamp.
 
@@ -484,7 +484,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(byte[]row,
+publicDelete(byte[]row,
   introwOffset,
   introwLength)
 Create a Delete operation for the specified row and 
timestamp.
@@ -509,7 +509,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(byte[]row,
+publicDelete(byte[]row,
   introwOffset,
   introwLength,
   longtimestamp)
@@ -536,7 +536,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(DeletedeleteToCopy)
+publicDelete(DeletedeleteToCopy)
 
 Parameters:
 deleteToCopy - delete to copy
@@ -549,7 +549,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
-publicDelete(byte[]row,
+publicDelete(byte[]row,
   longts,
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellfamilyMap)
 Construct the Delete with user defined data. NOTED:
@@ -579,7 +579,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 addDeleteMarker
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicDeleteaddDeleteMarker(Cellkv)
+publicDeleteaddDeleteMarker(Cellkv)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deprecated.As of release 2.0.0, this will be removed in HBase 
3.0.0. Use add(Cell)
  instead
@@ -600,12 +600,12 @@ public
 
 add
-publicDeleteadd(Cellkv)
+publicDeleteadd(Cellcell)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Add an existing delete marker to this Delete object.
 
 Parameters:
-kv - An existing KeyValue of type "delete".
+cell - An existing cell of type "delete".
 Returns:
 this for invocation chaining
 Throws:
@@ -619,7 +619,7 @@ public
 
 addFamily
-publicDeleteaddFamily(byte[]family)
+publicDeleteaddFamily(byte[]family)
 Delete all versions of all columns of the specified family.
  
  Overrides previous calls to deleteColumn and deleteColumns for the
@@ -638,7 +638,7 @@ public
 
 addFamily
-publicDeleteaddFamily(byte[]family,
+publicDeleteaddFamily(byte[]family,
 longtimestamp)
 Delete all columns of the specified family with a timestamp 
less than
  or equal to the specified timestamp.
@@ -660,7 +660,7 @@ public
 
 addFamilyVersion
-publicDeleteaddFamilyVersion(byte[]family,
+publicDeleteaddFamilyVersion(byte[]family,
longtimestamp)
 Delete all columns of the specified family with a timestamp 
equal to
  the specified timestamp.
@@ -679,7 +679,7 @@ public
 
 addColumns
-publicDeleteaddColumns(byte[]family,
+publicDeleteaddColumns(byte[]family,
  byte[]qualifier)
 Delete all versions of the specified column.
 
@@ -697,7 +697,7 @@ public
 
 addColumns
-publicDeleteaddColumns(byte[]family,
+publicDeleteaddColumns(byte[]family,
  

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
index 381bbfd..01af885 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
@@ -30,170 +30,169 @@
 022import java.util.Collection;
 023import java.util.List;
 024
-025import 
org.apache.hadoop.hbase.util.Bytes;
-026import 
org.apache.hadoop.hbase.util.IterableUtils;
+025import 
org.apache.commons.collections4.IterableUtils;
+026import 
org.apache.hadoop.hbase.util.Bytes;
 027import 
org.apache.hadoop.hbase.util.Strings;
 028import 
org.apache.yetus.audience.InterfaceAudience;
-029
-030import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-031
-032@InterfaceAudience.Private
-033public class KeyValueTestUtil {
-034
-035  public static KeyValue create(
-036  String row,
-037  String family,
-038  String qualifier,
-039  long timestamp,
-040  String value)
-041  {
-042return create(row, family, qualifier, 
timestamp, KeyValue.Type.Put, value);
-043  }
-044
-045  public static KeyValue create(
-046  String row,
-047  String family,
-048  String qualifier,
-049  long timestamp,
-050  KeyValue.Type type,
-051  String value)
-052  {
-053  return new KeyValue(
-054  Bytes.toBytes(row),
-055  Bytes.toBytes(family),
-056  Bytes.toBytes(qualifier),
-057  timestamp,
-058  type,
-059  Bytes.toBytes(value)
-060  );
-061  }
-062
-063  public static ByteBuffer 
toByteBufferAndRewind(final Iterable? extends KeyValue kvs,
-064  boolean includeMemstoreTS) {
-065int totalBytes = 
KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
-066ByteBuffer bb = 
ByteBuffer.allocate(totalBytes);
-067for (KeyValue kv : 
IterableUtils.nullSafe(kvs)) {
-068  KeyValueUtil.appendToByteBuffer(bb, 
kv, includeMemstoreTS);
-069}
-070bb.rewind();
-071return bb;
-072  }
-073
-074  /**
-075   * Checks whether KeyValues from 
kvCollection2 are contained in kvCollection1.
-076   *
-077   * The comparison is made without 
distinguishing MVCC version of the KeyValues
-078   *
-079   * @param kvCollection1
-080   * @param kvCollection2
-081   * @return true if KeyValues from 
kvCollection2 are contained in kvCollection1
-082   */
-083  public static boolean 
containsIgnoreMvccVersion(Collection? extends Cell kvCollection1,
-084  Collection? extends Cell 
kvCollection2) {
-085for (Cell kv1 : kvCollection1) {
-086  boolean found = false;
-087  for (Cell kv2 : kvCollection2) {
-088if 
(PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true;
-089  }
-090  if (!found) return false;
-091}
-092return true;
-093  }
-094
-095  public static ListKeyValue 
rewindThenToList(final ByteBuffer bb,
-096  final boolean includesMemstoreTS, 
final boolean useTags) {
-097bb.rewind();
-098ListKeyValue kvs = 
Lists.newArrayList();
-099KeyValue kv = null;
-100while (true) {
-101  kv = 
KeyValueUtil.nextShallowCopy(bb, includesMemstoreTS, useTags);
-102  if (kv == null) {
-103break;
-104  }
-105  kvs.add(kv);
-106}
-107return kvs;
-108  }
+029import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+030
+031@InterfaceAudience.Private
+032public class KeyValueTestUtil {
+033
+034  public static KeyValue create(
+035  String row,
+036  String family,
+037  String qualifier,
+038  long timestamp,
+039  String value)
+040  {
+041return create(row, family, qualifier, 
timestamp, KeyValue.Type.Put, value);
+042  }
+043
+044  public static KeyValue create(
+045  String row,
+046  String family,
+047  String qualifier,
+048  long timestamp,
+049  KeyValue.Type type,
+050  String value)
+051  {
+052  return new KeyValue(
+053  Bytes.toBytes(row),
+054  Bytes.toBytes(family),
+055  Bytes.toBytes(qualifier),
+056  timestamp,
+057  type,
+058  Bytes.toBytes(value)
+059  );
+060  }
+061
+062  public static ByteBuffer 
toByteBufferAndRewind(final Iterable? extends KeyValue kvs,
+063  boolean includeMemstoreTS) {
+064int totalBytes = 
KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
+065ByteBuffer bb = 
ByteBuffer.allocate(totalBytes);
+066for (KeyValue kv : 
IterableUtils.emptyIfNull(kvs)) {
+067  KeyValueUtil.appendToByteBuffer(bb, 
kv, includeMemstoreTS);
+068}
+069bb.rewind();
+070return bb;
+071  }
+072
+073  /**
+074   * Checks whether KeyValues from 
kvCollection2 are contained in kvCollection1.
+075   *
+076   * The 

[13/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
new file mode 100644
index 000..8b6f080
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
@@ -0,0 +1,1040 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package org.apache.hadoop.hbase.client;
+020
+021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+022
+023import java.io.IOException;
+024import java.nio.ByteBuffer;
+025import java.util.ArrayList;
+026import java.util.Arrays;
+027import java.util.HashMap;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import java.util.Optional;
+033import java.util.TreeMap;
+034import java.util.UUID;
+035import java.util.stream.Collectors;
+036import 
org.apache.hadoop.hbase.ArrayBackedTag;
+037import org.apache.hadoop.hbase.Cell;
+038import 
org.apache.hadoop.hbase.CellScannable;
+039import 
org.apache.hadoop.hbase.CellScanner;
+040import 
org.apache.hadoop.hbase.CellUtil;
+041import 
org.apache.hadoop.hbase.ExtendedCell;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.KeyValue;
+044import 
org.apache.hadoop.hbase.PrivateCellUtil;
+045import org.apache.hadoop.hbase.RawCell;
+046import org.apache.hadoop.hbase.Tag;
+047import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+048import 
org.apache.hadoop.hbase.io.HeapSize;
+049import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+050import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+051import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
+052import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
+053import 
org.apache.hadoop.hbase.security.access.Permission;
+054import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
+055import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+056import 
org.apache.hadoop.hbase.util.Bytes;
+057import 
org.apache.hadoop.hbase.util.ClassSize;
+058import 
org.apache.yetus.audience.InterfaceAudience;
+059
+060import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+064import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
+065import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
+066import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
+067
+068@InterfaceAudience.Public
+069public abstract class Mutation extends 
OperationWithAttributes implements Row, CellScannable,
+070HeapSize {
+071  public static final long 
MUTATION_OVERHEAD = ClassSize.align(
+072  // This
+073  ClassSize.OBJECT +
+074  // row + 
OperationWithAttributes.attributes
+075  2 * ClassSize.REFERENCE +
+076  // Timestamp
+077  1 * Bytes.SIZEOF_LONG +
+078  // durability
+079  ClassSize.REFERENCE +
+080  // familyMap
+081  ClassSize.REFERENCE +
+082  // familyMap
+083  ClassSize.TREEMAP +
+084  // priority
+085  ClassSize.INTEGER
+086  );
+087
+088  /**
+089   * The attribute for storing the list 
of clusters that have consumed the change.
+090   */
+091  private static final String 
CONSUMED_CLUSTER_IDS = "_cs.id";
+092
+093  /**
+094   * The attribute for storing TTL for 
the result of the mutation.
+095   */
+096  private static final String 
OP_ATTRIBUTE_TTL = "_ttl";
+097
+098  private static final String 
RETURN_RESULTS = "_rr_";
+099
+100  // TODO: row should be final
+101  protected byte [] row 

[09/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 9d49b9a..c36fdce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -165,3380 +165,3375 @@
 157import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 158import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 159import 
org.apache.hadoop.hbase.replication.ReplicationException;
-160import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-161import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-162import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-163import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-164import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
-165import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-166import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-167import 
org.apache.hadoop.hbase.security.UserProvider;
-168import 
org.apache.hadoop.hbase.trace.TraceUtil;
-169import 
org.apache.hadoop.hbase.util.Addressing;
-170import 
org.apache.hadoop.hbase.util.Bytes;
-171import 
org.apache.hadoop.hbase.util.CompressionTest;
-172import 
org.apache.hadoop.hbase.util.EncryptionTest;
-173import 
org.apache.hadoop.hbase.util.FSUtils;
-174import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-175import 
org.apache.hadoop.hbase.util.HasThread;
-176import 
org.apache.hadoop.hbase.util.IdLock;
-177import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-178import 
org.apache.hadoop.hbase.util.Pair;
-179import 
org.apache.hadoop.hbase.util.Threads;
-180import 
org.apache.hadoop.hbase.util.VersionInfo;
-181import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-182import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-183import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-187import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-188import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-189import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-190import 
org.apache.yetus.audience.InterfaceAudience;
-191import 
org.apache.zookeeper.KeeperException;
-192import org.eclipse.jetty.server.Server;
-193import 
org.eclipse.jetty.server.ServerConnector;
-194import 
org.eclipse.jetty.servlet.ServletHolder;
-195import 
org.eclipse.jetty.webapp.WebAppContext;
-196import org.slf4j.Logger;
-197import org.slf4j.LoggerFactory;
-198
-199import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-201import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-202import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-203import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-204import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-205import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-206import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-207import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-208import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-209
-210/**
-211 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-212 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-213 * run the cluster.  All others park 
themselves in their constructor until
-214 * master or cluster shutdown or until 
the active master loses its lease in
-215 * zookeeper.  Thereafter, all running 
master jostle to take over master role.
-216 *
-217 * pThe Master can be asked 
shutdown the cluster. See {@link #shutdown()}.  In
-218 * this case it will tell all 
regionservers to go down and then wait on them
-219 * all reporting in that they are down.  
This master will then shut itself down.
+160import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+161import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+162import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
+163import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
+164import 

[42/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/client/Put.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Put.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Put.html
index d152a92..8c56813 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Put.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Put.html
@@ -34,339 +34,320 @@
 026import java.util.NavigableMap;
 027import java.util.UUID;
 028import org.apache.hadoop.hbase.Cell;
-029import 
org.apache.hadoop.hbase.CellUtil;
-030import 
org.apache.hadoop.hbase.HConstants;
-031import 
org.apache.hadoop.hbase.IndividualBytesFieldCell;
-032import 
org.apache.hadoop.hbase.KeyValue;
-033import 
org.apache.hadoop.hbase.io.HeapSize;
-034import 
org.apache.hadoop.hbase.security.access.Permission;
-035import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-036import 
org.apache.hadoop.hbase.util.Bytes;
-037import 
org.apache.yetus.audience.InterfaceAudience;
-038
-039/**
-040 * Used to perform Put operations for a 
single row.
-041 * p
-042 * To perform a Put, instantiate a Put 
object with the row to insert to, and
-043 * for each column to be inserted, 
execute {@link #addColumn(byte[], byte[],
-044 * byte[]) add} or {@link 
#addColumn(byte[], byte[], long, byte[]) add} if
-045 * setting the timestamp.
-046 */
-047@InterfaceAudience.Public
-048public class Put extends Mutation 
implements HeapSize, ComparableRow {
-049  /**
-050   * Create a Put operation for the 
specified row.
-051   * @param row row key
-052   */
-053  public Put(byte [] row) {
-054this(row, 
HConstants.LATEST_TIMESTAMP);
-055  }
-056
-057  /**
-058   * Create a Put operation for the 
specified row, using a given timestamp.
-059   *
-060   * @param row row key; we make a copy 
of what we are passed to keep local.
-061   * @param ts timestamp
-062   */
-063  public Put(byte[] row, long ts) {
-064this(row, 0, row.length, ts);
-065  }
-066
-067  /**
-068   * We make a copy of the passed in row 
key to keep local.
-069   * @param rowArray
-070   * @param rowOffset
-071   * @param rowLength
-072   */
-073  public Put(byte [] rowArray, int 
rowOffset, int rowLength) {
-074this(rowArray, rowOffset, rowLength, 
HConstants.LATEST_TIMESTAMP);
-075  }
-076
-077  /**
-078   * @param row row key; we make a copy 
of what we are passed to keep local.
-079   * @param ts  timestamp
-080   */
-081  public Put(ByteBuffer row, long ts) {
-082if (ts  0) {
-083  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-084}
-085checkRow(row);
-086this.row = new 
byte[row.remaining()];
-087row.get(this.row);
-088this.ts = ts;
-089  }
-090
-091  /**
-092   * @param row row key; we make a copy 
of what we are passed to keep local.
-093   */
-094  public Put(ByteBuffer row) {
-095this(row, 
HConstants.LATEST_TIMESTAMP);
-096  }
-097
-098  /**
-099   * We make a copy of the passed in row 
key to keep local.
-100   * @param rowArray
-101   * @param rowOffset
-102   * @param rowLength
-103   * @param ts
-104   */
-105  public Put(byte [] rowArray, int 
rowOffset, int rowLength, long ts) {
-106checkRow(rowArray, rowOffset, 
rowLength);
-107this.row = Bytes.copy(rowArray, 
rowOffset, rowLength);
-108this.ts = ts;
-109if (ts  0) {
-110  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-111}
-112  }
-113
-114  /**
-115   * Create a Put operation for an 
immutable row key.
-116   *
-117   * @param row row key
-118   * @param rowIsImmutable whether the 
input row is immutable.
-119   *   Set to true if 
the caller can guarantee that
-120   *   the row will 
not be changed for the Put duration.
-121   */
-122  public Put(byte [] row, boolean 
rowIsImmutable) {
-123this(row, 
HConstants.LATEST_TIMESTAMP, rowIsImmutable);
-124  }
-125
-126  /**
-127   * Create a Put operation for an 
immutable row key, using a given timestamp.
-128   *
-129   * @param row row key
-130   * @param ts timestamp
-131   * @param rowIsImmutable whether the 
input row is immutable.
-132   *   Set to true if 
the caller can guarantee that
-133   *   the row will 
not be changed for the Put duration.
-134   */
-135  public Put(byte[] row, long ts, boolean 
rowIsImmutable) {
-136// Check and set timestamp
-137if (ts  0) {
-138  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-139}
-140this.ts = ts;
-141
-142// Deal with row according to 
rowIsImmutable
-143checkRow(row);
-144if (rowIsImmutable) {  // Row is 
immutable
-145  this.row = row;  // Do not make a 
local copy, but point to the provided byte array directly
-146} else {  // Row is not immutable
-147  this.row = Bytes.copy(row, 0, 
row.length);  

[05/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 07b6abe..f51c693 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -78,2190 +78,2184 @@
 070import 
org.apache.hadoop.hbase.procedure2.LockType;
 071import 
org.apache.hadoop.hbase.procedure2.LockedResource;
 072import 
org.apache.hadoop.hbase.procedure2.Procedure;
-073import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-074import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-075import 
org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
-076import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-077import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-078import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-079import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-080import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-081import 
org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-082import 
org.apache.hadoop.hbase.replication.ReplicationException;
-083import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-084import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-085import 
org.apache.hadoop.hbase.security.User;
-086import 
org.apache.hadoop.hbase.security.access.AccessController;
-087import 
org.apache.hadoop.hbase.security.visibility.VisibilityController;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-090import 
org.apache.hadoop.hbase.util.Bytes;
-091import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-092import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-093import 
org.apache.hadoop.hbase.util.Pair;
-094import 
org.apache.yetus.audience.InterfaceAudience;
-095import 
org.apache.zookeeper.KeeperException;
-096import org.slf4j.Logger;
-097import org.slf4j.LoggerFactory;
-098
-099import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-100import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-101import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-127import 

[03/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
index ce8c56c..1ebcb9e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
@@ -29,535 +29,532 @@
 021import java.io.IOException;
 022import java.util.ArrayList;
 023import java.util.Collection;
-024import java.util.HashMap;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.Set;
-028import java.util.TreeMap;
-029import 
java.util.concurrent.ConcurrentHashMap;
-030import 
java.util.concurrent.ConcurrentMap;
-031
-032import 
org.apache.hadoop.conf.Configuration;
-033import 
org.apache.hadoop.hbase.Abortable;
-034import 
org.apache.hadoop.hbase.CompoundConfiguration;
-035import 
org.apache.hadoop.hbase.HBaseConfiguration;
-036import 
org.apache.hadoop.hbase.TableName;
-037import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-038import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-040import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-041import 
org.apache.hadoop.hbase.util.Bytes;
-042import 
org.apache.hadoop.hbase.util.Pair;
-043import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
-044import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-045import 
org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
-046import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-047import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-048import 
org.apache.yetus.audience.InterfaceAudience;
-049import 
org.apache.zookeeper.KeeperException;
-050import org.slf4j.Logger;
-051import org.slf4j.LoggerFactory;
-052
-053/**
-054 * This class provides an implementation 
of the ReplicationPeers interface using ZooKeeper. The
-055 * peers znode contains a list of all 
peer replication clusters and the current replication state of
-056 * those clusters. It has one child peer 
znode for each peer cluster. The peer znode is named with
-057 * the cluster id provided by the user in 
the HBase shell. The value of the peer znode contains the
-058 * peers cluster key provided by the user 
in the HBase Shell. The cluster key contains a list of
-059 * zookeeper quorum peers, the client 
port for the zookeeper quorum, and the base znode for HBase.
-060 * For example:
-061 *
-062 *  /hbase/replication/peers/1 [Value: 
zk1.host.com,zk2.host.com,zk3.host.com:2181:/hbase]
-063 *  /hbase/replication/peers/2 [Value: 
zk5.host.com,zk6.host.com,zk7.host.com:2181:/hbase]
-064 *
-065 * Each of these peer znodes has a child 
znode that indicates whether or not replication is enabled
-066 * on that peer cluster. These peer-state 
znodes do not have child znodes and simply contain a
-067 * boolean value (i.e. ENABLED or 
DISABLED). This value is read/maintained by the
-068 * ReplicationPeer.PeerStateTracker 
class. For example:
+024import java.util.List;
+025import java.util.Map;
+026import java.util.Set;
+027import java.util.TreeMap;
+028import 
java.util.concurrent.ConcurrentHashMap;
+029import 
java.util.concurrent.ConcurrentMap;
+030
+031import 
org.apache.hadoop.conf.Configuration;
+032import 
org.apache.hadoop.hbase.Abortable;
+033import 
org.apache.hadoop.hbase.CompoundConfiguration;
+034import 
org.apache.hadoop.hbase.HBaseConfiguration;
+035import 
org.apache.hadoop.hbase.TableName;
+036import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
+037import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+039import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+040import 
org.apache.hadoop.hbase.util.Pair;
+041import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
+042import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
+043import 
org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+044import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+045import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+046import 
org.apache.yetus.audience.InterfaceAudience;
+047import 
org.apache.zookeeper.KeeperException;
+048import org.slf4j.Logger;
+049import org.slf4j.LoggerFactory;
+050
+051/**
+052 * This class provides an implementation 
of the ReplicationPeers interface using ZooKeeper. The
+053 * peers znode contains a list of all 
peer replication clusters and the current replication state of
+054 * those clusters. It has one child peer 
znode for each peer cluster. The peer znode is named with
+055 * the cluster id provided by the user in 
the HBase shell. 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
index 6aab17f..172f50c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static enum Bytes.LexicographicalComparerHolder.UnsafeComparer
+static enum Bytes.LexicographicalComparerHolder.UnsafeComparer
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumBytes.LexicographicalComparerHolder.UnsafeComparer
 implements Bytes.Comparerbyte[]
 
@@ -238,7 +238,7 @@ the order they are declared.
 
 
 INSTANCE
-public static finalBytes.LexicographicalComparerHolder.UnsafeComparer
 INSTANCE
+public static finalBytes.LexicographicalComparerHolder.UnsafeComparer
 INSTANCE
 
 
 
@@ -255,7 +255,7 @@ the order they are declared.
 
 
 theUnsafe
-static finalsun.misc.Unsafe theUnsafe
+static finalsun.misc.Unsafe theUnsafe
 
 
 
@@ -272,7 +272,7 @@ the order they are declared.
 
 
 values
-public staticBytes.LexicographicalComparerHolder.UnsafeComparer[]values()
+public staticBytes.LexicographicalComparerHolder.UnsafeComparer[]values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -292,7 +292,7 @@ for (Bytes.LexicographicalComparerHolder.UnsafeComparer c : 
Bytes.Lexicographica
 
 
 valueOf
-public staticBytes.LexicographicalComparerHolder.UnsafeComparervalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
+public staticBytes.LexicographicalComparerHolder.UnsafeComparervalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 
@@ -314,7 +314,7 @@ not permitted.)
 
 
 compareTo
-publicintcompareTo(byte[]buffer1,
+publicintcompareTo(byte[]buffer1,
  intoffset1,
  intlength1,
  byte[]buffer2,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
index bafa964..81432ff 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class Bytes.LexicographicalComparerHolder
+static class Bytes.LexicographicalComparerHolder
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Provides a lexicographical comparer implementation; either 
a Java
  implementation or a faster implementation based on Unsafe.
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 UNSAFE_COMPARER_NAME
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UNSAFE_COMPARER_NAME
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UNSAFE_COMPARER_NAME
 
 
 
@@ -245,7 +245,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BEST_COMPARER
-static finalBytes.Comparerbyte[] BEST_COMPARER
+static finalBytes.Comparerbyte[] BEST_COMPARER
 
 
 
@@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LexicographicalComparerHolder
-LexicographicalComparerHolder()
+LexicographicalComparerHolder()
 
 
 
@@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getBestComparer
-staticBytes.Comparerbyte[]getBestComparer()
+staticBytes.Comparerbyte[]getBestComparer()
 Returns the Unsafe-using Comparer, or falls back to the 
pure-Java
  implementation if unable to do 

[45/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index 0c7ac41..0f682ab 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -33,342 +33,331 @@
 025import java.util.NavigableMap;
 026import java.util.UUID;
 027import org.apache.hadoop.hbase.Cell;
-028import 
org.apache.hadoop.hbase.CellUtil;
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.KeyValue;
-031import 
org.apache.hadoop.hbase.security.access.Permission;
-032import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-033import 
org.apache.hadoop.hbase.util.Bytes;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035
-036/**
-037 * Used to perform Delete operations on a 
single row.
-038 * p
-039 * To delete an entire row, instantiate a 
Delete object with the row
-040 * to delete.  To further define the 
scope of what to delete, perform
-041 * additional methods as outlined 
below.
-042 * p
-043 * To delete specific families, execute 
{@link #addFamily(byte[]) deleteFamily}
-044 * for each family to delete.
-045 * p
-046 * To delete multiple versions of 
specific columns, execute
-047 * {@link #addColumns(byte[], byte[]) 
deleteColumns}
-048 * for each column to delete.
-049 * p
-050 * To delete specific versions of 
specific columns, execute
-051 * {@link #addColumn(byte[], byte[], 
long) deleteColumn}
-052 * for each column version to delete.
-053 * p
-054 * Specifying timestamps, deleteFamily 
and deleteColumns will delete all
-055 * versions with a timestamp less than or 
equal to that passed.  If no
-056 * timestamp is specified, an entry is 
added with a timestamp of 'now'
-057 * where 'now' is the servers's 
System.currentTimeMillis().
-058 * Specifying a timestamp to the 
deleteColumn method will
-059 * delete versions only with a timestamp 
equal to that specified.
-060 * If no timestamp is passed to 
deleteColumn, internally, it figures the
-061 * most recent cell's timestamp and adds 
a delete at that timestamp; i.e.
-062 * it deletes the most recently added 
cell.
-063 * pThe timestamp passed to the 
constructor is used ONLY for delete of
-064 * rows.  For anything less -- a 
deleteColumn, deleteColumns or
-065 * deleteFamily -- then you need to use 
the method overrides that take a
-066 * timestamp.  The constructor timestamp 
is not referenced.
-067 */
-068@InterfaceAudience.Public
-069public class Delete extends Mutation 
implements ComparableRow {
-070  /**
-071   * Create a Delete operation for the 
specified row.
-072   * p
-073   * If no further operations are done, 
this will delete everything
-074   * associated with the specified row 
(all versions of all columns in all
-075   * families), with timestamp from 
current point in time to the past.
-076   * Cells defining timestamp for a 
future point in time
-077   * (timestamp  current time) will 
not be deleted.
-078   * @param row row key
-079   */
-080  public Delete(byte [] row) {
-081this(row, 
HConstants.LATEST_TIMESTAMP);
-082  }
-083
-084  /**
-085   * Create a Delete operation for the 
specified row and timestamp.p
-086   *
-087   * If no further operations are done, 
this will delete all columns in all
-088   * families of the specified row with a 
timestamp less than or equal to the
-089   * specified timestamp.p
-090   *
-091   * This timestamp is ONLY used for a 
delete row operation.  If specifying
-092   * families or columns, you must 
specify each timestamp individually.
-093   * @param row row key
-094   * @param timestamp maximum version 
timestamp (only for delete row)
-095   */
-096  public Delete(byte [] row, long 
timestamp) {
-097this(row, 0, row.length, 
timestamp);
-098  }
-099
-100  /**
-101   * Create a Delete operation for the 
specified row and timestamp.p
-102   *
-103   * If no further operations are done, 
this will delete all columns in all
-104   * families of the specified row with a 
timestamp less than or equal to the
-105   * specified timestamp.p
-106   *
-107   * This timestamp is ONLY used for a 
delete row operation.  If specifying
-108   * families or columns, you must 
specify each timestamp individually.
-109   * @param row We make a local copy of 
this passed in row.
-110   * @param rowOffset
-111   * @param rowLength
-112   */
-113  public Delete(final byte[] row, final 
int rowOffset, final int rowLength) {
-114this(row, rowOffset, rowLength, 
HConstants.LATEST_TIMESTAMP);
-115  }
-116
-117  /**
-118   * Create a Delete operation for the 
specified row and timestamp.p
-119   *
-120   * If no further operations are done, 
this will delete all columns in all
-121   * families of the specified row with a 

[38/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/book.html
--
diff --git a/book.html b/book.html
index efe247f..03d89ab 100644
--- a/book.html
+++ b/book.html
@@ -37177,7 +37177,7 @@ The server will return cellblocks compressed using this 
same compressor as long
 
 
 Version 3.0.0-SNAPSHOT
-Last updated 2017-12-24 14:29:40 UTC
+Last updated 2017-12-25 16:50:56 UTC
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 6824e05..cfa1251 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-24
+  Last Published: 
2017-12-26
 
 
 



[44/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index ea801d2..6d58b59 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -106,264 +106,257 @@
 098   * @throws java.io.IOException e
 099   */
 100  public Increment add(Cell cell) throws 
IOException{
-101byte [] family = 
CellUtil.cloneFamily(cell);
-102ListCell list = 
getCellList(family);
-103//Checking that the row of the kv is 
the same as the put
-104if (!CellUtil.matchingRows(cell, 
this.row)) {
-105  throw new WrongRowIOException("The 
row in " + cell +
-106" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
-107}
-108list.add(cell);
-109return this;
-110  }
-111
-112  /**
-113   * Increment the column from the 
specific family with the specified qualifier
-114   * by the specified amount.
-115   * p
-116   * Overrides previous calls to 
addColumn for this family and qualifier.
-117   * @param family family name
-118   * @param qualifier column qualifier
-119   * @param amount amount to increment 
by
-120   * @return the Increment object
-121   */
-122  public Increment addColumn(byte [] 
family, byte [] qualifier, long amount) {
-123if (family == null) {
-124  throw new 
IllegalArgumentException("family cannot be null");
-125}
-126ListCell list = 
getCellList(family);
-127KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-128list.add(kv);
-129return this;
-130  }
-131
-132  /**
-133   * Gets the TimeRange used for this 
increment.
-134   * @return TimeRange
-135   */
-136  public TimeRange getTimeRange() {
-137return this.tr;
-138  }
-139
-140  /**
-141   * Sets the TimeRange to be used on the 
Get for this increment.
+101super.add(cell);
+102return this;
+103  }
+104
+105  /**
+106   * Increment the column from the 
specific family with the specified qualifier
+107   * by the specified amount.
+108   * p
+109   * Overrides previous calls to 
addColumn for this family and qualifier.
+110   * @param family family name
+111   * @param qualifier column qualifier
+112   * @param amount amount to increment 
by
+113   * @return the Increment object
+114   */
+115  public Increment addColumn(byte [] 
family, byte [] qualifier, long amount) {
+116if (family == null) {
+117  throw new 
IllegalArgumentException("family cannot be null");
+118}
+119ListCell list = 
getCellList(family);
+120KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
+121list.add(kv);
+122return this;
+123  }
+124
+125  /**
+126   * Gets the TimeRange used for this 
increment.
+127   * @return TimeRange
+128   */
+129  public TimeRange getTimeRange() {
+130return this.tr;
+131  }
+132
+133  /**
+134   * Sets the TimeRange to be used on the 
Get for this increment.
+135   * p
+136   * This is useful for when you have 
counters that only last for specific
+137   * periods of time (ie. counters that 
are partitioned by time).  By setting
+138   * the range of valid times for this 
increment, you can potentially gain
+139   * some performance with a more optimal 
Get operation.
+140   * Be careful adding the time range to 
this class as you will update the old cell if the
+141   * time range doesn't include the 
latest cells.
 142   * p
-143   * This is useful for when you have 
counters that only last for specific
-144   * periods of time (ie. counters that 
are partitioned by time).  By setting
-145   * the range of valid times for this 
increment, you can potentially gain
-146   * some performance with a more optimal 
Get operation.
-147   * Be careful adding the time range to 
this class as you will update the old cell if the
-148   * time range doesn't include the 
latest cells.
-149   * p
-150   * This range is used as [minStamp, 
maxStamp).
-151   * @param minStamp minimum timestamp 
value, inclusive
-152   * @param maxStamp maximum timestamp 
value, exclusive
-153   * @throws IOException if invalid time 
range
-154   * @return this
-155   */
-156  public Increment setTimeRange(long 
minStamp, long maxStamp)
-157  throws IOException {
-158tr = new TimeRange(minStamp, 
maxStamp);
-159return this;
-160  }
-161
-162  @Override
-163  public Increment setTimestamp(long 
timestamp) {
-164super.setTimestamp(timestamp);
-165return this;
-166  }
-167
-168  /**
-169   * @param returnResults True (default) 
if the increment operation should return the results. A
-170   *  client that is not 
interested in the result can save network bandwidth setting this
-171   *  to false.
-172   */
-173  

[08/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 9d49b9a..c36fdce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -165,3380 +165,3375 @@
 157import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 158import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 159import 
org.apache.hadoop.hbase.replication.ReplicationException;
-160import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-161import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-162import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-163import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-164import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
-165import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-166import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-167import 
org.apache.hadoop.hbase.security.UserProvider;
-168import 
org.apache.hadoop.hbase.trace.TraceUtil;
-169import 
org.apache.hadoop.hbase.util.Addressing;
-170import 
org.apache.hadoop.hbase.util.Bytes;
-171import 
org.apache.hadoop.hbase.util.CompressionTest;
-172import 
org.apache.hadoop.hbase.util.EncryptionTest;
-173import 
org.apache.hadoop.hbase.util.FSUtils;
-174import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-175import 
org.apache.hadoop.hbase.util.HasThread;
-176import 
org.apache.hadoop.hbase.util.IdLock;
-177import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-178import 
org.apache.hadoop.hbase.util.Pair;
-179import 
org.apache.hadoop.hbase.util.Threads;
-180import 
org.apache.hadoop.hbase.util.VersionInfo;
-181import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-182import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-183import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-187import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-188import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-189import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-190import 
org.apache.yetus.audience.InterfaceAudience;
-191import 
org.apache.zookeeper.KeeperException;
-192import org.eclipse.jetty.server.Server;
-193import 
org.eclipse.jetty.server.ServerConnector;
-194import 
org.eclipse.jetty.servlet.ServletHolder;
-195import 
org.eclipse.jetty.webapp.WebAppContext;
-196import org.slf4j.Logger;
-197import org.slf4j.LoggerFactory;
-198
-199import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-201import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-202import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-203import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-204import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-205import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-206import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-207import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-208import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-209
-210/**
-211 * HMaster is the "master server" for 
HBase. An HBase cluster has one active
-212 * master.  If many masters are started, 
all compete.  Whichever wins goes on to
-213 * run the cluster.  All others park 
themselves in their constructor until
-214 * master or cluster shutdown or until 
the active master loses its lease in
-215 * zookeeper.  Thereafter, all running 
master jostle to take over master role.
-216 *
-217 * pThe Master can be asked 
shutdown the cluster. See {@link #shutdown()}.  In
-218 * this case it will tell all 
regionservers to go down and then wait on them
-219 * all reporting in that they are down.  
This master will then shut itself down.
+160import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+161import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+162import 
org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
+163import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
+164import 
org.apache.hadoop.hbase.security.AccessDeniedException;

[49/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/apidocs/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Mutation.html 
b/apidocs/org/apache/hadoop/hbase/client/Mutation.html
index a97e6d0..c71bd38 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Mutation.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Mutation.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public abstract class Mutation
+public abstract class Mutation
 extends OperationWithAttributes
 implements Row, CellScannable, 
org.apache.hadoop.hbase.io.HeapSize
 
@@ -455,7 +455,7 @@ implements 
 
 MUTATION_OVERHEAD
-public static finallong MUTATION_OVERHEAD
+public static finallong MUTATION_OVERHEAD
 
 
 
@@ -464,7 +464,7 @@ implements 
 
 row
-protectedbyte[] row
+protectedbyte[] row
 
 
 
@@ -473,7 +473,7 @@ implements 
 
 ts
-protectedlong ts
+protectedlong ts
 
 
 
@@ -482,7 +482,7 @@ implements 
 
 durability
-protectedDurability durability
+protectedDurability durability
 
 
 
@@ -491,7 +491,7 @@ implements 
 
 familyMap
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell familyMap
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell familyMap
 
 
 
@@ -508,7 +508,7 @@ implements 
 
 Mutation
-protectedMutation()
+protectedMutation()
 empty construction.
  We need this empty construction to keep binary compatibility.
 
@@ -519,7 +519,7 @@ implements 
 
 Mutation
-protectedMutation(Mutationclone)
+protectedMutation(Mutationclone)
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 Mutation
-protectedMutation(byte[]row,
+protectedMutation(byte[]row,
longts,
http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellfamilyMap)
 Construct the mutation with user defined data.
@@ -554,7 +554,7 @@ implements 
 
 cellScanner
-publicCellScannercellScanner()
+publicCellScannercellScanner()
 
 Specified by:
 cellScannerin
 interfaceCellScannable
@@ -569,7 +569,7 @@ implements 
 
 getFingerprint
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjectgetFingerprint()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjectgetFingerprint()
 Compile the column family (i.e. schema) information
  into a Map. Useful for parsing and aggregation by debugging,
  logging, and administration tools.
@@ -587,7 +587,7 @@ implements 
 
 toMap
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjecttoMap(intmaxCols)
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjecttoMap(intmaxCols)
 Compile the details beyond the scope of getFingerprint 
(row, columns,
  timestamps, etc.) into a Map along with the fingerprinted information.
  Useful for debugging, logging, and administration tools.
@@ -607,7 +607,7 @@ implements 
 
 setDurability
-publicMutationsetDurability(Durabilityd)
+publicMutationsetDurability(Durabilityd)
 Set the durability for this mutation
 
 Parameters:
@@ -621,7 +621,7 @@ 

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html 
b/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
index dfc3068..10e70a5 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -321,7 +321,7 @@ implements 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html 
b/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
deleted file mode 100644
index a757d87..000
--- a/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
+++ /dev/null
@@ -1,310 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-IterableUtils (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = {"i0":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.util
-Class IterableUtils
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.util.IterableUtils
-
-
-
-
-
-
-
-
-@InterfaceAudience.Private
-public class IterableUtils
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-Utility methods for Iterable including null-safe 
handlers.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-EMPTY_LIST
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-IterableUtils()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsStatic MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-static Thttp://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
 title="class or interface in java.lang">IterableT
-nullSafe(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true;
 title="class or interface in 
java.lang">IterableTin)
-
-
-
-
-
-
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, 

[34/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/Cell.html 
b/devapidocs/org/apache/hadoop/hbase/Cell.html
index 09896fb..58be56e 100644
--- a/devapidocs/org/apache/hadoop/hbase/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/Cell.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferCell, ByteBufferChunkCell, 
ByteBufferKeyOnlyKeyValue, ByteBufferKeyValue, IndividualBytesFieldCell, KeyOnlyFilter.KeyOnlyByteBufferCell, KeyOnlyFilter.KeyOnlyCell, 
KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
+BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferCell, ByteBufferChunkCell, 
ByteBufferKeyOnlyKeyValue, ByteBufferKeyValue, IndividualBytesFieldCell, KeyOnlyFilter.KeyOnlyByteBufferCell, KeyOnlyFilter.KeyOnlyCell, 
KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, Mutation.CellWrapper, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, Priva
 teCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html 
b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
index e5f8f4f..1165e75 100644
--- a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
+BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, Mutation.CellWrapper, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, Priv
 ateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, 

[12/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 38865a3..8b6f080 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -26,746 +26,954 @@
 018
 019package org.apache.hadoop.hbase.client;
 020
-021import java.io.IOException;
-022import java.nio.ByteBuffer;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.HashMap;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.NavigableMap;
-029import java.util.TreeMap;
-030import java.util.UUID;
-031import java.util.stream.Collectors;
-032import org.apache.hadoop.hbase.Cell;
-033import 
org.apache.hadoop.hbase.CellScannable;
-034import 
org.apache.hadoop.hbase.CellScanner;
-035import 
org.apache.hadoop.hbase.CellUtil;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.KeyValue;
-038import 
org.apache.hadoop.hbase.PrivateCellUtil;
-039import org.apache.hadoop.hbase.Tag;
-040import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-041import 
org.apache.hadoop.hbase.io.HeapSize;
-042import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-043import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-044import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
-045import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
-046import 
org.apache.hadoop.hbase.security.access.Permission;
-047import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-048import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
-049import 
org.apache.hadoop.hbase.util.Bytes;
-050import 
org.apache.hadoop.hbase.util.ClassSize;
-051import 
org.apache.yetus.audience.InterfaceAudience;
-052
-053import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-054import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-055import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-056import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
-057import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
-058import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
+021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+022
+023import java.io.IOException;
+024import java.nio.ByteBuffer;
+025import java.util.ArrayList;
+026import java.util.Arrays;
+027import java.util.HashMap;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import java.util.Optional;
+033import java.util.TreeMap;
+034import java.util.UUID;
+035import java.util.stream.Collectors;
+036import 
org.apache.hadoop.hbase.ArrayBackedTag;
+037import org.apache.hadoop.hbase.Cell;
+038import 
org.apache.hadoop.hbase.CellScannable;
+039import 
org.apache.hadoop.hbase.CellScanner;
+040import 
org.apache.hadoop.hbase.CellUtil;
+041import 
org.apache.hadoop.hbase.ExtendedCell;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.KeyValue;
+044import 
org.apache.hadoop.hbase.PrivateCellUtil;
+045import org.apache.hadoop.hbase.RawCell;
+046import org.apache.hadoop.hbase.Tag;
+047import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+048import 
org.apache.hadoop.hbase.io.HeapSize;
+049import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+050import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+051import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
+052import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
+053import 
org.apache.hadoop.hbase.security.access.Permission;
+054import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
+055import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+056import 
org.apache.hadoop.hbase.util.Bytes;
+057import 
org.apache.hadoop.hbase.util.ClassSize;
+058import 
org.apache.yetus.audience.InterfaceAudience;
 059
-060@InterfaceAudience.Public
-061public abstract class Mutation extends 
OperationWithAttributes implements Row, CellScannable,
-062HeapSize {
-063  public static final long 
MUTATION_OVERHEAD = ClassSize.align(
-064  // This
-065  ClassSize.OBJECT +
-066  // row + 
OperationWithAttributes.attributes
-067  2 * ClassSize.REFERENCE +
-068  // Timestamp
-069  1 * Bytes.SIZEOF_LONG +
-070  // durability
-071  ClassSize.REFERENCE +
-072  // familyMap
-073  ClassSize.REFERENCE +
-074  // familyMap
-075  ClassSize.TREEMAP +
-076  // priority
-077  ClassSize.INTEGER
-078  );
-079
-080  /**
-081   

[23/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
index 71630ed..7a5b5b6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var methods = 
{"i0":6,"i1":18,"i2":18,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -121,7 +121,7 @@ public interface 
-All MethodsInstance MethodsAbstract Methods
+All MethodsInstance MethodsAbstract MethodsDefault Methods
 
 Modifier and Type
 Method and Description
@@ -131,47 +131,57 @@ public interface build()
 
 
-ReplicationPeerConfigBuilder
-setBandwidth(longbandwidth)
+default ReplicationPeerConfigBuilder
+putAllConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
 
 
-ReplicationPeerConfigBuilder
-setClusterKey(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclusterKey)
-Set the clusterKey which is the concatenation of the slave 
cluster's:
- 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
-
+default ReplicationPeerConfigBuilder
+putAllPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
 
 
 ReplicationPeerConfigBuilder
-setConfiguration(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringconfiguration)
+putConfiguration(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
 
 
 ReplicationPeerConfigBuilder
-setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+putPeerData(byte[]key,
+   byte[]value)
 
 
 ReplicationPeerConfigBuilder
-setExcludeTableCFsMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCFsMap)
+setBandwidth(longbandwidth)
 
 
 ReplicationPeerConfigBuilder
-setNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringnamespaces)
+setClusterKey(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclusterKey)
+Set the clusterKey which is the concatenation of the slave 
cluster's:
+ 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
+
 
 
 ReplicationPeerConfigBuilder
-setPeerData(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 
java.util">Mapbyte[],byte[]peerData)
+setExcludeNamespaces(http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 

[11/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
index d152a92..8c56813 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
@@ -34,339 +34,320 @@
 026import java.util.NavigableMap;
 027import java.util.UUID;
 028import org.apache.hadoop.hbase.Cell;
-029import 
org.apache.hadoop.hbase.CellUtil;
-030import 
org.apache.hadoop.hbase.HConstants;
-031import 
org.apache.hadoop.hbase.IndividualBytesFieldCell;
-032import 
org.apache.hadoop.hbase.KeyValue;
-033import 
org.apache.hadoop.hbase.io.HeapSize;
-034import 
org.apache.hadoop.hbase.security.access.Permission;
-035import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-036import 
org.apache.hadoop.hbase.util.Bytes;
-037import 
org.apache.yetus.audience.InterfaceAudience;
-038
-039/**
-040 * Used to perform Put operations for a 
single row.
-041 * p
-042 * To perform a Put, instantiate a Put 
object with the row to insert to, and
-043 * for each column to be inserted, 
execute {@link #addColumn(byte[], byte[],
-044 * byte[]) add} or {@link 
#addColumn(byte[], byte[], long, byte[]) add} if
-045 * setting the timestamp.
-046 */
-047@InterfaceAudience.Public
-048public class Put extends Mutation 
implements HeapSize, ComparableRow {
-049  /**
-050   * Create a Put operation for the 
specified row.
-051   * @param row row key
-052   */
-053  public Put(byte [] row) {
-054this(row, 
HConstants.LATEST_TIMESTAMP);
-055  }
-056
-057  /**
-058   * Create a Put operation for the 
specified row, using a given timestamp.
-059   *
-060   * @param row row key; we make a copy 
of what we are passed to keep local.
-061   * @param ts timestamp
-062   */
-063  public Put(byte[] row, long ts) {
-064this(row, 0, row.length, ts);
-065  }
-066
-067  /**
-068   * We make a copy of the passed in row 
key to keep local.
-069   * @param rowArray
-070   * @param rowOffset
-071   * @param rowLength
-072   */
-073  public Put(byte [] rowArray, int 
rowOffset, int rowLength) {
-074this(rowArray, rowOffset, rowLength, 
HConstants.LATEST_TIMESTAMP);
-075  }
-076
-077  /**
-078   * @param row row key; we make a copy 
of what we are passed to keep local.
-079   * @param ts  timestamp
-080   */
-081  public Put(ByteBuffer row, long ts) {
-082if (ts  0) {
-083  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-084}
-085checkRow(row);
-086this.row = new 
byte[row.remaining()];
-087row.get(this.row);
-088this.ts = ts;
-089  }
-090
-091  /**
-092   * @param row row key; we make a copy 
of what we are passed to keep local.
-093   */
-094  public Put(ByteBuffer row) {
-095this(row, 
HConstants.LATEST_TIMESTAMP);
-096  }
-097
-098  /**
-099   * We make a copy of the passed in row 
key to keep local.
-100   * @param rowArray
-101   * @param rowOffset
-102   * @param rowLength
-103   * @param ts
-104   */
-105  public Put(byte [] rowArray, int 
rowOffset, int rowLength, long ts) {
-106checkRow(rowArray, rowOffset, 
rowLength);
-107this.row = Bytes.copy(rowArray, 
rowOffset, rowLength);
-108this.ts = ts;
-109if (ts  0) {
-110  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-111}
-112  }
-113
-114  /**
-115   * Create a Put operation for an 
immutable row key.
-116   *
-117   * @param row row key
-118   * @param rowIsImmutable whether the 
input row is immutable.
-119   *   Set to true if 
the caller can guarantee that
-120   *   the row will 
not be changed for the Put duration.
-121   */
-122  public Put(byte [] row, boolean 
rowIsImmutable) {
-123this(row, 
HConstants.LATEST_TIMESTAMP, rowIsImmutable);
-124  }
-125
-126  /**
-127   * Create a Put operation for an 
immutable row key, using a given timestamp.
-128   *
-129   * @param row row key
-130   * @param ts timestamp
-131   * @param rowIsImmutable whether the 
input row is immutable.
-132   *   Set to true if 
the caller can guarantee that
-133   *   the row will 
not be changed for the Put duration.
-134   */
-135  public Put(byte[] row, long ts, boolean 
rowIsImmutable) {
-136// Check and set timestamp
-137if (ts  0) {
-138  throw new 
IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-139}
-140this.ts = ts;
-141
-142// Deal with row according to 
rowIsImmutable
-143checkRow(row);
-144if (rowIsImmutable) {  // Row is 
immutable
-145  this.row = row;  // Do not make a 
local copy, but point to the provided byte array directly
-146} else {  // Row is not immutable
-147  this.row = Bytes.copy(row, 0, 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index 0c7ac41..0f682ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -33,342 +33,331 @@
 025import java.util.NavigableMap;
 026import java.util.UUID;
 027import org.apache.hadoop.hbase.Cell;
-028import 
org.apache.hadoop.hbase.CellUtil;
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.KeyValue;
-031import 
org.apache.hadoop.hbase.security.access.Permission;
-032import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-033import 
org.apache.hadoop.hbase.util.Bytes;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035
-036/**
-037 * Used to perform Delete operations on a 
single row.
-038 * p
-039 * To delete an entire row, instantiate a 
Delete object with the row
-040 * to delete.  To further define the 
scope of what to delete, perform
-041 * additional methods as outlined 
below.
-042 * p
-043 * To delete specific families, execute 
{@link #addFamily(byte[]) deleteFamily}
-044 * for each family to delete.
-045 * p
-046 * To delete multiple versions of 
specific columns, execute
-047 * {@link #addColumns(byte[], byte[]) 
deleteColumns}
-048 * for each column to delete.
-049 * p
-050 * To delete specific versions of 
specific columns, execute
-051 * {@link #addColumn(byte[], byte[], 
long) deleteColumn}
-052 * for each column version to delete.
-053 * p
-054 * Specifying timestamps, deleteFamily 
and deleteColumns will delete all
-055 * versions with a timestamp less than or 
equal to that passed.  If no
-056 * timestamp is specified, an entry is 
added with a timestamp of 'now'
-057 * where 'now' is the servers's 
System.currentTimeMillis().
-058 * Specifying a timestamp to the 
deleteColumn method will
-059 * delete versions only with a timestamp 
equal to that specified.
-060 * If no timestamp is passed to 
deleteColumn, internally, it figures the
-061 * most recent cell's timestamp and adds 
a delete at that timestamp; i.e.
-062 * it deletes the most recently added 
cell.
-063 * pThe timestamp passed to the 
constructor is used ONLY for delete of
-064 * rows.  For anything less -- a 
deleteColumn, deleteColumns or
-065 * deleteFamily -- then you need to use 
the method overrides that take a
-066 * timestamp.  The constructor timestamp 
is not referenced.
-067 */
-068@InterfaceAudience.Public
-069public class Delete extends Mutation 
implements ComparableRow {
-070  /**
-071   * Create a Delete operation for the 
specified row.
-072   * p
-073   * If no further operations are done, 
this will delete everything
-074   * associated with the specified row 
(all versions of all columns in all
-075   * families), with timestamp from 
current point in time to the past.
-076   * Cells defining timestamp for a 
future point in time
-077   * (timestamp  current time) will 
not be deleted.
-078   * @param row row key
-079   */
-080  public Delete(byte [] row) {
-081this(row, 
HConstants.LATEST_TIMESTAMP);
-082  }
-083
-084  /**
-085   * Create a Delete operation for the 
specified row and timestamp.p
-086   *
-087   * If no further operations are done, 
this will delete all columns in all
-088   * families of the specified row with a 
timestamp less than or equal to the
-089   * specified timestamp.p
-090   *
-091   * This timestamp is ONLY used for a 
delete row operation.  If specifying
-092   * families or columns, you must 
specify each timestamp individually.
-093   * @param row row key
-094   * @param timestamp maximum version 
timestamp (only for delete row)
-095   */
-096  public Delete(byte [] row, long 
timestamp) {
-097this(row, 0, row.length, 
timestamp);
-098  }
-099
-100  /**
-101   * Create a Delete operation for the 
specified row and timestamp.p
-102   *
-103   * If no further operations are done, 
this will delete all columns in all
-104   * families of the specified row with a 
timestamp less than or equal to the
-105   * specified timestamp.p
-106   *
-107   * This timestamp is ONLY used for a 
delete row operation.  If specifying
-108   * families or columns, you must 
specify each timestamp individually.
-109   * @param row We make a local copy of 
this passed in row.
-110   * @param rowOffset
-111   * @param rowLength
-112   */
-113  public Delete(final byte[] row, final 
int rowOffset, final int rowLength) {
-114this(row, rowOffset, rowLength, 
HConstants.LATEST_TIMESTAMP);
-115  }
-116
-117  /**
-118   * Create a Delete operation for the 
specified row and timestamp.p
-119   *
-120   * If no further operations are done, 
this will delete all columns in all
-121   * families of the 

[21/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
deleted file mode 100644
index 980f8bd..000
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
+++ /dev/null
@@ -1,1070 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-TableBasedReplicationQueuesImpl (Apache HBase 3.0.0-SNAPSHOT 
API)
-
-
-
-
-
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.replication
-Class 
TableBasedReplicationQueuesImpl
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.replication.ReplicationTableBase
-
-
-org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-ReplicationQueues
-
-
-
-@InterfaceAudience.Private
-public class TableBasedReplicationQueuesImpl
-extends ReplicationTableBase
-implements ReplicationQueues
-This class provides an implementation of the 
ReplicationQueues interface using an HBase table
- "Replication Table". It utilizes the ReplicationTableBase to access the 
Replication Table.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private static byte[]
-EMPTY_STRING_BYTES
-
-
-private static byte[]
-INITIAL_OFFSET_BYTES
-
-
-private static org.slf4j.Logger
-LOG
-
-
-private ReplicationStateZKBase
-replicationState
-
-
-private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-serverName
-
-
-private byte[]
-serverNameBytes
-
-
-
-
-
-
-Fields inherited from classorg.apache.hadoop.hbase.replication.ReplicationTableBase
-abortable,
 CF_QUEUE,
 COL_QUEUE_OWNER,
 COL_QUEUE_OWNER_HISTORY,
 conf,
 QUEUE_HISTORY_DELIMITER,
 REPLICATION_TABLE_NAME,
 ROW_KEY_DELIMITER
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TableBasedReplicationQueuesImpl(org.apache.hadoop.conf.Configurationconf,
-   Abortableabort,
-   ZKWatcherzkw)
-
-
-TableBasedReplicationQueuesImpl(ReplicationQueuesArgumentsargs)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-addHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairorg.apache.hadoop.fs.Path,org.apache.hadoop.fs.Pathpairs)
-Add new hfile references to the queue.
-
-
-
-void
-addLog(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringqueueId,
-  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringfilename)
-Add a new WAL file to the given queue.
-
-
-
-void
-addPeerToHFileRefs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
-Add a peer to hfile reference queue if peer does not 
exist.
-
-
-
-private boolean
-attemptToClaimQueue(Resultqueue,
-   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 

[14/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index ea801d2..6d58b59 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -106,264 +106,257 @@
 098   * @throws java.io.IOException e
 099   */
 100  public Increment add(Cell cell) throws 
IOException{
-101byte [] family = 
CellUtil.cloneFamily(cell);
-102ListCell list = 
getCellList(family);
-103//Checking that the row of the kv is 
the same as the put
-104if (!CellUtil.matchingRows(cell, 
this.row)) {
-105  throw new WrongRowIOException("The 
row in " + cell +
-106" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
-107}
-108list.add(cell);
-109return this;
-110  }
-111
-112  /**
-113   * Increment the column from the 
specific family with the specified qualifier
-114   * by the specified amount.
-115   * p
-116   * Overrides previous calls to 
addColumn for this family and qualifier.
-117   * @param family family name
-118   * @param qualifier column qualifier
-119   * @param amount amount to increment 
by
-120   * @return the Increment object
-121   */
-122  public Increment addColumn(byte [] 
family, byte [] qualifier, long amount) {
-123if (family == null) {
-124  throw new 
IllegalArgumentException("family cannot be null");
-125}
-126ListCell list = 
getCellList(family);
-127KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-128list.add(kv);
-129return this;
-130  }
-131
-132  /**
-133   * Gets the TimeRange used for this 
increment.
-134   * @return TimeRange
-135   */
-136  public TimeRange getTimeRange() {
-137return this.tr;
-138  }
-139
-140  /**
-141   * Sets the TimeRange to be used on the 
Get for this increment.
+101super.add(cell);
+102return this;
+103  }
+104
+105  /**
+106   * Increment the column from the 
specific family with the specified qualifier
+107   * by the specified amount.
+108   * p
+109   * Overrides previous calls to 
addColumn for this family and qualifier.
+110   * @param family family name
+111   * @param qualifier column qualifier
+112   * @param amount amount to increment 
by
+113   * @return the Increment object
+114   */
+115  public Increment addColumn(byte [] 
family, byte [] qualifier, long amount) {
+116if (family == null) {
+117  throw new 
IllegalArgumentException("family cannot be null");
+118}
+119ListCell list = 
getCellList(family);
+120KeyValue kv = 
createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
+121list.add(kv);
+122return this;
+123  }
+124
+125  /**
+126   * Gets the TimeRange used for this 
increment.
+127   * @return TimeRange
+128   */
+129  public TimeRange getTimeRange() {
+130return this.tr;
+131  }
+132
+133  /**
+134   * Sets the TimeRange to be used on the 
Get for this increment.
+135   * p
+136   * This is useful for when you have 
counters that only last for specific
+137   * periods of time (ie. counters that 
are partitioned by time).  By setting
+138   * the range of valid times for this 
increment, you can potentially gain
+139   * some performance with a more optimal 
Get operation.
+140   * Be careful adding the time range to 
this class as you will update the old cell if the
+141   * time range doesn't include the 
latest cells.
 142   * p
-143   * This is useful for when you have 
counters that only last for specific
-144   * periods of time (ie. counters that 
are partitioned by time).  By setting
-145   * the range of valid times for this 
increment, you can potentially gain
-146   * some performance with a more optimal 
Get operation.
-147   * Be careful adding the time range to 
this class as you will update the old cell if the
-148   * time range doesn't include the 
latest cells.
-149   * p
-150   * This range is used as [minStamp, 
maxStamp).
-151   * @param minStamp minimum timestamp 
value, inclusive
-152   * @param maxStamp maximum timestamp 
value, exclusive
-153   * @throws IOException if invalid time 
range
-154   * @return this
-155   */
-156  public Increment setTimeRange(long 
minStamp, long maxStamp)
-157  throws IOException {
-158tr = new TimeRange(minStamp, 
maxStamp);
-159return this;
-160  }
-161
-162  @Override
-163  public Increment setTimestamp(long 
timestamp) {
-164super.setTimestamp(timestamp);
-165return this;
-166  }
-167
-168  /**
-169   * @param returnResults True (default) 
if the increment operation should return the results. A
-170   *  client that is not 
interested in the result can save network bandwidth setting this
-171   *  to false.
-172 

[04/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
index 5272542..929de17 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
@@ -39,39 +39,34 @@
 031@InterfaceAudience.Private
 032public class ReplicationFactory {
 033
-034  public static final Class 
defaultReplicationQueueClass = ReplicationQueuesZKImpl.class;
-035
-036  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
-037  throws Exception {
-038Class? classToBuild = 
args.getConf().getClass("hbase.region.replica." +
-039
"replication.replicationQueues.class", defaultReplicationQueueClass);
-040return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(classToBuild, args);
-041  }
-042
-043  public static ReplicationQueuesClient 
getReplicationQueuesClient(
-044  ReplicationQueuesClientArguments 
args) throws Exception {
-045Class? classToBuild = 
args.getConf().getClass(
-046  
"hbase.region.replica.replication.replicationQueuesClient.class",
-047  
ReplicationQueuesClientZKImpl.class);
-048return (ReplicationQueuesClient) 
ConstructorUtils.invokeConstructor(classToBuild, args);
+034  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
+035  throws Exception {
+036return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
+037  args);
+038  }
+039
+040  public static ReplicationQueuesClient
+041  
getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
+042return (ReplicationQueuesClient) 
ConstructorUtils
+043
.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
+044  }
+045
+046  public static ReplicationPeers 
getReplicationPeers(final ZKWatcher zk, Configuration conf,
+047  
   Abortable abortable) {
+048return getReplicationPeers(zk, conf, 
null, abortable);
 049  }
 050
 051  public static ReplicationPeers 
getReplicationPeers(final ZKWatcher zk, Configuration conf,
-052  
   Abortable abortable) {
-053return getReplicationPeers(zk, conf, 
null, abortable);
+052  
   final ReplicationQueuesClient queuesClient, Abortable abortable) {
+053return new ReplicationPeersZKImpl(zk, 
conf, queuesClient, abortable);
 054  }
 055
-056  public static ReplicationPeers 
getReplicationPeers(final ZKWatcher zk, Configuration conf,
-057  
   final ReplicationQueuesClient queuesClient, Abortable abortable) {
-058return new ReplicationPeersZKImpl(zk, 
conf, queuesClient, abortable);
-059  }
-060
-061  public static ReplicationTracker 
getReplicationTracker(ZKWatcher zookeeper,
-062  final ReplicationPeers 
replicationPeers, Configuration conf, Abortable abortable,
-063  Stoppable stopper) {
-064return new 
ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, 
stopper);
-065  }
-066}
+056  public static ReplicationTracker 
getReplicationTracker(ZKWatcher zookeeper,
+057  final ReplicationPeers 
replicationPeers, Configuration conf, Abortable abortable,
+058  Stoppable stopper) {
+059return new 
ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, 
stopper);
+060  }
+061}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
index 3fae067..fc0477c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
@@ -227,7 +227,7 @@
 219ReplicationPeerConfigBuilderImpl 
builder = new ReplicationPeerConfigBuilderImpl();
 220
builder.setClusterKey(peerConfig.getClusterKey())
 221
.setReplicationEndpointImpl(peerConfig.getReplicationEndpointImpl())
-222
.setPeerData(peerConfig.getPeerData()).setConfiguration(peerConfig.getConfiguration())
+222

[02/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
deleted file mode 100644
index 2b0ff55..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
+++ /dev/null
@@ -1,185 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/*
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019package 
org.apache.hadoop.hbase.replication;
-020
-021import 
org.apache.commons.lang3.NotImplementedException;
-022import 
org.apache.hadoop.conf.Configuration;
-023import 
org.apache.hadoop.hbase.Abortable;
-024import 
org.apache.hadoop.hbase.HConstants;
-025import 
org.apache.yetus.audience.InterfaceAudience;
-026import 
org.apache.hadoop.hbase.client.Result;
-027import 
org.apache.hadoop.hbase.client.ResultScanner;
-028import 
org.apache.hadoop.hbase.client.Scan;
-029import 
org.apache.hadoop.hbase.client.Table;
-030import 
org.apache.zookeeper.KeeperException;
-031
-032import java.io.IOException;
-033import java.util.HashSet;
-034import java.util.List;
-035import java.util.Set;
-036
-037/**
-038 * Implements the ReplicationQueuesClient 
interface on top of the Replication Table. It utilizes
-039 * the ReplicationTableBase to access the 
Replication Table.
-040 */
-041@InterfaceAudience.Private
-042public class 
TableBasedReplicationQueuesClientImpl extends ReplicationTableBase
-043  implements ReplicationQueuesClient {
-044
-045  public 
TableBasedReplicationQueuesClientImpl(ReplicationQueuesClientArguments args)
-046throws IOException {
-047super(args.getConf(), 
args.getAbortable());
-048  }
-049  public 
TableBasedReplicationQueuesClientImpl(Configuration conf,
-050  
 Abortable abortable) throws IOException {
-051super(conf, abortable);
-052  }
-053
-054  @Override
-055  public void init() throws 
ReplicationException{
-056// no-op
-057  }
-058
-059  @Override
-060  public ListString 
getListOfReplicators() {
-061return 
super.getListOfReplicators();
-062  }
-063
-064  @Override
-065  public ListString 
getLogsInQueue(String serverName, String queueId) {
-066return 
super.getLogsInQueue(serverName, queueId);
-067  }
-068
-069  @Override
-070  public ListString 
getAllQueues(String serverName) {
-071return 
super.getAllQueues(serverName);
-072  }
-073
-074  @Override
-075  public SetString getAllWALs() 
{
-076SetString allWals = new 
HashSet();
-077ResultScanner allQueues = null;
-078try (Table replicationTable = 
getOrBlockOnReplicationTable()) {
-079  allQueues = 
replicationTable.getScanner(new Scan());
-080  for (Result queue : allQueues) {
-081for (String wal : 
readWALsFromResult(queue)) {
-082  allWals.add(wal);
-083}
-084  }
-085} catch (IOException e) {
-086  String errMsg = "Failed getting all 
WAL's in Replication Table";
-087  abortable.abort(errMsg, e);
-088} finally {
-089  if (allQueues != null) {
-090allQueues.close();
-091  }
-092}
-093return allWals;
-094  }
-095
-096  @Override
-097  public int 
getHFileRefsNodeChangeVersion() throws KeeperException {
-098// TODO
-099throw new 
NotImplementedException(HConstants.NOT_IMPLEMENTED);
-100  }
-101
-102  @Override
-103  public ListString 
getAllPeersFromHFileRefsQueue() throws KeeperException {
-104// TODO
-105throw new 
NotImplementedException(HConstants.NOT_IMPLEMENTED);
-106  }
-107
-108  @Override
-109  public ListString 
getReplicableHFiles(String peerId) throws KeeperException {
-110// TODO
-111throw new 
NotImplementedException(HConstants.NOT_IMPLEMENTED);
-112  }
-113}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

[06/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 07b6abe..f51c693 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -78,2190 +78,2184 @@
 070import 
org.apache.hadoop.hbase.procedure2.LockType;
 071import 
org.apache.hadoop.hbase.procedure2.LockedResource;
 072import 
org.apache.hadoop.hbase.procedure2.Procedure;
-073import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-074import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-075import 
org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
-076import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-077import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-078import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-079import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-080import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-081import 
org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-082import 
org.apache.hadoop.hbase.replication.ReplicationException;
-083import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-084import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-085import 
org.apache.hadoop.hbase.security.User;
-086import 
org.apache.hadoop.hbase.security.access.AccessController;
-087import 
org.apache.hadoop.hbase.security.visibility.VisibilityController;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-090import 
org.apache.hadoop.hbase.util.Bytes;
-091import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-092import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-093import 
org.apache.hadoop.hbase.util.Pair;
-094import 
org.apache.yetus.audience.InterfaceAudience;
-095import 
org.apache.zookeeper.KeeperException;
-096import org.slf4j.Logger;
-097import org.slf4j.LoggerFactory;
-098
-099import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-100import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-101import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-126import 

hbase git commit: HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder

2017-12-26 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8362b0dba -> 920005a2d


HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/920005a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/920005a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/920005a2

Branch: refs/heads/branch-2
Commit: 920005a2dc5795426f981a71041e4f93a72a1ff0
Parents: 8362b0d
Author: Guanghao Zhang 
Authored: Tue Dec 26 20:56:56 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Dec 26 21:56:40 2017 +0800

--
 .../hadoop/hbase/replication/ReplicationPeersZKImpl.java | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/920005a2/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 2f6d52c..289d2aa 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -363,9 +363,14 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 }
 // Update existingConfig's peer config and peer data with the new values, 
but don't touch config
 // or data that weren't explicitly changed
-ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(newConfig);
-builder.putAllConfiguration(existingConfig.getConfiguration());
-builder.putAllPeerData(existingConfig.getPeerData());
+ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(existingConfig);
+builder.putAllConfiguration(newConfig.getConfiguration())
+.putAllPeerData(newConfig.getPeerData())
+.setReplicateAllUserTables(newConfig.replicateAllUserTables())
+
.setNamespaces(newConfig.getNamespaces()).setTableCFsMap(newConfig.getTableCFsMap())
+.setExcludeNamespaces(newConfig.getExcludeNamespaces())
+.setExcludeTableCFsMap(newConfig.getExcludeTableCFsMap())
+.setBandwidth(newConfig.getBandwidth());
 
 try {
   ZKUtil.setData(this.zookeeper, getPeerNode(id),



hbase git commit: HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder

2017-12-26 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 7ce1943ef -> 80c7e4ea7


HBASE-19621 (addendum) Revisit the methods in ReplicationPeerConfigBuilder


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80c7e4ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80c7e4ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80c7e4ea

Branch: refs/heads/master
Commit: 80c7e4ea7994186ab1876bb53bd7239d2559a481
Parents: 7ce1943
Author: Guanghao Zhang 
Authored: Tue Dec 26 20:56:56 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Dec 26 21:55:49 2017 +0800

--
 .../hadoop/hbase/replication/ReplicationPeersZKImpl.java | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80c7e4ea/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 2f6d52c..289d2aa 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -363,9 +363,14 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 }
 // Update existingConfig's peer config and peer data with the new values, 
but don't touch config
 // or data that weren't explicitly changed
-ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(newConfig);
-builder.putAllConfiguration(existingConfig.getConfiguration());
-builder.putAllPeerData(existingConfig.getPeerData());
+ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(existingConfig);
+builder.putAllConfiguration(newConfig.getConfiguration())
+.putAllPeerData(newConfig.getPeerData())
+.setReplicateAllUserTables(newConfig.replicateAllUserTables())
+
.setNamespaces(newConfig.getNamespaces()).setTableCFsMap(newConfig.getTableCFsMap())
+.setExcludeNamespaces(newConfig.getExcludeNamespaces())
+.setExcludeTableCFsMap(newConfig.getExcludeTableCFsMap())
+.setBandwidth(newConfig.getBandwidth());
 
 try {
   ZKUtil.setData(this.zookeeper, getPeerNode(id),



[17/17] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2017-12-26 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa3bc1d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa3bc1d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa3bc1d9

Branch: refs/heads/HBASE-19397
Commit: aa3bc1d9bb0ea2dd38a574bbf4d1d9d17289302f
Parents: 7459c8c
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:10:00 2017 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa3bc1d9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need clean 

[02/17] hbase git commit: HBASE-19621 Revisit the methods in ReplicationPeerConfigBuilder

2017-12-26 Thread zhangduo
HBASE-19621 Revisit the methods in ReplicationPeerConfigBuilder


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/15569392
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/15569392
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/15569392

Branch: refs/heads/HBASE-19397
Commit: 1556939236016bb51e45ffa1e8038c74e0f0db75
Parents: 2ce5dc8
Author: Guanghao Zhang 
Authored: Mon Dec 25 14:29:39 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Dec 26 14:40:31 2017 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  9 ++
 .../replication/ReplicationPeerConfig.java  | 10 +++
 .../ReplicationPeerConfigBuilder.java   | 14 +++--
 .../replication/ReplicationPeersZKImpl.java | 14 ++---
 .../replication/TestReplicationAdmin.java   | 31 ++--
 5 files changed, 38 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/15569392/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index a50d48f..012b309 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -26,7 +26,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.stream.Collectors;
 
 import org.apache.commons.lang3.StringUtils;
@@ -277,17 +276,13 @@ public final class ReplicationPeerConfigUtil {
   builder.setReplicationEndpointImpl(peer.getReplicationEndpointImpl());
 }
 
-Map peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 for (HBaseProtos.BytesBytesPair pair : peer.getDataList()) {
-  peerData.put(pair.getFirst().toByteArray(), 
pair.getSecond().toByteArray());
+  builder.putPeerData(pair.getFirst().toByteArray(), 
pair.getSecond().toByteArray());
 }
-builder.setPeerData(peerData);
 
-Map configuration = new HashMap<>();
 for (HBaseProtos.NameStringPair pair : peer.getConfigurationList()) {
-  configuration.put(pair.getName(), pair.getValue());
+  builder.putConfiguration(pair.getName(), pair.getValue());
 }
-builder.setConfiguration(configuration);
 
 Map tableCFsMap = convert2Map(
   peer.getTableCfsList().toArray(new 
ReplicationProtos.TableCF[peer.getTableCfsCount()]));

http://git-wip-us.apache.org/repos/asf/hbase/blob/15569392/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 8f6b938..ab75dff 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -219,7 +219,7 @@ public class ReplicationPeerConfig {
 ReplicationPeerConfigBuilderImpl builder = new 
ReplicationPeerConfigBuilderImpl();
 builder.setClusterKey(peerConfig.getClusterKey())
 .setReplicationEndpointImpl(peerConfig.getReplicationEndpointImpl())
-
.setPeerData(peerConfig.getPeerData()).setConfiguration(peerConfig.getConfiguration())
+
.putAllPeerData(peerConfig.getPeerData()).putAllConfiguration(peerConfig.getConfiguration())
 
.setTableCFsMap(peerConfig.getTableCFsMap()).setNamespaces(peerConfig.getNamespaces())
 .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
 .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
@@ -264,14 +264,14 @@ public class ReplicationPeerConfig {
 }
 
 @Override
-public ReplicationPeerConfigBuilder setPeerData(Map 
peerData) {
-  this.peerData = peerData;
+public ReplicationPeerConfigBuilder putConfiguration(String key, String 
value) {
+  this.configuration.put(key, value);
   return this;
 }
 
 @Override
-public ReplicationPeerConfigBuilder setConfiguration(Map 
configuration) {
-  this.configuration = configuration;
+public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) {
+  

[13/17] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2017-12-26 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e10313c4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e10313c4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e10313c4

Branch: refs/heads/HBASE-19397
Commit: e10313c4c6283b27fbab9e0db3e4b5cf183cc89e
Parents: a339565
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:07:55 2017 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e10313c4/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e10313c4/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[06/17] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2017-12-26 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/724ee671
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/724ee671
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/724ee671

Branch: refs/heads/HBASE-19397
Commit: 724ee671471d59042000174a806f44fa285a4bbd
Parents: 4b81af3
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/724ee671/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index dd85f5c..5f4665c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -389,6 +389,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -1041,7 +1048,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/724ee671/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 0291165..fd77e1f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -905,6 +905,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -927,22 +948,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static void 

[10/17] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99c9bb8e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99c9bb8e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99c9bb8e

Branch: refs/heads/HBASE-19397
Commit: 99c9bb8e4454715618c91fbb9634faa158f7b454
Parents: f60be0e
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:53 2017 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 180 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   9 +-
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |  10 +
 .../regionserver/ReplicationSource.java |   9 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../replication/TestReplicationAdmin.java   |   2 +-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  51 -
 .../TestReplicationSourceManager.java   |  32 ++-
 22 files changed, 533 insertions(+), 472 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/99c9bb8e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 267dc7a..d5285dc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -199,7 +201,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -224,10 +226,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/99c9bb8e/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 17b1141..8954d04 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -282,7 +284,7 @@ 

[14/17] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2017-12-26 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3395659/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 93b8649..1faaae3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -48,17 +49,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +305,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
   public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, 
Set<String> peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List<String> liveRegionServers = 
replicationTracker.getListOfRegionServers();
+Set<String> liveRegionServers = new 
HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List<String> regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List<ServerName> regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List<String> queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List<String> queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String queueId : queueIds) {
-  ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
-  List<String> wals = queuesClient.getLogsInQueue(regionserver, 
queueId);
-  if 

[07/17] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6c697d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6c697d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6c697d3

Branch: refs/heads/HBASE-19397
Commit: f6c697d331dcb502e567cbfd5989cece7fa533d7
Parents: 4faf7df
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f6c697d3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index ff2722e..cf8e198 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2463,7 +2463,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2474,7 +2474,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2482,6 +2482,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  default Future<Void> addReplicationPeerAsync(String peerId, 
ReplicationPeerConfig peerConfig)
+  throws IOException {
+return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig 
peerConfig,
+  boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2489,6 +2520,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow 

[11/17] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2017-12-26 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/d422629e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List<ReplicationPeerDescription> peers = new ArrayList<>();
-List<String> peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : 

[12/17] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2017-12-26 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d422629e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d422629e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d422629e

Branch: refs/heads/HBASE-19397
Commit: d422629e865bc1a14281a2d31f9f63070376f4be
Parents: 99c9bb8
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:53 2017 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  62 ++-
 .../hbase/master/MockNoopMasterServices.java|  10 +-
 .../hbase/master/TestMasterNoCluster.java   |   4 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1749 insertions(+), 311 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d422629e/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static <T> List<T> nullToEmpty(List<T> list) {
+return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/d422629e/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index ab22199..4e3cea0 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -121,6 +121,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/d422629e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain 

[08/17] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2017-12-26 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4faf7dfc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4faf7dfc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4faf7dfc

Branch: refs/heads/HBASE-19397
Commit: 4faf7dfc63da77b759e4f1d503c9813a02d4e61a
Parents: 054e74f
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |   3 +-
 .../src/main/protobuf/MasterProcedure.proto |  21 +++-
 .../src/main/protobuf/RegionServerStatus.proto  |   3 +-
 .../src/main/protobuf/Replication.proto |   5 +
 .../replication/ReplicationPeersZKImpl.java |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 100 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/MasterServices.java |  26 +++--
 .../assignment/RegionTransitionProcedure.java   |  11 +-
 .../master/procedure/MasterProcedureEnv.java|   5 +
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/replication/AddPeerProcedure.java|  97 ++
 .../replication/DisablePeerProcedure.java   |  70 +
 .../master/replication/EnablePeerProcedure.java |  69 +
 .../master/replication/ModifyPeerProcedure.java |  97 +++---
 .../master/replication/RefreshPeerCallable.java |  67 -
 .../replication/RefreshPeerProcedure.java   |  28 --
 .../master/replication/RemovePeerProcedure.java |  69 +
 .../master/replication/ReplicationManager.java  |  76 +++---
 .../replication/UpdatePeerConfigProcedure.java  |  92 +
 .../hbase/regionserver/HRegionServer.java   |   6 +-
 .../regionserver/RefreshPeerCallable.java   |  70 +
 .../hbase/master/MockNoopMasterServices.java|  23 +++--
 .../replication/DummyModifyPeerProcedure.java   |  13 ++-
 24 files changed, 736 insertions(+), 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4faf7dfc/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index e9a6906..1235b33 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcher

[04/17] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2017-12-26 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f60be0ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f60be0ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f60be0ea

Branch: refs/heads/HBASE-19397
Commit: f60be0ea03991dbe64413ed63a7361a9cd17f471
Parents: 724ee67
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f60be0ea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f60be0ea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f60be0ea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
--

[15/17] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2017-12-26 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3395659
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3395659
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3395659

Branch: refs/heads/HBASE-19397
Commit: a3395659d37eeb8eb560bc3dce1933aef27a4ded
Parents: d422629
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:07:55 2017 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  24 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 176 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 148 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 108 ++
 .../master/ReplicationLogCleaner.java   |  35 +-
 .../regionserver/DumpReplicationQueues.java |  77 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  29 +-
 .../cleaner/TestReplicationHFileCleaner.java|  58 +--
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 227 ---
 .../TestReplicationSourceManagerZkImpl.java |  84 ++---
 25 files changed, 907 insertions(+), 1323 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3395659/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- final 
ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3395659/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 06cdbe5..3581a05 100644
--- 

[05/17] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2017-12-26 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/054e74fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/054e74fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/054e74fc

Branch: refs/heads/HBASE-19397
Commit: 054e74fc197b5492eb1fbcdf9a7db1fad9e615db
Parents: 7ce1943
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  33 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 224 +--
 .../procedure/PeerProcedureInterface.java   |  34 +++
 .../master/procedure/RSProcedureDispatcher.java |  90 
 .../master/replication/ModifyPeerProcedure.java | 127 +++
 .../master/replication/RefreshPeerCallable.java |  67 ++
 .../replication/RefreshPeerProcedure.java   | 197 
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  69 +-
 .../hbase/regionserver/RSRpcServices.java   |  56 +++--
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 +++
 .../security/access/TestAccessController.java   |   6 +-
 24 files changed, 1109 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/054e74fc/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/054e74fc/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 54f2b08..e9a6906 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the {@code 
reportProcedureDone}
+ * method.
+ * @param error the error message
+ */
+void remoteOperationFailed(TEnv 

[01/17] hbase git commit: HBASE-19618 Remove replicationQueuesClient.class/replicationQueues.class config and remove table based ReplicationQueuesClient/ReplicationQueues implementation [Forced Update

2017-12-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397 092291e33 -> aa3bc1d9b (forced update)


HBASE-19618 Remove replicationQueuesClient.class/replicationQueues.class config 
and remove table based ReplicationQueuesClient/ReplicationQueues implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ce5dc89
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ce5dc89
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ce5dc89

Branch: refs/heads/HBASE-19397
Commit: 2ce5dc892710666c9a382fdeece412ecbb8559bb
Parents: 38472e1
Author: Guanghao Zhang 
Authored: Mon Dec 25 11:44:18 2017 +0800
Committer: Guanghao Zhang 
Committed: Tue Dec 26 14:39:41 2017 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  17 +-
 .../TableBasedReplicationQueuesClientImpl.java  | 113 -
 .../TableBasedReplicationQueuesImpl.java| 448 -
 .../org/apache/hadoop/hbase/master/HMaster.java |  17 +-
 .../replication/TestMultiSlaveReplication.java  |   2 -
 .../TestReplicationStateHBaseImpl.java  | 495 ---
 .../replication/TestReplicationTableBase.java   | 109 
 ...tTableBasedReplicationSourceManagerImpl.java |  63 ---
 8 files changed, 12 insertions(+), 1252 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ce5dc89/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 3ff6914..9f4ad18 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -31,21 +31,16 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static final Class defaultReplicationQueueClass = 
ReplicationQueuesZKImpl.class;
-
   public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
   throws Exception {
-Class classToBuild = args.getConf().getClass("hbase.region.replica." +
-"replication.replicationQueues.class", defaultReplicationQueueClass);
-return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(classToBuild, args);
+return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
+  args);
   }
 
-  public static ReplicationQueuesClient getReplicationQueuesClient(
-  ReplicationQueuesClientArguments args) throws Exception {
-Class classToBuild = args.getConf().getClass(
-  "hbase.region.replica.replication.replicationQueuesClient.class",
-  ReplicationQueuesClientZKImpl.class);
-return (ReplicationQueuesClient) 
ConstructorUtils.invokeConstructor(classToBuild, args);
+  public static ReplicationQueuesClient
+  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
+return (ReplicationQueuesClient) ConstructorUtils
+.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
   }
 
   public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ce5dc89/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
deleted file mode 100644
index 0a8ed31..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 

[16/17] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2017-12-26 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7459c8cf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7459c8cf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7459c8cf

Branch: refs/heads/HBASE-19397
Commit: 7459c8cf5ad170503d6d8a6f3327416c737df582
Parents: e10313c
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:07:55 2017 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 170 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 217 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7459c8cf/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 01df2bd..da231e6 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -330,7 +329,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -351,9 +349,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7459c8cf/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing store
-   * directly and 

[03/17] hbase git commit: HBASE-19550 Wrap the cell passed via Mutation#add(Cell) to be of ExtendedCell

2017-12-26 Thread zhangduo
HBASE-19550 Wrap the cell passed via Mutation#add(Cell) to be of ExtendedCell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7ce1943e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7ce1943e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7ce1943e

Branch: refs/heads/HBASE-19397
Commit: 7ce1943ef3ef6865d0292364d5d9b51adf3e9827
Parents: 1556939
Author: Chia-Ping Tsai 
Authored: Tue Dec 26 16:39:51 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Dec 26 16:39:51 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Append.java  |  19 +-
 .../org/apache/hadoop/hbase/client/Delete.java  |  17 +-
 .../apache/hadoop/hbase/client/Increment.java   |   9 +-
 .../apache/hadoop/hbase/client/Mutation.java| 208 ++
 .../org/apache/hadoop/hbase/client/Put.java |  25 +-
 .../TestPassCustomCellViaRegionObserver.java| 403 +++
 6 files changed, 628 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7ce1943e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 0cb51a2..b2995ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.UUID;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.security.access.Permission;
@@ -30,6 +30,8 @@ import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Performs Append operations on a single row.
@@ -44,6 +46,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class Append extends Mutation {
+  private static final Logger LOG = LoggerFactory.getLogger(Append.class);
   private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + 
ClassSize.TIMERANGE;
   private TimeRange tr = new TimeRange();
 
@@ -176,14 +179,12 @@ public class Append extends Mutation {
*/
   @SuppressWarnings("unchecked")
   public Append add(final Cell cell) {
-// Presume it is KeyValue for now.
-byte [] family = CellUtil.cloneFamily(cell);
-
-// Get cell list for the family
-List list = getCellList(family);
-
-// find where the new entry should be placed in the List
-list.add(cell);
+try {
+  super.add(cell);
+} catch (IOException e) {
+  // we eat the exception of wrong row for BC..
+  LOG.error(e.toString(), e);
+}
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7ce1943e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 57f5648..b5a0b93 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -25,7 +25,6 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.UUID;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.security.access.Permission;
@@ -170,22 +169,12 @@ public class Delete extends Mutation implements 
Comparable {
 
   /**
* Add an existing delete marker to this Delete object.
-   * @param kv An existing KeyValue of type "delete".
+   * @param cell An existing cell of type "delete".
* @return this for invocation chaining
* @throws IOException
*/
-  public Delete add(Cell kv) throws IOException {
-if (!CellUtil.isDelete(kv)) {
-  throw new IOException("The recently added KeyValue is not of type "
-  + "delete. Rowkey: " + Bytes.toStringBinary(this.row));
-}
-if (!CellUtil.matchingRows(kv, this.row)) {
-  throw new WrongRowIOException("The row in " + kv.toString() +
-" doesn't 

[09/17] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2017-12-26 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4b81af3d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4b81af3d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4b81af3d

Branch: refs/heads/HBASE-19397
Commit: 4b81af3d274fdaf035f37f7ebdf45ca28f0e677f
Parents: f6c697d
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Tue Dec 26 21:03:19 2017 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4b81af3d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 700b363..9f71bab 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch 

  1   2   >