hbase git commit: HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is disabled

2018-02-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/master b0e998f2a -> 397d34736


HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is 
disabled

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/397d3473
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/397d3473
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/397d3473

Branch: refs/heads/master
Commit: 397d34736e63d7661a2f01524f8b302e1309d40f
Parents: b0e998f
Author: Ashish Singhi 
Authored: Sun Feb 4 17:52:38 2018 +0530
Committer: Ashish Singhi 
Committed: Sun Feb 4 17:52:38 2018 +0530

--
 .../replication/regionserver/ReplicationSourceManager.java   | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/397d3473/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 2147214..6e87563 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationListener;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -747,6 +748,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 abortWhenFail(() -> 
queueStorage.removeQueue(server.getServerName(), queueId));
 continue;
   }
+  if (server instanceof ReplicationSyncUp.DummyServer
+  && peer.getPeerState().equals(PeerState.DISABLED)) {
+LOG.warn("Peer {} is disabled. ReplicationSyncUp tool will skip "
++ "replicating data to this peer.",
+  actualPeerId);
+continue;
+  }
   // track sources in walsByIdRecoveredQueues
   Map> walsByGroup = new HashMap<>();
   walsByIdRecoveredQueues.put(queueId, walsByGroup);



hbase git commit: HBASE-19926 Use a separated class to implement the WALActionListener for Replication

2018-02-04 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 397d34736 -> 14420e1b4


HBASE-19926 Use a separated class to implement the WALActionListener for 
Replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14420e1b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14420e1b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14420e1b

Branch: refs/heads/master
Commit: 14420e1b415cd468f652bf0137bda575e0a5980a
Parents: 397d347
Author: zhangduo 
Authored: Sun Feb 4 10:42:33 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:36:43 2018 +0800

--
 .../replication/regionserver/Replication.java   | 22 +
 .../regionserver/ReplicationSourceManager.java  | 45 -
 .../ReplicationSourceWALActionListener.java | 98 
 .../TestReplicationSourceManager.java   | 27 +-
 4 files changed, 105 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14420e1b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index aaf3beb..7803ac4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.ReplicationSinkService;
 import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -42,8 +41,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -127,23 +124,8 @@ public class Replication implements 
ReplicationSourceService, ReplicationSinkSer
 replicationTracker, conf, this.server, fs, logDir, oldLogDir, 
clusterId,
 walProvider != null ? walProvider.getWALFileLengthProvider() : p -> 
OptionalLong.empty());
 if (walProvider != null) {
-  walProvider.addWALActionsListener(new WALActionsListener() {
-
-@Override
-public void preLogRoll(Path oldPath, Path newPath) throws IOException {
-  replicationManager.preLogRoll(newPath);
-}
-
-@Override
-public void postLogRoll(Path oldPath, Path newPath) throws IOException 
{
-  replicationManager.postLogRoll(newPath);
-}
-
-@Override
-public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) 
throws IOException {
-  replicationManager.scopeWALEdits(logKey, logEdit);
-}
-  });
+  walProvider
+.addWALActionsListener(new ReplicationSourceWALActionListener(conf, 
replicationManager));
 }
 this.statsThreadPeriod =
 this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);

http://git-wip-us.apache.org/repos/asf/hbase/blob/14420e1b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 6e87563..85b2e85 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -43,8 +43,6 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 impo

hbase git commit: HBASE-19926 Use a separated class to implement the WALActionListener for Replication

2018-02-04 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0ca7a2e91 -> 3b603d2c0


HBASE-19926 Use a separated class to implement the WALActionListener for 
Replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b603d2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b603d2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b603d2c

Branch: refs/heads/branch-2
Commit: 3b603d2c08c1f1905a589597737412b43970a304
Parents: 0ca7a2e
Author: zhangduo 
Authored: Sun Feb 4 10:42:33 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:32:14 2018 +0800

--
 .../replication/regionserver/Replication.java   | 22 +
 .../regionserver/ReplicationSourceManager.java  | 47 +-
 .../ReplicationSourceWALActionListener.java | 98 
 .../TestReplicationSourceManager.java   | 30 ++
 4 files changed, 108 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b603d2c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 0274b0a..ad12c66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.ReplicationSinkService;
 import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
@@ -44,8 +43,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -130,23 +127,8 @@ public class Replication implements 
ReplicationSourceService, ReplicationSinkSer
 replicationTracker, conf, this.server, fs, logDir, oldLogDir, 
clusterId,
 walProvider != null ? walProvider.getWALFileLengthProvider() : p -> 
OptionalLong.empty());
 if (walProvider != null) {
-  walProvider.addWALActionsListener(new WALActionsListener() {
-
-@Override
-public void preLogRoll(Path oldPath, Path newPath) throws IOException {
-  replicationManager.preLogRoll(newPath);
-}
-
-@Override
-public void postLogRoll(Path oldPath, Path newPath) throws IOException 
{
-  replicationManager.postLogRoll(newPath);
-}
-
-@Override
-public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) 
throws IOException {
-  replicationManager.scopeWALEdits(logKey, logEdit);
-}
-  });
+  walProvider
+.addWALActionsListener(new ReplicationSourceWALActionListener(conf, 
replicationManager));
 }
 this.statsThreadPeriod =
 this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b603d2c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 8543896..cbbfca0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -40,12 +40,9 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.h

hbase git commit: HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is disabled

2018-02-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 3b603d2c0 -> 2d5b36d19


HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is 
disabled

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d5b36d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d5b36d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d5b36d1

Branch: refs/heads/branch-2
Commit: 2d5b36d194b90d4a43505c094464130506a079f6
Parents: 3b603d2
Author: Ashish Singhi 
Authored: Sun Feb 4 18:12:46 2018 +0530
Committer: Ashish Singhi 
Committed: Sun Feb 4 18:12:46 2018 +0530

--
 .../replication/regionserver/ReplicationSourceManager.java   | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d5b36d1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index cbbfca0..c0c2333 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -56,6 +56,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationListener;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
@@ -739,6 +740,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 replicationQueues.removeQueue(peerId);
 continue;
   }
+  if (server instanceof ReplicationSyncUp.DummyServer
+  && peer.getPeerState().equals(PeerState.DISABLED)) {
+LOG.warn("Peer {} is disabled. ReplicationSyncUp tool will skip "
++ "replicating data to this peer.",
+  actualPeerId);
+continue;
+  }
   // track sources in walsByIdRecoveredQueues
   Map> walsByGroup = new HashMap<>();
   walsByIdRecoveredQueues.put(peerId, walsByGroup);



[06/46] hbase git commit: HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails; ADDENDUM Fix failing TestMetaWithReplicas#testShutdownHandling; it was reading meta TableState

2018-02-04 Thread zhangduo
HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails; ADDENDUM Fix 
failing TestMetaWithReplicas#testShutdownHandling; it was reading meta 
TableState


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fbcb453c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fbcb453c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fbcb453c

Branch: refs/heads/HBASE-19397-branch-2
Commit: fbcb453ce2bbe4ffe0fa723b2ae05a7f91a6fc5d
Parents: cb138c2
Author: Michael Stack 
Authored: Sat Feb 3 21:47:59 2018 -0800
Committer: Michael Stack 
Committed: Sat Feb 3 21:48:18 2018 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fbcb453c/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index f80bbc0..5dc0565 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1109,6 +1109,9 @@ public class MetaTableAccessor {
   @Nullable
   public static TableState getTableState(Connection conn, TableName tableName)
   throws IOException {
+if (tableName.equals(TableName.META_TABLE_NAME)) {
+  return new TableState(tableName, TableState.State.ENABLED);
+}
 Table metaHTable = getMetaHTable(conn);
 Get get = new Get(tableName.getName()).addColumn(getTableFamily(), 
getTableStateColumn());
 long time = EnvironmentEdgeManager.currentTime();



[35/46] hbase git commit: HBASE-19634 Add permission check for executeProcedures in AccessController

2018-02-04 Thread zhangduo
HBASE-19634 Add permission check for executeProcedures in AccessController


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0381e83b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0381e83b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0381e83b

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0381e83b5cfb6efef0f1ac89dcf7f8e757ab85c6
Parents: 4112552
Author: zhangduo 
Authored: Thu Jan 4 16:18:21 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../hbase/coprocessor/RegionServerObserver.java | 14 ++
 .../hbase/regionserver/RSRpcServices.java   | 52 +++-
 .../RegionServerCoprocessorHost.java| 18 +++
 .../hbase/security/access/AccessController.java | 30 ++-
 .../hadoop/hbase/TestJMXConnectorServer.java|  7 +++
 .../security/access/TestAccessController.java   | 18 +--
 6 files changed, 100 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0381e83b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
index c1af3fb..5b751df 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
@@ -126,4 +126,18 @@ public interface RegionServerObserver {
   default void postClearCompactionQueues(
   final ObserverContext ctx)
   throws IOException {}
+
+  /**
+   * This will be called before executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
preExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
+
+  /**
+   * This will be called after executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
postExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0381e83b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index dbfcdc6..44934a6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -3459,36 +3459,40 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   }
 
   @Override
+  @QosPriority(priority = HConstants.ADMIN_QOS)
   public ExecuteProceduresResponse executeProcedures(RpcController controller,
   ExecuteProceduresRequest request) throws ServiceException {
-if (request.getOpenRegionCount() > 0) {
-  for (OpenRegionRequest req : request.getOpenRegionList()) {
-openRegion(controller, req);
+try {
+  checkOpen();
+  regionServer.getRegionServerCoprocessorHost().preExecuteProcedures();
+  if (request.getOpenRegionCount() > 0) {
+for (OpenRegionRequest req : request.getOpenRegionList()) {
+  openRegion(controller, req);
+}
   }
-}
-if (request.getCloseRegionCount() > 0) {
-  for (CloseRegionRequest req : request.getCloseRegionList()) {
-closeRegion(controller, req);
+  if (request.getCloseRegionCount() > 0) {
+for (CloseRegionRequest req : request.getCloseRegionList()) {
+  closeRegion(controller, req);
+}
   }
-}
-if (request.getProcCount() > 0) {
-  for (RemoteProcedureRequest req : request.getProcList()) {
-RSProcedureCallable callable;
-try {
-  callable =
-
Class.forName(req.getProcClass()).asSubclass(RSProcedureCallable.class).newInstance();
-} catch (Exception e) {
-  // here we just ignore the error as this should not happen and we do 
not provide a general
-  // way to report errors for all types of remote procedure. The 
procedure will hang at
-  // master side but after you solve the problem and restart master it 
will be executed
-  // again and pass.
-  LOG.warn("create procedure of type " + req.getProcClass() + " 
failed, give up", e);
-  continue;
+  if (request.getProcCount() > 0) {
+for (RemoteProcedureRequest req : request.getProcList()) {
+  

[14/46] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2018-02-04 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5dc52ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5dc52ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5dc52ac

Branch: refs/heads/HBASE-19397-branch-2
Commit: e5dc52ac2f6c95b6f25a9b591c7211f572883a6a
Parents: e2a72ae
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |  3 +-
 .../src/main/protobuf/MasterProcedure.proto | 21 -
 .../src/main/protobuf/RegionServerStatus.proto  |  3 +-
 .../src/main/protobuf/Replication.proto |  5 +
 .../replication/ReplicationPeersZKImpl.java |  4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 95 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |  4 +-
 .../hadoop/hbase/master/MasterServices.java | 26 +++---
 .../assignment/RegionTransitionProcedure.java   | 13 +--
 .../master/procedure/MasterProcedureEnv.java|  5 +
 .../master/procedure/ProcedurePrepareLatch.java |  2 +-
 .../master/replication/AddPeerProcedure.java| 97 
 .../replication/DisablePeerProcedure.java   | 70 ++
 .../master/replication/EnablePeerProcedure.java | 69 ++
 .../master/replication/ModifyPeerProcedure.java | 97 +---
 .../master/replication/RefreshPeerCallable.java | 67 --
 .../replication/RefreshPeerProcedure.java   | 28 --
 .../master/replication/RemovePeerProcedure.java | 69 ++
 .../master/replication/ReplicationManager.java  | 76 ---
 .../replication/UpdatePeerConfigProcedure.java  | 92 +++
 .../hbase/regionserver/HRegionServer.java   |  5 +-
 .../regionserver/RefreshPeerCallable.java   | 70 ++
 .../hbase/master/MockNoopMasterServices.java| 23 +++--
 .../replication/DummyModifyPeerProcedure.java   | 13 ++-
 24 files changed, 733 insertions(+), 224 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5dc52ac/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index dca0bec..a22a7ba 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcherhttp://git-wip-us.apache.org/repos/asf/hbase/blob/e5dc52ac/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 0e2bdba..ae676ea 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -27,6 +27,7 @@ option optimize_for = SPEED;
 import "HBase.proto";
 import "RPC.proto";
 import "Snapshot.proto";
+import "Replication.proto";
 
 // 
 //  WARNING - Compatibility rules
@@ -367,9 +368,10 @@ message GCMergedRegionsStateData {
 }
 
 enum PeerModificationState {
-  UPDATE_PEER_STORAGE = 1;
-  REFRESH_PEER_ON_RS = 2;
-  POST_PEER_MODIFICATION = 3;
+  PRE_PEER_MODIFICATION = 1;
+  UPDATE_PEER_STORAGE = 2;
+  REFRESH_PEER_ON_RS = 3;
+  POST_PEER_MODIFICATION = 4;
 }
 
 message PeerModificationStateData {
@@ -394,4 +396,17 @@ message RefreshPeerParameter {
   required string peer_id = 1;
   required PeerModificationType type = 2;
   required ServerName target_server = 3;
+}
+
+message ModifyPeerStateData {
+  required string peer_id = 1;
+}
+
+message AddPeerStateData {
+  required ReplicationPeer peer_config = 1;
+  required bool enabled = 2;
+}
+
+message UpdatePeerConfigStateData {
+  required ReplicationPeer peer_config = 1;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e5dc52ac/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index eb396ac..4f75941 100644
--- a/hbase-protocol-sha

[33/46] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source

2018-02-04 Thread zhangduo
HBASE-19623 Create replication endpoint asynchronously when adding a 
replication source


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3be13975
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3be13975
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3be13975

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3be13975aeda434cc4816f7c10ca30213a58994b
Parents: 7bb1768
Author: zhangduo 
Authored: Tue Jan 2 13:25:58 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   8 ++
 .../hbase/replication/ReplicationPeers.java |  18 +--
 .../replication/ZKReplicationPeerStorage.java   |   7 +-
 .../replication/TestReplicationStateBasic.java  |  20 +---
 .../TestZKReplicationPeerStorage.java   |  14 +--
 .../HBaseInterClusterReplicationEndpoint.java   |  17 ++-
 .../RecoveredReplicationSource.java |  13 +--
 .../regionserver/ReplicationSource.java | 110 +++
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  47 +---
 .../client/TestAsyncReplicationAdminApi.java|   2 -
 .../replication/TestReplicationAdmin.java   |   2 -
 .../replication/ReplicationSourceDummy.java |   7 +-
 .../replication/TestReplicationSource.java  |   5 +-
 .../TestReplicationSourceManager.java   |   8 +-
 15 files changed, 116 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3be13975/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 4846018..2da3cce 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,14 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Test whether the peer is enabled.
+   * @return {@code true} if enabled, otherwise {@code false}.
+   */
+  default boolean isPeerEnabled() {
+return getPeerState() == PeerState.ENABLED;
+  }
+
+  /**
* Get the peer config object
* @return the ReplicationPeerConfig for this peer
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/3be13975/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 422801b..45940a5 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -86,21 +87,6 @@ public class ReplicationPeers {
   }
 
   /**
-   * Get the peer state for the specified connected remote slave cluster. The 
value might be read
-   * from cache, so it is recommended to use {@link #peerStorage } to read 
storage directly if
-   * reading the state after enabling or disabling it.
-   * @param peerId a short that identifies the cluster
-   * @return true if replication is enabled, false otherwise.
-   */
-  public boolean isPeerEnabled(String peerId) {
-ReplicationPeer replicationPeer = this.peerCache.get(peerId);
-if (replicationPeer == null) {
-  throw new IllegalArgumentException("Peer with id= " + peerId + " is not 
cached");
-}
-return replicationPeer.getPeerState() == PeerState.ENABLED;
-  }
-
-  /**
* Returns the ReplicationPeerImpl for the specified cached peer. This 
ReplicationPeer will
* continue to track changes to the Peer's state and config. This method 
returns null if no peer
* has been cached with the given peerId.
@@ -117,7 +103,7 @@ public class ReplicationPeers {
* @return a Set of Strings for peerIds
*/
   public Set getAllPeerIds() {
-return peerCache.keySet();
+return Collections.unmodifiableSet(peerCache.keySet());
   }
 
   public static Configuration 
getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3be13975/hbase-

[13/46] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2018-02-04 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/762770b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/762770b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/762770b0

Branch: refs/heads/HBASE-19397-branch-2
Commit: 762770b090820fd92085466eea20033cbe5763cd
Parents: e5dc52a
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/762770b0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 40dac2f..b8546fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2473,7 +2473,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2484,7 +2484,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2492,6 +2492,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  default Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig)
+      throws IOException {
+    return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
+      boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2499,6 +2530,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short

[09/46] hbase git commit: HBASE-19926 Use a separated class to implement the WALActionListener for Replication

2018-02-04 Thread zhangduo
HBASE-19926 Use a separated class to implement the WALActionListener for 
Replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b603d2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b603d2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b603d2c

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3b603d2c08c1f1905a589597737412b43970a304
Parents: 0ca7a2e
Author: zhangduo 
Authored: Sun Feb 4 10:42:33 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:32:14 2018 +0800

--
 .../replication/regionserver/Replication.java   | 22 +
 .../regionserver/ReplicationSourceManager.java  | 47 +-
 .../ReplicationSourceWALActionListener.java | 98 
 .../TestReplicationSourceManager.java   | 30 ++
 4 files changed, 108 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b603d2c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 0274b0a..ad12c66 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.ReplicationSinkService;
 import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
@@ -44,8 +43,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -130,23 +127,8 @@ public class Replication implements 
ReplicationSourceService, ReplicationSinkSer
 replicationTracker, conf, this.server, fs, logDir, oldLogDir, 
clusterId,
 walProvider != null ? walProvider.getWALFileLengthProvider() : p -> 
OptionalLong.empty());
 if (walProvider != null) {
-  walProvider.addWALActionsListener(new WALActionsListener() {
-
-@Override
-public void preLogRoll(Path oldPath, Path newPath) throws IOException {
-  replicationManager.preLogRoll(newPath);
-}
-
-@Override
-public void postLogRoll(Path oldPath, Path newPath) throws IOException 
{
-  replicationManager.postLogRoll(newPath);
-}
-
-@Override
-public void visitLogEntryBeforeWrite(WALKey logKey, WALEdit logEdit) 
throws IOException {
-  replicationManager.scopeWALEdits(logKey, logEdit);
-}
-  });
+  walProvider
+.addWALActionsListener(new ReplicationSourceWALActionListener(conf, 
replicationManager));
 }
 this.statsThreadPeriod =
 this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b603d2c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 8543896..cbbfca0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -40,12 +40,9 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache

[01/46] hbase git commit: HBASE-19919 Tidying up logging; ADDENDUM Fix tests w/ mocked Servers [Forced Update!]

2018-02-04 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397-branch-2 754e4f7fb -> 88c4aa7ad (forced update)


HBASE-19919 Tidying up logging; ADDENDUM Fix tests w/ mocked Servers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/00653a4d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/00653a4d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/00653a4d

Branch: refs/heads/HBASE-19397-branch-2
Commit: 00653a4d18b4e3a97c93d3fd9d44ed9b6149db53
Parents: 66a11f2
Author: Michael Stack 
Authored: Sat Feb 3 09:25:16 2018 -0800
Committer: Michael Stack 
Committed: Sat Feb 3 09:25:45 2018 -0800

--
 .../java/org/apache/hadoop/hbase/master/SplitLogManager.java   | 6 +-
 .../hadoop/hbase/master/assignment/AssignmentManager.java  | 6 +-
 2 files changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/00653a4d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 63af398..2e2f8bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -121,8 +121,12 @@ public class SplitLogManager {
   throws IOException {
 this.server = master;
 this.conf = conf;
+// Get Server Thread name. Sometimes the Server is mocked so may not 
implement HasThread.
+// For example, in tests.
+String name = master instanceof HasThread? ((HasThread)master).getName():
+master.getServerName().toShortString();
 this.choreService =
-new ChoreService(((HasThread)master).getName() + ".splitLogManager.");
+new ChoreService(name + ".splitLogManager.");
 if (server.getCoordinatedStateManager() != null) {
   SplitLogManagerCoordination coordination = 
getSplitLogManagerCoordination();
   Set failedDeletions = Collections.synchronizedSet(new 
HashSet());

http://git-wip-us.apache.org/repos/asf/hbase/blob/00653a4d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 1f65230..e09b29b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1624,7 +1624,11 @@ public class AssignmentManager implements ServerListener 
{
   }
 
   private void startAssignmentThread() {
-assignThread = new Thread(((HasThread)this.master).getName()) {
+// Get Server Thread name. Sometimes the Server is mocked so may not 
implement HasThread.
+// For example, in tests.
+String name = master instanceof HasThread? ((HasThread)master).getName():
+master.getServerName().toShortString();
+assignThread = new Thread(name) {
   @Override
   public void run() {
 while (isRunning()) {



[11/46] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-02-04 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e8400fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e8400fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e8400fd

Branch: refs/heads/HBASE-19397-branch-2
Commit: 9e8400fdad871e5b02dcfb866a579be8b30e60ae
Parents: 12d321d
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  21 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 176 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 153 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 109 ++
 .../master/ReplicationLogCleaner.java   |  35 +-
 .../regionserver/DumpReplicationQueues.java |  78 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  24 +-
 .../cleaner/TestReplicationHFileCleaner.java|  29 --
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 232 
 .../TestReplicationSourceManagerZkImpl.java |  41 --
 25 files changed, 868 insertions(+), 1294 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e8400fd/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- final 
ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e8400fd/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 419e289..4e5f757 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase

[23/46] hbase git commit: HBASE-19642 Fix locking for peer modification procedure

2018-02-04 Thread zhangduo
HBASE-19642 Fix locking for peer modification procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a4d04406
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a4d04406
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a4d04406

Branch: refs/heads/HBASE-19397-branch-2
Commit: a4d04406e372e9b35f2bdf9f9ed0e48e5fc7b9b8
Parents: 3b27510
Author: zhangduo 
Authored: Wed Dec 27 18:27:13 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 14 +
 .../master/replication/ModifyPeerProcedure.java | 21 +---
 2 files changed, 32 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a4d04406/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index a25217c..4ecb3b1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -610,6 +610,20 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 public boolean requireExclusiveLock(Procedure proc) {
   return requirePeerExclusiveLock((PeerProcedureInterface) proc);
 }
+
+@Override
+public boolean isAvailable() {
+  if (isEmpty()) {
+return false;
+  }
+  if (getLockStatus().hasExclusiveLock()) {
+// if we have an exclusive lock already taken
+// only child of the lock owner can be executed
+Procedure nextProc = peek();
+return nextProc != null && getLockStatus().hasLockAccess(nextProc);
+  }
+  return true;
+}
   }
 
   // 


http://git-wip-us.apache.org/repos/asf/hbase/blob/a4d04406/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 279fbc7..a682606 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure
 
   protected String peerId;
 
+  private volatile boolean locked;
+
   // used to keep compatible with old client where we can only returns after 
updateStorage.
   protected ProcedurePrepareLatch latch;
 
@@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure
 
   @Override
   protected LockState acquireLock(MasterProcedureEnv env) {
-return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)
-  ? LockState.LOCK_EVENT_WAIT
-  : LockState.LOCK_ACQUIRED;
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return  LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
 env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
   }
 
   @Override
+  protected boolean holdLock(MasterProcedureEnv env) {
+return true;
+  }
+
+  @Override
+  protected boolean hasLock(MasterProcedureEnv env) {
+return locked;
+  }
+
+  @Override
   protected void rollbackState(MasterProcedureEnv env, PeerModificationState 
state)
   throws IOException, InterruptedException {
 if (state == PeerModificationState.PRE_PEER_MODIFICATION) {



[02/46] hbase git commit: HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails

2018-02-04 Thread zhangduo
HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3233572
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3233572
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3233572

Branch: refs/heads/HBASE-19397-branch-2
Commit: a3233572ac8049fdbfa7edc9a48ca80a8c3de1ae
Parents: 00653a4
Author: Michael Stack 
Authored: Sat Feb 3 10:17:16 2018 -0800
Committer: Michael Stack 
Committed: Sat Feb 3 10:17:16 2018 -0800

--
 .../TestVisibilityLabelsOnNewVersionBehaviorTable.java| 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3233572/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
index c14438e..d3177f9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
@@ -19,12 +19,21 @@ package org.apache.hadoop.hbase.security.visibility;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.SecurityTests;
+import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
 
+@Category({SecurityTests.class, MediumTests.class})
 public class TestVisibilityLabelsOnNewVersionBehaviorTable extends 
TestVisibilityLabelsWithDeletes {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  
HBaseClassTestRule.forClass(TestVisibilityLabelsOnNewVersionBehaviorTable.class);
 
   @Override
   protected Table createTable(HColumnDescriptor fam) throws IOException {
@@ -35,5 +44,4 @@ public class TestVisibilityLabelsOnNewVersionBehaviorTable 
extends TestVisibilit
 TEST_UTIL.getHBaseAdmin().createTable(table);
 return TEST_UTIL.getConnection().getTable(tableName);
   }
-
 }



[42/46] hbase git commit: HBASE-19873 addendum add missing rule for new tests

2018-02-04 Thread zhangduo
HBASE-19873 addendum add missing rule for new tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/88c4aa7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/88c4aa7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/88c4aa7a

Branch: refs/heads/HBASE-19397-branch-2
Commit: 88c4aa7ada3125ac8704fe24a33e89598be2668f
Parents: 8bb3cf6
Author: zhangduo 
Authored: Tue Jan 30 09:40:23 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../hbase/replication/TestZKReplicationPeerStorage.java  | 7 ++-
 .../hbase/replication/TestZKReplicationQueueStorage.java | 8 +++-
 .../hbase/replication/TestReplicationProcedureRetry.java | 7 ++-
 .../apache/hadoop/hbase/util/TestHBaseFsckReplication.java   | 6 ++
 4 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/88c4aa7a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
index 3eb11da..3290fb0 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
@@ -33,19 +33,24 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.stream.Stream;
-
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseZKTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ ReplicationTests.class, MediumTests.class })
 public class TestZKReplicationPeerStorage {
 
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class);
+
   private static final HBaseZKTestingUtility UTIL = new 
HBaseZKTestingUtility();
 
   private static ZKReplicationPeerStorage STORAGE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/88c4aa7a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
index 786730f..2c01a26 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
@@ -27,8 +27,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 import java.util.SortedSet;
-
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseZKTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -38,11 +38,17 @@ import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ ReplicationTests.class, MediumTests.class })
 public class TestZKReplicationQueueStorage {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class);
+
   private static final HBaseZKTestingUtility UTIL = new 
HBaseZKTestingUtility();
 
   private static ZKReplicationQueueStorage STORAGE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/88c4aa7a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
index ab35b46..a2ae0b4 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java

[17/46] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-02-04 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12d321d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12d321d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12d321d4

Branch: refs/heads/HBASE-19397-branch-2
Commit: 12d321d4c26474a515bb11351e699b26a50381fe
Parents: a46f2a9
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  64 ++-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../hbase/master/TestMasterNoCluster.java   |   3 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1750 insertions(+), 314 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12d321d4/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static <T> List<T> nullToEmpty(List<T> list) {
+    return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/12d321d4/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index edce309..77517cc 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -104,6 +104,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/12d321d4/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  

[26/46] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer

2018-02-04 Thread zhangduo
HBASE-19633 Clean up the replication queues in the postPeerModification stage 
when removing a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bb1768d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bb1768d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bb1768d

Branch: refs/heads/HBASE-19397-branch-2
Commit: 7bb1768d7955f44ac7eff8d772532dd3789ece3b
Parents: 3fc2f85
Author: zhangduo 
Authored: Tue Jan 2 09:57:23 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:41:30 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  2 +-
 .../replication/VerifyReplication.java  | 34 ++---
 .../hbase/replication/ReplicationPeers.java | 32 ++--
 .../replication/ZKReplicationQueueStorage.java  |  3 +-
 .../replication/ZKReplicationStorageBase.java   |  4 +-
 .../replication/TestReplicationStateBasic.java  | 10 +
 .../master/replication/AddPeerProcedure.java|  5 +--
 .../replication/DisablePeerProcedure.java   |  3 +-
 .../master/replication/EnablePeerProcedure.java |  3 +-
 .../master/replication/ModifyPeerProcedure.java | 34 +
 .../replication/RefreshPeerProcedure.java   | 17 -
 .../master/replication/RemovePeerProcedure.java |  7 ++--
 .../replication/ReplicationPeerManager.java | 31 +++-
 .../replication/UpdatePeerConfigProcedure.java  |  3 +-
 .../RemoteProcedureResultReporter.java  |  3 +-
 .../regionserver/RefreshPeerCallable.java   |  5 ++-
 .../regionserver/ReplicationSourceManager.java  | 39 +++-
 .../TestReplicationAdminUsingProcedure.java |  7 ++--
 18 files changed, 124 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb1768d/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index b80ee16..fdae288 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -27,8 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A configuration for the replication peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb1768d/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index f0070f0..fe45762 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.mapreduce.replication;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,13 +44,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -66,6 +66,7 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import or

[34/46] hbase git commit: HBASE-19544 Add UTs for testing concurrent modifications on replication peer

2018-02-04 Thread zhangduo
HBASE-19544 Add UTs for testing concurrent modifications on replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2f5abfe5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2f5abfe5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2f5abfe5

Branch: refs/heads/HBASE-19397-branch-2
Commit: 2f5abfe5d94cd420379e19c6784bb854e74dc478
Parents: 74fea40
Author: Guanghao Zhang 
Authored: Tue Jan 2 17:07:41 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../replication/TestReplicationAdmin.java   | 69 
 1 file changed, 69 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2f5abfe5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 772a9d6..a753d23 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -58,6 +59,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Unit testing of ReplicationAdmin
@@ -69,6 +72,8 @@ public class TestReplicationAdmin {
   public static final HBaseClassTestRule CLASS_RULE =
   HBaseClassTestRule.forClass(TestReplicationAdmin.class);
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationAdmin.class);
+
   private final static HBaseTestingUtility TEST_UTIL =
   new HBaseTestingUtility();
 
@@ -118,6 +123,70 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testConcurrentPeerOperations() throws Exception {
+int threadNum = 5;
+AtomicLong successCount = new AtomicLong(0);
+
+// Test concurrent add peer operation
+Thread[] addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent remove peer operation
+successCount.set(0);
+Thread[] removePeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  removePeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.removeReplicationPeer(ID_ONE);
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when remove replication peer", e);
+}
+  });
+  removePeers[i].start();
+}
+for (Thread removePeer : removePeers) {
+  removePeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent add peer operation again
+successCount.set(0);
+addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+  }
+
+  @Test
   public void testAddInvalidPeer() {
 ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
 builder.setClusterKey(KEY_ONE);



[21/46] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2018-02-04 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/76716528
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/76716528
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/76716528

Branch: refs/heads/HBASE-19397-branch-2
Commit: 767165288b71773e3f77202542c0e9d1ba00a96e
Parents: 51c8509
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 169 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 216 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/76716528/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 9065f4e..09d4b4b 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -333,7 +332,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -354,9 +352,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76716528/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its local peer state. otherwise, just return th

[28/46] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-02-04 Thread zhangduo
HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c85716fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c85716fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c85716fc

Branch: refs/heads/HBASE-19397-branch-2
Commit: c85716fc3cc240abdd77a993ed66d7a6f7a26cac
Parents: a4d0440
Author: zhangduo 
Authored: Wed Dec 27 22:03:51 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:41:30 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   9 +-
 .../hbase/replication/ReplicationQueues.java| 160 ---
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesZKImpl.java| 407 -
 .../hbase/replication/ReplicationTableBase.java | 442 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +-
 .../replication/ZKReplicationQueueStorage.java  |  22 +
 .../replication/TestReplicationStateBasic.java  | 131 +++---
 .../replication/TestReplicationStateZKImpl.java |  41 +-
 .../regionserver/DumpReplicationQueues.java |  15 +-
 .../RecoveredReplicationSource.java |  17 +-
 .../RecoveredReplicationSourceShipper.java  |  22 +-
 .../replication/regionserver/Replication.java   |  20 +-
 .../regionserver/ReplicationSource.java |  16 +-
 .../ReplicationSourceInterface.java |  11 +-
 .../regionserver/ReplicationSourceManager.java  | 259 ++-
 .../regionserver/ReplicationSyncUp.java |  29 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |  12 +-
 .../cleaner/TestReplicationHFileCleaner.java|  23 +-
 .../cleaner/TestReplicationZKNodeCleaner.java   |  22 +-
 .../replication/ReplicationSourceDummy.java |   6 +-
 .../replication/TestReplicationSyncUpTool.java  |   6 +-
 .../TestReplicationSourceManager.java   |  97 ++--
 .../TestReplicationSourceManagerZkImpl.java |  57 +--
 24 files changed, 363 insertions(+), 1552 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c85716fc/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c1c213..5e70e57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import org.apache.commons.lang3.reflect.ConstructorUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A factory class for instantiating replication objects that deal with 
replication state.
@@ -30,12 +29,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
-  throws Exception {
-return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
-  args);
-  }
-
   public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
   Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/c85716fc/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
deleted file mode 100644
index 7f440b1..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apac

[40/46] hbase git commit: HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

2018-02-04 Thread zhangduo
HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/088927d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/088927d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/088927d1

Branch: refs/heads/HBASE-19397-branch-2
Commit: 088927d1ab3ef9acca788431f5b837cbee867c69
Parents: 1c5cbec
Author: Guanghao Zhang 
Authored: Fri Jan 5 15:39:06 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 23 
 1 file changed, 19 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/088927d1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 4ecb3b1..0400de4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -402,7 +402,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   @Override
   public void completionCleanup(final Procedure proc) {
 if (proc instanceof TableProcedureInterface) {
-  TableProcedureInterface iProcTable = (TableProcedureInterface)proc;
+  TableProcedureInterface iProcTable = (TableProcedureInterface) proc;
   boolean tableDeleted;
   if (proc.hasException()) {
 Exception procEx = proc.getException().unwrapRemoteException();
@@ -423,9 +423,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   }
 } else if (proc instanceof PeerProcedureInterface) {
   PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc;
-  if (iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) {
-removePeerQueue(iProcPeer.getPeerId());
-  }
+  tryCleanupPeerQueue(iProcPeer.getPeerId(), proc);
 } else {
   // No cleanup for ServerProcedureInterface types, yet.
   return;
@@ -514,6 +512,23 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 locking.removePeerLock(peerId);
   }
 
+  private void tryCleanupPeerQueue(String peerId, Procedure procedure) {
+schedLock();
+try {
+  PeerQueue queue = AvlTree.get(peerMap, peerId, 
PEER_QUEUE_KEY_COMPARATOR);
+  if (queue == null) {
+return;
+  }
+
+  final LockAndQueue lock = locking.getPeerLock(peerId);
+  if (queue.isEmpty() && lock.tryExclusiveLock(procedure)) {
+removeFromRunQueue(peerRunQueue, queue);
+removePeerQueue(peerId);
+  }
+} finally {
+  schedUnlock();
+}
+  }
 
   private static boolean isPeerProcedure(Procedure proc) {
 return proc instanceof PeerProcedureInterface;



[19/46] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2018-02-04 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d97e83af
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d97e83af
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d97e83af

Branch: refs/heads/HBASE-19397-branch-2
Commit: d97e83af9737eb7fafa4acf23adbd07af7bc19f2
Parents: 12678c5
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d97e83af/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d97e83af/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d97e83af/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main

[20/46] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2018-02-04 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/068dcb47
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/068dcb47
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/068dcb47

Branch: refs/heads/HBASE-19397-branch-2
Commit: 068dcb478e2fa38d41e5bd5ba2d73b2134b94a8b
Parents: 7671652
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/068dcb47/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need clean exclude-namespaces or exclude-table-cfs config 
firstly" 

[18/46] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2018-02-04 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a46f2a9e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a46f2a9e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a46f2a9e

Branch: refs/heads/HBASE-19397-branch-2
Commit: a46f2a9eb64dfc878a01cc9851d145fdbb72eee0
Parents: d97e83a
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 186 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   6 +
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |   9 +
 .../regionserver/ReplicationSource.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  51 -
 .../TestReplicationSourceManager.java   |  32 ++-
 21 files changed, 532 insertions(+), 472 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a46f2a9e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 9739254..f500088 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -203,7 +205,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -228,10 +230,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a46f2a9e/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index b26802f..5e6b3db 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -280,7 +282,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagi

[24/46] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure

2018-02-04 Thread zhangduo
HBASE-19592 Add UTs to test retry on update zk failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b27510a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b27510a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b27510a

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3b27510ac387a2e1e5510dbf85748c8e21699d71
Parents: 068dcb4
Author: zhangduo 
Authored: Tue Dec 26 20:39:00 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 .../TestReplicationProcedureRetry.java  | 200 +++
 2 files changed, 202 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b27510a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index b78cbce..f4ccce8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
-public final class ReplicationPeerManager {
+public class ReplicationPeerManager {
 
   private final ReplicationPeerStorage peerStorage;
 
@@ -61,8 +61,7 @@ public final class ReplicationPeerManager {
 
   private final ConcurrentMap peers;
 
-  private ReplicationPeerManager(ReplicationPeerStorage peerStorage,
-  ReplicationQueueStorage queueStorage,
+  ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
   ConcurrentMap peers) {
 this.peerStorage = peerStorage;
 this.queueStorage = queueStorage;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b27510a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
new file mode 100644
index 000..ab35b46
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+
+/**
+ * All the modification method will fail once in the test and should finally 
succeed.
+ */
+@Category({ Replica

[44/46] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/6c947a3b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 91ed98c..476fea1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
@@ -33,7 +31,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
@@ -70,27 +68,53 @@ import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
- * This class is responsible to manage all the replication
- * sources. There are two classes of sources:
+ * This class is responsible to manage all the replication sources. There are 
two classes of
+ * sources:
  * 
- *  Normal sources are persistent and one per peer cluster
- *  Old sources are recovered from a failed region server and our
- * only goal is to finish replicating the WAL queue it had up in ZK
+ * Normal sources are persistent and one per peer cluster
+ * Old sources are recovered from a failed region server and our only goal 
is to finish
+ * replicating the WAL queue it had
+ * 
+ * 
+ * When a region server dies, this class uses a watcher to get notified and it 
tries to grab a lock
+ * in order to transfer all the queues in a local old source.
+ * 
+ * Synchronization specification:
+ * 
+ * No need synchronized on {@link #sources}. {@link #sources} is a 
ConcurrentHashMap and there
+ * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no 
race for peer
+ * operations.
+ * Need synchronized on {@link #walsById}. There are four methods which 
modify it,
+ * {@link #addPeer(String)}, {@link #removePeer(String)},
+ * {@link #cleanOldLogs(SortedSet, String, String)} and {@link 
#preLogRoll(Path)}. {@link #walsById}
+ * is a ConcurrentHashMap and there is a Lock for peer id in {@link 
PeerProcedureHandlerImpl}. So
+ * there is no race between {@link #addPeer(String)} and {@link 
#removePeer(String)}.
+ * {@link #cleanOldLogs(SortedSet, String, String)} is called by {@link 
ReplicationSourceInterface}.
+ * So no race with {@link #addPeer(String)}. {@link #removePeer(String)} will 
terminate the
+ * {@link ReplicationSourceInterface} firstly, then remove the wals from 
{@link #walsById}. So no
+ * race with {@link #removePeer(String)}. The only case need synchronized is
+ * {@link #cleanOldLogs(SortedSet, String, String)} and {@link 
#preLogRoll(Path)}.
+ * No need synchronized on {@link #walsByIdRecoveredQueues}. There are 
three methods which
+ * modify it, {@link #removePeer(String)} , {@link #cleanOldLogs(SortedSet, 
String, String)} and
+ * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
+ * {@link #cleanOldLogs(SortedSet, String, String)} is called by {@link 
ReplicationSourceInterface}.
+ * {@link #removePeer(String)} will terminate the {@link 
ReplicationSourceInterface} firstly, then
+ * remove the wals from {@link #walsByIdRecoveredQueues}. And
+ * {@link ReplicationSourceManager.NodeFailoverWorker#run()} will add the wals 
to
+ * {@link #walsByIdRecoveredQueues} firstly, then start up a {@link 
ReplicationSourceInterface}. So
+ * there is no race here. For {@link 
ReplicationSourceManager.NodeFailoverWorker#run()} and
+ * {@link #removePeer(String)}, there is already synchronized on {@link 
#oldsources}. So no need
+ * synchronized on {@link #walsByIdRecoveredQueues}.
+ * Need synchronized on {@link #latestPaths} to avoid the new open source 
miss new log.
+ * Need synchronized on {@link #oldsources} to avoid adding recovered 
source for the
+ * to-be-removed peer.
  * 
- *
- * When a region server dies, this class uses a watcher to get notified and it
- * tries to grab a lock in order to transfer all the queues in a local
- * old source

[39/46] hbase git commit: HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs

2018-02-04 Thread zhangduo
HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint 
UT hangs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5ddc7eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5ddc7eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5ddc7eb

Branch: refs/heads/HBASE-19397-branch-2
Commit: d5ddc7eb1959917cb16d3b374c2dd77764dfcac2
Parents: 6fb48ee
Author: huzheng 
Authored: Wed Jan 10 15:00:30 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/master/HMaster.java   | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ddc7eb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 7dd6788..cb56764 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -40,7 +40,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
-import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -72,6 +71,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
@@ -3384,13 +3384,12 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   cpHost.preGetReplicationPeerConfig(peerId);
 }
 LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + 
peerId);
-Optional peerConfig =
-  this.replicationPeerManager.getPeerConfig(peerId);
-
+ReplicationPeerConfig peerConfig = 
this.replicationPeerManager.getPeerConfig(peerId)
+.orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
 if (cpHost != null) {
   cpHost.postGetReplicationPeerConfig(peerId);
 }
-return peerConfig.orElse(null);
+return peerConfig;
   }
 
   @Override



[25/46] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2018-02-04 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2a72ae9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2a72ae9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2a72ae9

Branch: refs/heads/HBASE-19397-branch-2
Commit: e2a72ae9188a03c838d556f17140dcf2e64802d3
Parents: 3b603d2
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  30 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 224 +--
 .../procedure/PeerProcedureInterface.java   |  34 +++
 .../master/procedure/RSProcedureDispatcher.java | 101 +
 .../master/replication/ModifyPeerProcedure.java | 127 +++
 .../master/replication/RefreshPeerCallable.java |  67 ++
 .../replication/RefreshPeerProcedure.java   | 197 
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  61 +
 .../hbase/regionserver/RSRpcServices.java   |  58 +++--
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 +++
 .../security/access/TestAccessController.java   |   1 +
 24 files changed, 1107 insertions(+), 169 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a72ae9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a72ae9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 861e3b2..dca0bec 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher
-   * @param 
*/
   public interface RemoteProcedure {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the {@code 
reportProcedureDone}
+ * method.
+ * @param error the error message
+ */
+void remoteOperationFailed(TEnv env, String error);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a72ae9/hbase-protocol-shaded/src/main/protobuf/Admin.prot

[31/46] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-02-04 Thread zhangduo
HBASE-19622 Reimplement ReplicationPeers with the new replication storage 
interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3fc2f857
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3fc2f857
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3fc2f857

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3fc2f85765ef145614fff15c85949fed8e584536
Parents: ab52775
Author: huzheng 
Authored: Tue Dec 26 16:46:10 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:41:30 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  10 +-
 .../replication/VerifyReplication.java  |   9 +-
 .../hbase/replication/ReplicationFactory.java   |  10 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  60 +-
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../hbase/replication/ReplicationPeers.java | 238 
 .../replication/ReplicationPeersZKImpl.java | 552 ---
 .../replication/ZKReplicationPeerStorage.java   |  12 +-
 .../replication/ZKReplicationStorageBase.java   |   3 +-
 .../replication/TestReplicationStateBasic.java  | 125 ++---
 .../replication/TestReplicationStateZKImpl.java |   2 +-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../cleaner/ReplicationZKNodeCleaner.java   |  57 +-
 .../replication/ReplicationPeerManager.java |   6 +-
 .../regionserver/DumpReplicationQueues.java |   2 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  49 +-
 .../replication/regionserver/Replication.java   |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  45 +-
 .../cleaner/TestReplicationHFileCleaner.java|   7 +-
 .../replication/TestMultiSlaveReplication.java  |   2 -
 .../TestReplicationTrackerZKImpl.java   |  36 +-
 .../TestReplicationSourceManager.java   |  17 +-
 .../hadoop/hbase/HBaseZKTestingUtility.java |   3 +-
 24 files changed, 308 insertions(+), 961 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3fc2f857/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 022bf64..a234a9b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -247,22 +247,22 @@ public final class ReplicationPeerConfigUtil {
   public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
   throws DeserializationException {
 if (ProtobufUtil.isPBMagicPrefix(bytes)) {
-  int pblen = ProtobufUtil.lengthOfPBMagic();
+  int pbLen = ProtobufUtil.lengthOfPBMagic();
   ReplicationProtos.ReplicationPeer.Builder builder =
   ReplicationProtos.ReplicationPeer.newBuilder();
   ReplicationProtos.ReplicationPeer peer;
   try {
-ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
 peer = builder.build();
   } catch (IOException e) {
 throw new DeserializationException(e);
   }
   return convert(peer);
 } else {
-  if (bytes.length > 0) {
-return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
+  if (bytes == null || bytes.length <= 0) {
+throw new DeserializationException("Bytes to deserialize should not be 
empty.");
   }
-  return ReplicationPeerConfig.newBuilder().setClusterKey("").build();
+  return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3fc2f857/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 09d4b4b..f0070f0 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -339,15 +339,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 @Override public boolean

[10/46] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/9e8400fd/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 6e27a21..d8f9625 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,13 +21,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,17 +48,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
   public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, 
Set peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List liveRegionServers = 
replicationTracker.getListOfRegionServers();
+Set liveRegionServers = new 
HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String 

[08/46] hbase git commit: HBASE-19726 Failed to start HMaster due to infinite retrying on meta assign; ADDENDUM Fix failing TestMetaWithReplicas#testShutdownHandling; it was reading meta TableState""

2018-02-04 Thread zhangduo
HBASE-19726 Failed to start HMaster due to infinite retrying on meta assign; 
ADDENDUM Fix failing TestMetaWithReplicas#testShutdownHandling; it was reading 
meta TableState""
Retry applying this addendum. Previous commit added it w/ wrong log
message so had to revert and then reapply here.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ca7a2e9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ca7a2e9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ca7a2e9

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0ca7a2e91629c5829144dff92cb1aaf9ebd6de4d
Parents: cd61060
Author: Michael Stack 
Authored: Sat Feb 3 21:49:42 2018 -0800
Committer: Michael Stack 
Committed: Sat Feb 3 21:49:42 2018 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ca7a2e9/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index f80bbc0..5dc0565 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1109,6 +1109,9 @@ public class MetaTableAccessor {
   @Nullable
   public static TableState getTableState(Connection conn, TableName tableName)
   throws IOException {
+if (tableName.equals(TableName.META_TABLE_NAME)) {
+  return new TableState(tableName, TableState.State.ENABLED);
+}
 Table metaHTable = getMetaHTable(conn);
 Get get = new Get(tableName.getName()).addColumn(getTableFamily(), 
getTableStateColumn());
 long time = EnvironmentEdgeManager.currentTime();



[07/46] hbase git commit: Revert "HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails; ADDENDUM Fix failing TestMetaWithReplicas#testShutdownHandling; it was reading meta TableState" Wrong

2018-02-04 Thread zhangduo
Revert "HBASE-19928 TestVisibilityLabelsOnNewVersionBehaviorTable fails; 
ADDENDUM Fix failing TestMetaWithReplicas#testShutdownHandling; it was reading 
meta TableState"
Wrong log message!

This reverts commit fbcb453ce2bbe4ffe0fa723b2ae05a7f91a6fc5d.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd610607
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd610607
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd610607

Branch: refs/heads/HBASE-19397-branch-2
Commit: cd610607e76ebc8ae01a0efe30134bfa887c4679
Parents: fbcb453
Author: Michael Stack 
Authored: Sat Feb 3 21:48:39 2018 -0800
Committer: Michael Stack 
Committed: Sat Feb 3 21:48:39 2018 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java  | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd610607/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 5dc0565..f80bbc0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1109,9 +1109,6 @@ public class MetaTableAccessor {
   @Nullable
   public static TableState getTableState(Connection conn, TableName tableName)
   throws IOException {
-if (tableName.equals(TableName.META_TABLE_NAME)) {
-  return new TableState(tableName, TableState.State.ENABLED);
-}
 Table metaHTable = getMetaHTable(conn);
 Get get = new Get(tableName.getName()).addColumn(getTableFamily(), 
getTableStateColumn());
 long time = EnvironmentEdgeManager.currentTime();



[15/46] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2018-02-04 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b734aef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b734aef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b734aef

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3b734aefd790cfd178ff6fdd61b4616a6a8ada62
Parents: 762770b
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b734aef/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8025a51..72bf2d1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   thro

[30/46] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/3fc2f857/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 0214241..12a806b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -166,7 +166,6 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 this.clusterId = clusterId;
 this.walFileLengthProvider = walFileLengthProvider;
 this.replicationTracker.registerListener(this);
-this.replicationPeers.getAllPeerIds();
 // It's preferable to failover 1 RS at a time, but with good zk servers
 // more could be processed at the same time.
 int nbWorkers = conf.getInt("replication.executor.workers", 1);
@@ -270,8 +269,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 }
 List otherRegionServers = 
replicationTracker.getListOfRegionServers().stream()
 .map(ServerName::valueOf).collect(Collectors.toList());
-LOG.info(
-  "Current list of replicators: " + currentReplicators + " other RSs: " + 
otherRegionServers);
+LOG.info("Current list of replicators: " + currentReplicators + " other 
RSs: "
++ otherRegionServers);
 
 // Look if there's anything to process after a restart
 for (ServerName rs : currentReplicators) {
@@ -288,7 +287,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* The returned future is for adoptAbandonedQueues task.
*/
   Future init() throws IOException, ReplicationException {
-for (String id : this.replicationPeers.getConnectedPeerIds()) {
+for (String id : this.replicationPeers.getAllPeerIds()) {
   addSource(id);
   if (replicationForBulkLoadDataEnabled) {
 // Check if peer exists in hfile-refs queue, if not add it. This can 
happen in the case
@@ -307,8 +306,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
*/
   @VisibleForTesting
   ReplicationSourceInterface addSource(String id) throws IOException, 
ReplicationException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(id);
-ReplicationPeer peer = replicationPeers.getConnectedPeer(id);
+ReplicationPeerConfig peerConfig = replicationPeers.getPeerConfig(id);
+ReplicationPeer peer = replicationPeers.getPeer(id);
 ReplicationSourceInterface src = getReplicationSource(id, peerConfig, 
peer);
 synchronized (this.walsById) {
   this.sources.add(src);
@@ -354,7 +353,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   public void deleteSource(String peerId, boolean closeConnection) {
 abortWhenFail(() -> this.queueStorage.removeQueue(server.getServerName(), 
peerId));
 if (closeConnection) {
-  this.replicationPeers.peerDisconnected(peerId);
+  this.replicationPeers.removePeer(peerId);
 }
   }
 
@@ -447,12 +446,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 // update replication queues on ZK
 // synchronize on replicationPeers to avoid adding source for the 
to-be-removed peer
 synchronized (replicationPeers) {
-  for (String id : replicationPeers.getConnectedPeerIds()) {
+  for (String id : replicationPeers.getAllPeerIds()) {
 try {
   this.queueStorage.addWAL(server.getServerName(), id, logName);
 } catch (ReplicationException e) {
-  throw new IOException("Cannot add log to replication queue" +
-" when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
+  throw new IOException("Cannot add log to replication queue"
+  + " when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
 }
   }
 }
@@ -597,7 +596,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   public void addPeer(String id) throws ReplicationException, IOException {
 LOG.info("Trying to add peer, peerId: " + id);
-boolean added = this.replicationPeers.peerConnected(id);
+boolean added = this.replicationPeers.addPeer(id);
 if (added) {
   LOG.info("Peer " + id + " connected success, trying to start the 
replication source thread.");
   addSource(id);
@@ -733,19 +732,25 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   // there is not an actual peer defined corresponding to peerId for 
the failover.
   ReplicationQueueInfo replicationQueueInfo = new 
ReplicationQueueInfo(peerId);

[27/46] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/c85716fc/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 0460280..0214241 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -34,18 +34,21 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
@@ -59,7 +62,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -95,7 +98,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   private final List sources;
   // List of all the sources we got from died RSs
   private final List oldsources;
-  private final ReplicationQueues replicationQueues;
+  private final ReplicationQueueStorage queueStorage;
   private final ReplicationTracker replicationTracker;
   private final ReplicationPeers replicationPeers;
   // UUID for this cluster
@@ -130,7 +133,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   /**
* Creates a replication manager and sets the watch on all the other 
registered region servers
-   * @param replicationQueues the interface for manipulating replication queues
+   * @param queueStorage the interface for manipulating replication queues
* @param replicationPeers
* @param replicationTracker
* @param conf the configuration to use
@@ -140,14 +143,14 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* @param oldLogDir the directory where old logs are archived
* @param clusterId
*/
-  public ReplicationSourceManager(ReplicationQueues replicationQueues,
+  public ReplicationSourceManager(ReplicationQueueStorage queueStorage,
   ReplicationPeers replicationPeers, ReplicationTracker 
replicationTracker, Configuration conf,
   Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID 
clusterId,
   WALFileLengthProvider walFileLengthProvider) throws IOException {
 //CopyOnWriteArrayList is thread-safe.
 //Generally, reading is more than modifying.
 this.sources = new CopyOnWriteArrayList<>();
-this.replicationQueues = replicationQueues;
+this.queueStorage = queueStorage;
 this.replicationPeers = replicationPeers;
 this.replicationTracker = replicationTracker;
 this.server = server;
@@ -184,6 +187,19 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 connection = ConnectionFactory.createConnection(conf);
   }
 
+  @FunctionalInterface
+  private interface ReplicationQueueOperation {
+void exec() throws ReplicationException;
+  }
+
+  private void abortWhenFail(ReplicationQueueOperation op) {
+try {
+  op.exec();
+} catch (ReplicationException e) {
+  server.abort("Failed to operate on replication queue", e);
+}
+  }
+
   /**
* Provide the id of the peer and a log key and this method will figure which
* wal it belongs to and will log, for this region server, the current
@@ -195,12 +211,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* @param queueRecovered indicates if this queue comes from another region 
server
* @param holdLogInZK if true then the

[45/46] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-02-04 Thread zhangduo
HBASE-19636 All rs should already start work with the new peer change when 
replication peer procedure is finished

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c947a3b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c947a3b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c947a3b

Branch: refs/heads/HBASE-19397-branch-2
Commit: 6c947a3baade25c5875bab0cd27639c1aa4859bc
Parents: 0381e83
Author: Guanghao Zhang 
Authored: Thu Jan 4 16:58:01 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   1 -
 .../hbase/replication/ReplicationPeerImpl.java  |   4 +-
 .../hbase/replication/ReplicationQueueInfo.java |  23 +-
 .../hbase/replication/ReplicationUtils.java |  56 ++
 .../replication/TestReplicationStateZKImpl.java |  21 -
 .../regionserver/ReplicationSourceService.java  |   3 +-
 .../regionserver/PeerProcedureHandler.java  |   3 +
 .../regionserver/PeerProcedureHandlerImpl.java  |  50 +-
 .../RecoveredReplicationSource.java |   6 +-
 .../RecoveredReplicationSourceShipper.java  |   8 +-
 .../replication/regionserver/Replication.java   |  11 +-
 .../regionserver/ReplicationSource.java |  34 +-
 .../regionserver/ReplicationSourceFactory.java  |   4 +-
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  | 895 ++-
 .../regionserver/ReplicationSourceShipper.java  |   6 +-
 .../ReplicationSourceWALReader.java |   2 +-
 .../replication/ReplicationSourceDummy.java |   2 +-
 .../replication/TestNamespaceReplication.java   |  57 +-
 .../TestReplicationSourceManager.java   |   5 +-
 20 files changed, 654 insertions(+), 545 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c947a3b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index fdae288..bf8d030 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c947a3b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index 3e17025..604e0bb 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class ReplicationPeerImpl implements ReplicationPeer {
+
   private final Configuration conf;
 
   private final String id;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c947a3b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index ecd888f..cd65f9b 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
- * This class is responsible for the parsing logic for a znode representing a 
queue.
+ * This class is responsible for the parsing logic for a queue id representing 
a queue.
  * It will extract the peerId if it's recovere

[12/46] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2018-02-04 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12678c5d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12678c5d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12678c5d

Branch: refs/heads/HBASE-19397-branch-2
Commit: 12678c5d011b4e0cf0d6aa1098531c806d18b6c4
Parents: 3b734ae
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12678c5d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 8ff2d12..a25217c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -389,6 +389,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -1041,7 +1048,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/12678c5d/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 29d9489..05bb637 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -911,6 +911,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -933,22 +954,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static void assertSharedLock(LockedResource resource, int lockCount)
-

[29/46] hbase git commit: HBASE-19635 Introduce a thread at RS side to call reportProcedureDone

2018-02-04 Thread zhangduo
HBASE-19635 Introduce a thread at RS side to call reportProcedureDone


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab527758
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab527758
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab527758

Branch: refs/heads/HBASE-19397-branch-2
Commit: ab5277582d362f464ed42fd01a69a8df16b95a47
Parents: c85716f
Author: zhangduo 
Authored: Wed Dec 27 20:13:42 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:41:30 2018 +0800

--
 .../src/main/protobuf/RegionServerStatus.proto  |   5 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  15 ++-
 .../hbase/regionserver/HRegionServer.java   |  72 
 .../RemoteProcedureResultReporter.java  | 111 +++
 .../handler/RSProcedureHandler.java |   2 +-
 5 files changed, 149 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab527758/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 4f75941..3f836cd 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -146,7 +146,7 @@ message RegionSpaceUseReportRequest {
 message RegionSpaceUseReportResponse {
 }
 
-message ReportProcedureDoneRequest {
+message RemoteProcedureResult {
   required uint64 proc_id = 1;
   enum Status {
 SUCCESS = 1;
@@ -155,6 +155,9 @@ message ReportProcedureDoneRequest {
   required Status status = 2;
   optional ForeignExceptionMessage error = 3;
 }
+message ReportProcedureDoneRequest {
+  repeated RemoteProcedureResult result = 1;
+}
 
 message ReportProcedureDoneResponse {
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab527758/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 72bf2d1..377a9c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -265,6 +265,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -2254,12 +2255,14 @@ public class MasterRpcServices extends RSRpcServices
   @Override
   public ReportProcedureDoneResponse reportProcedureDone(RpcController 
controller,
   ReportProcedureDoneRequest request) throws ServiceException {
-if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
-  master.remoteProcedureCompleted(request.getProcId());
-} else {
-  master.remoteProcedureFailed(request.getProcId(),
-RemoteProcedureException.fromProto(request.getError()));
-}
+request.getResultList().forEach(result -> {
+  if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
+master.remoteProcedureCompleted(result.getProcId());
+  } else {
+master.remoteProcedureFailed(result.getProcId(),
+  RemoteProcedureException.fromProto(result.getError()));
+  }
+});
 return ReportProcedureDoneResponse.getDefaultInstance();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ab527758/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 677b8b4..3a93c76 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regions

[41/46] hbase git commit: HBASE-19719 Fix checkstyle issues

2018-02-04 Thread zhangduo
HBASE-19719 Fix checkstyle issues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6fb48eed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6fb48eed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6fb48eed

Branch: refs/heads/HBASE-19397-branch-2
Commit: 6fb48eeda3244317660395e186e8f91d9b2f3553
Parents: 088927d
Author: zhangduo 
Authored: Sat Jan 6 08:30:55 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../hbase/replication/ReplicationStorageFactory.java   |  2 +-
 .../master/assignment/RegionTransitionProcedure.java   |  4 ++--
 .../hbase/master/procedure/RSProcedureDispatcher.java  | 13 ++---
 .../master/ReplicationPeerConfigUpgrader.java  |  8 
 4 files changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb48eed/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
index 60d0749..462cfed 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * For now we only have zk based implementation.
  */
 @InterfaceAudience.Private
-public class ReplicationStorageFactory {
+public final class ReplicationStorageFactory {
 
   private ReplicationStorageFactory() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb48eed/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 1724a38..8277dbe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -36,11 +36,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 /**
  * Base class for the Assign and Unassign Procedure.
  *

http://git-wip-us.apache.org/repos/asf/hbase/blob/6fb48eed/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 57a4535..6c78914 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
@@ -36,6 +35,12 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -47,12 +52,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
 import 
org.apache.hadoop.hbase.shaded.protobuf

[16/46] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/12d321d4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List peers = new ArrayList<>();
-List peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : peer

[04/46] hbase git commit: HBASE-19914 Refactor TestVisibilityLabelsOnNewVersionBehaviorTable

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/cb138c2d/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
index 0e69a3a..7a5232a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.security.visibility;
 
-import static 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -27,17 +26,13 @@ import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
@@ -48,251 +43,64 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import 
org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-/**
- * Tests visibility labels with deletes
- */
-@Category({SecurityTests.class, MediumTests.class})
-public class TestVisibilityLabelsWithDeletes {
+@Category({ SecurityTests.class, LargeTests.class })
+public class TestVisibilityLabelsWithDeletes extends 
VisibilityLabelsWithDeletesTestBase {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-  HBaseClassTestRule.forClass(TestVisibilityLabelsWithDeletes.class);
-
-  private static final Logger LOG = 
LoggerFactory.getLogger(TestVisibilityLabelsWithDeletes.class);
-  private static final String TOPSECRET = "TOPSECRET";
-  private static final String PUBLIC = "PUBLIC";
-  private static final String PRIVATE = "PRIVATE";
-  private static final String CONFIDENTIAL = "CONFIDENTIAL";
-  private static final String SECRET = "SECRET";
-  public static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
-  private static final byte[] row1 = Bytes.toBytes("row1");
-  private static final byte[] row2 = Bytes.toBytes("row2");
-  protected final static byte[] fam = Bytes.toBytes("info");
-  protected final static byte[] qual = Bytes.toBytes("qual");
-  private final static byte[] qual1 = Bytes.toBytes("qual1");
-  private final static byte[] qual2 = Bytes.toBytes("qual2");
-  protected final static byte[] value = Bytes.toBytes("value");
-  private final static byte[] value1 = Bytes.toBytes("value1");
-  public static Configuration conf;
-
-  @Rule
-  public final TestName TEST_NAME = new TestName();
-  public static User SUPERUSER;
-
-  @BeforeClass
-  public static void setupBeforeClass() throws Exception {
-// setup configuration
-conf = TEST_UTIL.getConfiguration();
-VisibilityTestUtil.enableVisiblityLabels(conf);
-conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, 
SimpleScanLabelGenerator.class,
-ScanLabelGenerator.class);
-conf.set("hbase.superuser", "admin");
-TEST_UTIL.startMiniCluster(2);
-SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { 
"supergroup" });
-
-  

[38/46] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase

2018-02-04 Thread zhangduo
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4c9da40
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4c9da40
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4c9da40

Branch: refs/heads/HBASE-19397-branch-2
Commit: f4c9da403e500969f22b86357fac94c6d24ebe35
Parents: 9631c6a
Author: huzheng 
Authored: Fri Dec 29 15:55:28 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   5 +-
 .../replication/ReplicationStateZKBase.java | 153 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +--
 .../replication/ZKReplicationPeerStorage.java   |  24 ++-
 .../replication/ZKReplicationStorageBase.java   |  13 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../master/ReplicationPeerConfigUpgrader.java   | 128 
 .../regionserver/DumpReplicationQueues.java |  18 +--
 .../replication/regionserver/Replication.java   |   3 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   3 +-
 .../TestReplicationTrackerZKImpl.java   |   3 +-
 .../replication/master/TestTableCFsUpdater.java |  41 ++---
 .../TestReplicationSourceManager.java   |   6 +-
 13 files changed, 136 insertions(+), 286 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4c9da40/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c66aff..2a970ba 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -33,9 +33,8 @@ public class ReplicationFactory {
 return new ReplicationPeers(zk, conf);
   }
 
-  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,
-  final ReplicationPeers replicationPeers, Configuration conf, Abortable 
abortable,
+  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
Abortable abortable,
   Stoppable stopper) {
-return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, 
abortable, stopper);
+return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f4c9da40/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
deleted file mode 100644
index f49537c..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audi

[03/46] hbase git commit: HBASE-19914 Refactor TestVisibilityLabelsOnNewVersionBehaviorTable

2018-02-04 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/cb138c2d/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java
new file mode 100644
index 000..ee5d3cc
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.security.visibility;
+
+import static 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import 
org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+/**
+ * Tests visibility labels with deletes
+ */
+public abstract class VisibilityLabelsWithDeletesTestBase {
+
+  protected static final String TOPSECRET = "TOPSECRET";
+  protected static final String PUBLIC = "PUBLIC";
+  protected static final String PRIVATE = "PRIVATE";
+  protected static final String CONFIDENTIAL = "CONFIDENTIAL";
+  protected static final String SECRET = "SECRET";
+  protected static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
+  protected static final byte[] row1 = Bytes.toBytes("row1");
+  protected static final byte[] row2 = Bytes.toBytes("row2");
+  protected final static byte[] fam = Bytes.toBytes("info");
+  protected final static byte[] qual = Bytes.toBytes("qual");
+  protected final static byte[] qual1 = Bytes.toBytes("qual1");
+  protected final static byte[] qual2 = Bytes.toBytes("qual2");
+  protected final static byte[] value = Bytes.toBytes("value");
+  protected final static byte[] value1 = Bytes.toBytes("value1");
+  protected static Configuration conf;
+
+  @Rule
+  public final TestName testName = new TestName();
+  protected static User SUPERUSER;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+// setup configuration
+conf = TEST_UTIL.getConfiguration();
+VisibilityTestUtil.enableVisiblityLabels(conf);
+conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, 
SimpleScanLabelGenerator.class,
+  ScanLabelGenerator.class);
+conf.set("hbase.superuser", "admin");
+TEST_UTIL.startMiniCluster(2);
+SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { 
"supergroup" });
+
+// Wait for the labels table to become available
+TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 5);
+addLabels();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+TEST_UTIL.shutdownMiniCluster();
+  }
+
+  public static void addLabels() throws Exception {
+PrivilegedExceptionAction action =
+  new PrivilegedExceptionAction() {
+@Override
+public VisibilityLabelsResponse run() throws Exception {
+  String[] labels = { SECRE

[32/46] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure

2018-02-04 Thread zhangduo
HBASE-19697 Remove TestReplicationAdminUsingProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/41125526
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/41125526
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/41125526

Branch: refs/heads/HBASE-19397-branch-2
Commit: 4112552692f997ba7d1be0da5181395799e00375
Parents: f4c9da4
Author: zhangduo 
Authored: Wed Jan 3 21:13:57 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../TestReplicationAdminUsingProcedure.java | 225 ---
 1 file changed, 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/41125526/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
deleted file mode 100644
index 1300376..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client.replication;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.TestReplicationBase;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-
-@Category({ MediumTests.class, ClientTests.class })
-public class TestReplicationAdminUsingProcedure extends TestReplicationBase {
-
-  private static final String PEER_ID = "2";
-  private static final Logger LOG = 
Logger.getLogger(TestReplicationAdminUsingProcedure.class);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-conf1.setInt("hbase.multihconnection.threads.max", 10);
-
-// Start the master & slave mini cluster.
-TestReplicationBase.setUpBeforeClass();
-
-// Remove the replication peer
-hbaseAdmin.removeReplicationPeer(PEER_ID);
-  }
-
-  private void loadData(int startRowKey, int endRowKey) throws IOException {
-for (int i = startRowKey; i < endRowKey; i++) {
-  byte[] rowKey = Bytes.add(row, Bytes.toBytes(i));
-  Put put = new Put(rowKey);
-  put.addColumn(famName, null, Bytes.toBytes(i));
-  htable1.put(put);
-}
-  }
-
-  private void waitForReplication(int expectedRows, int retries)
-  throws IOException, InterruptedException {
-Scan scan;
-for (int i = 0; i < retries; i++) {
-  scan = new Scan();
-  if (i == retries - 1) {
-throw new IOException("Waited too much time for normal batch 
replication");
-  }
-  try (ResultScanner scanner = htable2.getScanner(scan)) {
-int count = 0;
-for (Result res : scanner) {
-  count++;
-}
-if (count != expectedRows) {
-  LOG.info("Only got " + count + " rows,  expected rows: " + 
expectedRows);
-  Thread.sleep(SLEEP_TIME);
-} else {
-  return;
-}
-  }
-}
-  }
-
-  @Before
-  public void setUp() throws IOException {
-ReplicationPeerConfig rpc = new ReplicationPeerConfig();
- 

[05/46] hbase git commit: HBASE-19914 Refactor TestVisibilityLabelsOnNewVersionBehaviorTable

2018-02-04 Thread zhangduo
HBASE-19914 Refactor TestVisibilityLabelsOnNewVersionBehaviorTable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb138c2d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb138c2d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb138c2d

Branch: refs/heads/HBASE-19397-branch-2
Commit: cb138c2da84079fc639676e465b97b53faa40764
Parents: a323357
Author: zhangduo 
Authored: Sun Feb 4 08:38:46 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 13:37:09 2018 +0800

--
 .../client/ColumnFamilyDescriptorBuilder.java   |5 +
 ...sibilityLabelsOnNewVersionBehaviorTable.java |   25 +-
 ...ibilityLabelsWithDefaultVisLabelService.java |4 +-
 .../TestVisibilityLabelsWithDeletes.java| 1478 +++---
 .../VisibilityLabelsWithDeletesTestBase.java|  313 
 5 files changed, 887 insertions(+), 938 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb138c2d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index aaa460b..d7feea6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -552,6 +552,11 @@ public class ColumnFamilyDescriptorBuilder {
 return this;
   }
 
+  public ColumnFamilyDescriptorBuilder setNewVersionBehavior(final boolean 
value) {
+desc.setNewVersionBehavior(value);
+return this;
+  }
+
   public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes 
value) {
 desc.setValue(key, value);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb138c2d/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
index d3177f9..4093ace 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOnNewVersionBehaviorTable.java
@@ -18,30 +18,33 @@
 package org.apache.hadoop.hbase.security.visibility;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 
-@Category({SecurityTests.class, MediumTests.class})
-public class TestVisibilityLabelsOnNewVersionBehaviorTable extends 
TestVisibilityLabelsWithDeletes {
+
+@Category({ SecurityTests.class, MediumTests.class })
+public class TestVisibilityLabelsOnNewVersionBehaviorTable
+extends VisibilityLabelsWithDeletesTestBase {
+
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
   
HBaseClassTestRule.forClass(TestVisibilityLabelsOnNewVersionBehaviorTable.class);
 
   @Override
-  protected Table createTable(HColumnDescriptor fam) throws IOException {
-fam.setNewVersionBehavior(true);
-TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
-HTableDescriptor table = new HTableDescriptor(tableName);
-table.addFamily(fam);
-TEST_UTIL.getHBaseAdmin().createTable(table);
+  protected Table createTable(byte[] fam) throws IOException {
+TableName tableName = TableName.valueOf(testName.getMethodName());
+TEST_UTIL.getAdmin()
+.createTable(TableDescriptorBuilder.newBuilder(tableName)
+.addColumnFamily(
+  
ColumnFamilyDescriptorBuilder.newBuilder(fam).setNewVersionBehavior(true).build())
+.build());
 return TEST_UTIL.getConnection().getTable(tableName);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb138c2d/hbase-server/src/test/java/org/apache/hadoop/hbase/security/vi

[37/46] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore

2018-02-04 Thread zhangduo
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker 
and remove ReplicationZKNodeCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9631c6a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9631c6a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9631c6a3

Branch: refs/heads/HBASE-19397-branch-2
Commit: 9631c6a31d729580dff13632d0e00f53616aa6af
Parents: 2f5abfe
Author: zhangduo 
Authored: Wed Jan 3 09:39:44 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../replication/VerifyReplication.java  |   6 +-
 .../hbase/replication/ReplicationPeers.java |  26 +--
 .../hbase/replication/ReplicationUtils.java |  38 
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  13 --
 .../cleaner/ReplicationZKNodeCleaner.java   | 192 ---
 .../cleaner/ReplicationZKNodeCleanerChore.java  |  54 --
 .../replication/ReplicationPeerManager.java |  18 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  11 +-
 .../hbase/util/hbck/ReplicationChecker.java | 109 +++
 .../cleaner/TestReplicationZKNodeCleaner.java   | 115 ---
 .../hbase/util/TestHBaseFsckReplication.java| 101 ++
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |   6 +-
 13 files changed, 226 insertions(+), 465 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9631c6a3/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index fe45762..fac4875 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -345,10 +345,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 }
   });
   ReplicationPeerStorage storage =
-  ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
+ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
   ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
   return Pair.newPair(peerConfig,
-ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf));
+ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf));
 } catch (ReplicationException e) {
   throw new IOException("An error occurred while trying to connect to the 
remote peer cluster",
   e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9631c6a3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 45940a5..fcbc350 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -17,14 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -106,25 +103,6 @@ public class ReplicationPeers {
 return Collections.unmodifiableSet(peerCache.keySet());
   }
 
-  public s

[36/46] hbase git commit: HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl

2018-02-04 Thread zhangduo
HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74fea408
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74fea408
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74fea408

Branch: refs/heads/HBASE-19397-branch-2
Commit: 74fea408bd22b9ee5f78e531d08de2e13a483d4a
Parents: 3be1397
Author: zhangduo 
Authored: Tue Jan 2 16:13:55 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:42:08 2018 +0800

--
 .../regionserver/PeerProcedureHandlerImpl.java  | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/74fea408/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index 1efe180..c09c6a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-
+import java.util.concurrent.locks.Lock;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -32,7 +32,7 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   private static final Logger LOG = 
LoggerFactory.getLogger(PeerProcedureHandlerImpl.class);
 
   private final ReplicationSourceManager replicationSourceManager;
-  private final ReentrantLock peersLock = new ReentrantLock();
+  private final KeyLocker peersLock = new KeyLocker<>();
 
   public PeerProcedureHandlerImpl(ReplicationSourceManager 
replicationSourceManager) {
 this.replicationSourceManager = replicationSourceManager;
@@ -40,40 +40,57 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
 
   @Override
   public void addPeer(String peerId) throws ReplicationException, IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   replicationSourceManager.addPeer(peerId);
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void removePeer(String peerId) throws ReplicationException, 
IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   if (replicationSourceManager.getReplicationPeers().getPeer(peerId) != 
null) {
 replicationSourceManager.removePeer(peerId);
   }
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void disablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("disable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("disable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void enablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("enable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("enable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void updatePeerConfig(String peerId) throws ReplicationException, 
IOException {
-replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+} finally {
+  peerLock.unlock()

[22/46] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2018-02-04 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/51c85097
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/51c85097
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/51c85097

Branch: refs/heads/HBASE-19397-branch-2
Commit: 51c850971683f48f24cfd494870c108ece8a260d
Parents: 9e8400f
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:39:29 2018 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/51c85097/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/51c85097/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[46/46] hbase git commit: HBASE-19707 Race in start and terminate of a replication source after we async start replication endpoint

2018-02-04 Thread zhangduo
HBASE-19707 Race in start and terminate of a replication source after we async 
start replication endpoint


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c5cbeca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c5cbeca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c5cbeca

Branch: refs/heads/HBASE-19397-branch-2
Commit: 1c5cbeca149eef9d9b1ef3a5894a74a06789c6d7
Parents: 6c947a3
Author: zhangduo 
Authored: Fri Jan 5 18:28:44 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../RecoveredReplicationSource.java |  16 +-
 .../regionserver/ReplicationSource.java | 203 ++-
 2 files changed, 116 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c5cbeca/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index 1be9a88..3cae0f2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -68,7 +68,7 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   LOG.debug("Someone has beat us to start a worker thread for wal group " 
+ walGroupId);
 } else {
   LOG.debug("Starting up worker for wal group " + walGroupId);
-  worker.startup(getUncaughtExceptionHandler());
+  worker.startup(this::uncaughtException);
   worker.setWALReader(
 startNewWALReader(worker.getName(), walGroupId, queue, 
worker.getStartPosition()));
   workerThreads.put(walGroupId, worker);
@@ -76,13 +76,13 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   @Override
-  protected ReplicationSourceWALReader startNewWALReader(String threadName,
-  String walGroupId, PriorityBlockingQueue queue, long 
startPosition) {
-ReplicationSourceWALReader walReader = new 
RecoveredReplicationSourceWALReader(fs,
-conf, queue, startPosition, walEntryFilter, this);
-Threads.setDaemonThreadRunning(walReader, threadName
-+ ".replicationSource.replicationWALReaderThread." + walGroupId + "," 
+ queueId,
-  getUncaughtExceptionHandler());
+  protected ReplicationSourceWALReader startNewWALReader(String threadName, 
String walGroupId,
+  PriorityBlockingQueue queue, long startPosition) {
+ReplicationSourceWALReader walReader =
+  new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, 
walEntryFilter, this);
+Threads.setDaemonThreadRunning(walReader,
+  threadName + ".replicationSource.replicationWALReaderThread." + 
walGroupId + "," + queueId,
+  this::uncaughtException);
 return walReader;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1c5cbeca/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 0092251..09b6cc1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -75,7 +75,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * 
  */
 @InterfaceAudience.Private
-public class ReplicationSource extends Thread implements 
ReplicationSourceInterface {
+public class ReplicationSource implements ReplicationSourceInterface {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationSource.class);
   // Queues of logs to process, entry in format of walGroupId->queue,
@@ -114,10 +114,8 @@ public class ReplicationSource extends Thread implements 
ReplicationSourceInterf
   private MetricsSource metrics;
   // WARN threshold for the number of queued logs, defaults to 2
   private int logQueueWarnThreshold;
-  // whether the replication endpoint has been initialized
-  private volatile boolean endpointInitialized = false;
   // ReplicationEndpoint which will handle the actual replication
-  private ReplicationEndpoint replicationEndpoint;
+  private volatile ReplicationEndpoint replicationEndpoint;
   // A filter (or a 

[43/46] hbase git commit: HBASE-19783 Change replication peer cluster key/endpoint from a not-null value to null is not allowed

2018-02-04 Thread zhangduo
HBASE-19783 Change replication peer cluster key/endpoint from a not-null value 
to null is not allowed

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8bb3cf66
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8bb3cf66
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8bb3cf66

Branch: refs/heads/HBASE-19397-branch-2
Commit: 8bb3cf6688799beb587df4bff8fa40be191af374
Parents: d5ddc7e
Author: Guanghao Zhang 
Authored: Fri Jan 12 22:04:38 2018 +0800
Committer: zhangduo 
Committed: Sun Feb 4 20:43:05 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 28 +---
 1 file changed, 19 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8bb3cf66/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 696b2d7..19fc7f4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -132,20 +132,19 @@ public class ReplicationPeerManager {
 checkPeerConfig(peerConfig);
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig();
-if (!StringUtils.isBlank(peerConfig.getClusterKey()) &&
-  !peerConfig.getClusterKey().equals(oldPeerConfig.getClusterKey())) {
+if (!isStringEquals(peerConfig.getClusterKey(), 
oldPeerConfig.getClusterKey())) {
   throw new DoNotRetryIOException(
   "Changing the cluster key on an existing peer is not allowed. 
Existing key '" +
-oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does 
not match new key '" +
-peerConfig.getClusterKey() + "'");
+  oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does 
not match new key '" +
+  peerConfig.getClusterKey() + "'");
 }
 
-if (!StringUtils.isBlank(peerConfig.getReplicationEndpointImpl()) &&
-  
!peerConfig.getReplicationEndpointImpl().equals(oldPeerConfig.getReplicationEndpointImpl()))
 {
+if (!isStringEquals(peerConfig.getReplicationEndpointImpl(),
+  oldPeerConfig.getReplicationEndpointImpl())) {
   throw new DoNotRetryIOException("Changing the replication endpoint 
implementation class " +
-"on an existing peer is not allowed. Existing class '" +
-oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
-" does not match new class '" + 
peerConfig.getReplicationEndpointImpl() + "'");
+  "on an existing peer is not allowed. Existing class '" +
+  oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
+  " does not match new class '" + 
peerConfig.getReplicationEndpointImpl() + "'");
 }
   }
 
@@ -341,4 +340,15 @@ public class ReplicationPeerManager {
 return new ReplicationPeerManager(peerStorage,
 ReplicationStorageFactory.getReplicationQueueStorage(zk, conf), peers);
   }
+
+  /**
+   * For replication peer cluster key or endpoint class, null and empty string 
is same. So here
+   * don't use {@link StringUtils#equals(CharSequence, CharSequence)} directly.
+   */
+  private boolean isStringEquals(String s1, String s2) {
+if (StringUtils.isBlank(s1)) {
+  return StringUtils.isBlank(s2);
+}
+return s1.equals(s2);
+  }
 }



hbase git commit: HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is disabled

2018-02-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a55f2c759 -> bdeab9319


HBASE-19905 ReplicationSyncUp tool will not exit if a peer replication is 
disabled

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdeab931
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdeab931
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdeab931

Branch: refs/heads/branch-1
Commit: bdeab93196a247c7e3dcb090f8288de0050c5f24
Parents: a55f2c7
Author: Ashish Singhi 
Authored: Sun Feb 4 18:24:32 2018 +0530
Committer: Ashish Singhi 
Committed: Sun Feb 4 18:24:32 2018 +0530

--
 .../replication/regionserver/ReplicationSourceManager.java| 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bdeab931/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 77fd837..6ec30de 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -63,6 +63,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationListener;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
@@ -754,6 +755,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 replicationQueues.removeQueue(peerId);
 continue;
   }
+  if (server instanceof ReplicationSyncUp.DummyServer
+  && peer.getPeerState().equals(PeerState.DISABLED)) {
+LOG.warn("Peer " + actualPeerId + " is disabled. ReplicationSyncUp 
tool will skip "
++ "replicating data to this peer.");
+continue;
+  }
   // track sources in walsByIdRecoveredQueues
   Map> walsByGroup = new HashMap>();
   walsByIdRecoveredQueues.put(peerId, walsByGroup);



hbase git commit: HBASE-19658 make the test testFlatteningToJumboCellChunkMap() stable, by eliminating the possibility of third cell to be added while in-memory-flush is still in progress

2018-02-04 Thread anastasia
Repository: hbase
Updated Branches:
  refs/heads/master 14420e1b4 -> 170ffbba6


HBASE-19658 make the test testFlatteningToJumboCellChunkMap() stable, by 
eliminating the possibility of third cell to be added while in-memory-flush is 
still in progress


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/170ffbba
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/170ffbba
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/170ffbba

Branch: refs/heads/master
Commit: 170ffbba683217bdb30e5c99f0e728e0dc660d56
Parents: 14420e1
Author: anastas 
Authored: Sun Feb 4 14:59:10 2018 +0200
Committer: anastas 
Committed: Sun Feb 4 14:59:10 2018 +0200

--
 .../TestCompactingToCellFlatMapMemStore.java | 19 ---
 1 file changed, 12 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/170ffbba/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
index 91a4b04..9b81c7f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
@@ -752,9 +752,9 @@ public class TestCompactingToCellFlatMapMemStore extends 
TestCompactingMemStore
 // set memstore to flat into CellChunkMap
 MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
 
memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-String.valueOf(compactionType));
-((MyCompactingMemStore)memstore).initiateType(compactionType, 
memstore.getConfiguration());
-
((CompactingMemStore)memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
+String.valueOf(compactionType));
+((MyCompactingMemStore) memstore).initiateType(compactionType, 
memstore.getConfiguration());
+((CompactingMemStore) 
memstore).setIndexType(CompactingMemStore.IndexType.CHUNK_MAP);
 
 int numOfCells = 1;
 char[] chars = new char[MemStoreLAB.CHUNK_SIZE_DEFAULT];
@@ -762,7 +762,7 @@ public class TestCompactingToCellFlatMapMemStore extends 
TestCompactingMemStore
   chars[i] = 'A';
 }
 String bigVal = new String(chars);
-String[] keys1 = { "A"};
+String[] keys1 = {"A"};
 
 // make one cell
 byte[] row = Bytes.toBytes(keys1[0]);
@@ -782,7 +782,7 @@ public class TestCompactingToCellFlatMapMemStore extends 
TestCompactingMemStore
 assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
 assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
 
-((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline 
and flatten
+((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline 
and flatten
 while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
   Threads.sleep(10);
 }
@@ -807,12 +807,17 @@ public class TestCompactingToCellFlatMapMemStore extends 
TestCompactingMemStore
 
 memstore.clearSnapshot(snapshot.getId());
 
-String[] keys2 = { "C", "D", "E"};
+// Allocating two big cells (too big for being copied into a regular 
chunk).
+String[] keys2 = {"C", "D"};
 addRowsByKeys(memstore, keys2, val);
 while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
   Threads.sleep(10);
 }
-totalHeapSize = 1 * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD
+
+// The in-memory flush size is bigger than the size of a single cell,
+// but smaller than the size of two cells.
+// Therefore, the two created cells are flattened together.
+totalHeapSize = MutableSegment.DEEP_OVERHEAD
 + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM
 + 2 * oneCellOnCCMHeapSize;
 assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());



[09/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 7515d7b..3c4825d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -762,7 +762,7 @@
 754boolean wasUp = 
this.clusterStatusTracker.isClusterUp();
 755if (!wasUp) 
this.clusterStatusTracker.setClusterUp();
 756
-757LOG.info("Server active/primary 
master=" + this.serverName +
+757LOG.info("Active/primary master=" + 
this.serverName +
 758", sessionid=0x" +
 759
Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
 760", setting cluster-up flag (Was=" 
+ wasUp + ")");
@@ -1161,7 +1161,7 @@
 1153   startProcedureExecutor();
 1154
 1155   // Start log cleaner thread
-1156   int cleanerInterval = 
conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
+1156   int cleanerInterval = 
conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
 1157   this.logCleaner =
 1158  new LogCleaner(cleanerInterval,
 1159 this, conf, 
getMasterWalManager().getFileSystem(),
@@ -1227,2368 +1227,2369 @@
 1219procedureExecutor = new 
ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
 1220
configurationManager.registerObserver(procEnv);
 1221
-1222final int numThreads = 
conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1223
Math.max(Runtime.getRuntime().availableProcessors(),
-1224  
MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1225final boolean abortOnCorruption = 
conf.getBoolean(
-1226
MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1227
MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1228procedureStore.start(numThreads);
-1229procedureExecutor.start(numThreads, 
abortOnCorruption);
-1230
procEnv.getRemoteDispatcher().start();
-1231  }
-1232
-1233  private void stopProcedureExecutor() 
{
-1234if (procedureExecutor != null) {
-1235  
configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1236  
procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1237  procedureExecutor.stop();
-1238  procedureExecutor.join();
-1239  procedureExecutor = null;
-1240}
-1241
-1242if (procedureStore != null) {
-1243  
procedureStore.stop(isAborted());
-1244  procedureStore = null;
-1245}
-1246  }
-1247
-1248  private void stopChores() {
-1249if (this.expiredMobFileCleanerChore 
!= null) {
-1250  
this.expiredMobFileCleanerChore.cancel(true);
-1251}
-1252if (this.mobCompactChore != null) 
{
-1253  
this.mobCompactChore.cancel(true);
-1254}
-1255if (this.balancerChore != null) {
-1256  this.balancerChore.cancel(true);
-1257}
-1258if (this.normalizerChore != null) 
{
-1259  
this.normalizerChore.cancel(true);
-1260}
-1261if (this.clusterStatusChore != null) 
{
-1262  
this.clusterStatusChore.cancel(true);
-1263}
-1264if (this.catalogJanitorChore != 
null) {
-1265  
this.catalogJanitorChore.cancel(true);
-1266}
-1267if (this.clusterStatusPublisherChore 
!= null){
-1268  
clusterStatusPublisherChore.cancel(true);
-1269}
-1270if (this.mobCompactThread != null) 
{
-1271  this.mobCompactThread.close();
-1272}
-1273
-1274if (this.quotaObserverChore != null) 
{
-1275  quotaObserverChore.cancel();
-1276}
-1277if (this.snapshotQuotaChore != null) 
{
-1278  snapshotQuotaChore.cancel();
-1279}
-1280  }
-1281
-1282  /**
-1283   * @return Get remote side's 
InetAddress
-1284   */
-1285  InetAddress getRemoteInetAddress(final 
int port,
-1286  final long serverStartCode) throws 
UnknownHostException {
-1287// Do it out here in its own little 
method so can fake an address when
-1288// mocking up in tests.
-1289InetAddress ia = 
RpcServer.getRemoteIp();
-1290
-1291// The call could be from the local 
regionserver,
-1292// in which case, there is no remote 
address.
-1293if (ia == null && 
serverStartCode == startcode) {
-1294  InetSocketAddress isa = 
rpcServices.getSocketAddress();
-1295  if (isa != null && 
isa.getPort() == port) {
-1296ia = isa.getAddress();
-1297  }
-1298}
-1299return ia;
-1300  }
-1301
-1302  /**
-1303   * @return Maximum time we should run 
balancer for
-1304   */
-1305  private int getMaxBalancingTime() {
-1306int maxBalancingTime = 
getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1307if (maxBalancingTime == -1) {
-1308 

[40/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index ef600d2..1bacac1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AssignmentManager
+public class AssignmentManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ServerListener
 The AssignmentManager is the coordinator for region 
assign/unassign operations.
@@ -950,7 +950,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -959,7 +959,7 @@ implements 
 
 BOOTSTRAP_THREAD_POOL_SIZE_CONF_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BOOTSTRAP_THREAD_POOL_SIZE_CONF_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BOOTSTRAP_THREAD_POOL_SIZE_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -972,7 +972,7 @@ implements 
 
 ASSIGN_DISPATCH_WAIT_MSEC_CONF_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_DISPATCH_WAIT_MSEC_CONF_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_DISPATCH_WAIT_MSEC_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -985,7 +985,7 @@ implements 
 
 DEFAULT_ASSIGN_DISPATCH_WAIT_MSEC
-private static final int DEFAULT_ASSIGN_DISPATCH_WAIT_MSEC
+private static final int DEFAULT_ASSIGN_DISPATCH_WAIT_MSEC
 
 See Also:
 Constant
 Field Values
@@ -998,7 +998,7 @@ implements 
 
 ASSIGN_DISPATCH_WAITQ_MAX_CONF_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_DISPATCH_WAITQ_MAX_CONF_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_DISPATCH_WAITQ_MAX_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -1011,7 +1011,7 @@ implements 
 
 DEFAULT_ASSIGN_DISPATCH_WAITQ_MAX
-private static final int DEFAULT_ASSIGN_DISPATCH_WAITQ_MAX
+private static final int DEFAULT_ASSIGN_DISPATCH_WAITQ_MAX
 
 See Also:
 Constant
 Field Values
@@ -1024,7 +1024,7 @@ implements 
 
 RIT_CHORE_INTERVAL_MSEC_CONF_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RIT_CHORE_INTERVAL_MSEC_CONF_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RIT_CHORE_INTERVAL_MSEC_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -1037,7 +1037,7 @@ implements 
 
 DEFAULT_RIT_CHORE_INTERVAL_MSEC
-private static final int DEFAULT_RIT_CHORE_INTERVAL_MSEC
+private static final int DEFAULT_RIT_CHORE_INTERVAL_MSEC
 
 See Also:
 Constant
 Field Values
@@ -1050,7 +1050,7 @@ implements 
 
 ASSIGN_MAX_ATTEMPTS
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_MAX_ATTEMPTS
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ASSIGN_MAX_ATTEMPTS
 
 See Also:
 Constant
 Field Values
@@ -1063,7 +1063,7 @@ implements 
 
 DEFAULT_ASSIGN_MAX_ATTEMPTS
-private static final int DEFAULT_ASSIGN_MAX_ATTEMPTS
+private static final int DEFAULT_ASSIGN_MAX_ATTEMPTS
 
 See Also:
 Constant
 Field Values
@@ -1076,7 +1076,7 @@ implements 
 
 METRICS_RIT_STUCK_WARNING_THRESHOLD
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String METRICS_RIT_STUCK_WARNING_THRESHOLD
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String METRICS_RIT_STUCK_WARNING_THRESHOLD
 Region in Transition metrics threshold time
 
 See Also:
@@ -1090,7 +1090,7 @@ implements 
 
 DEFAULT_RIT_STUCK_WARNING_THRESHOLD
-private static final int DEFAULT_RIT_STUCK_WARNING_THRESHOLD
+private sta

[04/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
index 2939a56..681e263 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedStateManager().getSplitLogManagerCoordination


[36/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
index 9ea7f07..344ed0d 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
@@ -729,7 +729,7 @@ extends 
 
 rollback
-protected void rollback(TEnvironment env)
+protected void rollback(TEnvironment env)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException,
 http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true";
 title="class or interface in java.lang">InterruptedException
 Description copied from 
class: Procedure
@@ -756,7 +756,7 @@ extends 
 
 isEofState
-private boolean isEofState()
+private boolean isEofState()
 
 
 
@@ -767,7 +767,7 @@ extends 
 
 abort
-protected boolean abort(TEnvironment env)
+protected boolean abort(TEnvironment env)
 Description copied from 
class: Procedure
 The abort() call is asynchronous and each procedure must 
decide how to deal
  with it, if they want to be abortable. The simplest implementation
@@ -790,7 +790,7 @@ extends 
 
 failIfAborted
-protected final void failIfAborted()
+protected final void failIfAborted()
 If procedure has more states then abort it otherwise 
procedure is finished and abort can be
  ignored.
 
@@ -803,7 +803,7 @@ extends 
 
 isRollbackSupported
-protected boolean isRollbackSupported(TState state)
+protected boolean isRollbackSupported(TState state)
 Used by the default implementation of abort() to know if 
the current state can be aborted
  and rollback can be triggered.
 
@@ -816,7 +816,7 @@ extends 
 
 isYieldAfterExecutionStep
-protected boolean isYieldAfterExecutionStep(TEnvironment env)
+protected boolean isYieldAfterExecutionStep(TEnvironment env)
 Description copied from 
class: Procedure
 By default, the procedure framework/executor will try to 
run procedures start to finish.
  Return true to make the executor yield between each execution step to
@@ -838,7 +838,7 @@ extends 
 
 hasMoreState
-private boolean hasMoreState()
+private boolean hasMoreState()
 
 
 
@@ -847,7 +847,7 @@ extends 
 
 getCurrentState
-protected TState getCurrentState()
+protected TState getCurrentState()
 
 
 
@@ -856,7 +856,7 @@ extends 
 
 setNextState
-private void setNextState(int stateId)
+private void setNextState(int stateId)
 Set the next state for the procedure.
 
 Parameters:
@@ -870,7 +870,7 @@ extends 
 
 toStringState
-protected void toStringState(http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder builder)
+protected void toStringState(http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder builder)
 Description copied from 
class: Procedure
 Called from Procedure.toString()
 when interpolating Procedure 
State.
  Allows decorating generic Procedure State with Procedure particulars.
@@ -888,7 +888,7 @@ extends 
 
 serializeStateData
-protected void serializeStateData(ProcedureStateSerializer serializer)
+protected void serializeStateData(ProcedureStateSerializer serializer)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
class: Procedure
 The user-level code of the procedure may have some state to
@@ -910,7 +910,7 @@ extends 
 
 deserializeStateData
-protected void deserializeStateData(ProcedureStateSerializer serializer)
+protected void deserializeStateData(ProcedureStateSerializer serializer)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
class: Procedure
 Called on store load to allow the user to decode the 
previously serialized

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 33a3702..44b49f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -208,11 +208,11 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-ex

hbase-site git commit: INFRA-10751 Empty commit

2018-02-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6674e3ab7 -> 03d2c36ec


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/03d2c36e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/03d2c36e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/03d2c36e

Branch: refs/heads/asf-site
Commit: 03d2c36eca0c4f67bc008f2a9a3d15678eadaba0
Parents: 6674e3a
Author: jenkins 
Authored: Sun Feb 4 15:14:16 2018 +
Committer: jenkins 
Committed: Sun Feb 4 15:14:16 2018 +

--

--




[44/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index a3a5c6d..96ee806 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -711,40 +711,44 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ColumnFamilyDescriptorBuilder
-setPrefetchBlocksOnOpen(boolean value) 
+setNewVersionBehavior(boolean value) 
 
 
 ColumnFamilyDescriptorBuilder
-setScope(int value) 
+setPrefetchBlocksOnOpen(boolean value) 
 
 
 ColumnFamilyDescriptorBuilder
-setStoragePolicy(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String value) 
+setScope(int value) 
 
 
 ColumnFamilyDescriptorBuilder
-setTimeToLive(int value) 
+setStoragePolicy(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String value) 
 
 
 ColumnFamilyDescriptorBuilder
-setTimeToLive(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String value) 
+setTimeToLive(int value) 
 
 
 ColumnFamilyDescriptorBuilder
+setTimeToLive(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String value) 
+
+
+ColumnFamilyDescriptorBuilder
 setValue(byte[] key,
 byte[] value) 
 
-
+
 ColumnFamilyDescriptorBuilder
 setValue(Bytes key,
 Bytes value) 
 
-
+
 ColumnFamilyDescriptorBuilder
 setValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String value) 
 
-
+
 static byte[]
 toByteArray(ColumnFamilyDescriptor desc) 
 
@@ -2183,13 +2187,22 @@ public static final http://docs.oracle.com/javase/8/docs/api/java/
 
 
 
+
+
+
+
+
+setNewVersionBehavior
+public ColumnFamilyDescriptorBuilder setNewVersionBehavior(boolean value)
+
+
 
 
 
 
 
 setValue
-public ColumnFamilyDescriptorBuilder setValue(Bytes key,
+public ColumnFamilyDescriptorBuilder setValue(Bytes key,
   Bytes value)
 
 
@@ -2199,7 +2212,7 @@ public static final http://docs.oracle.com/javase/8/docs/api/java/
 
 
 setValue
-public ColumnFamilyDescriptorBuilder setValue(byte[] key,
+public ColumnFamilyDescriptorBuilder setValue(byte[] key,
   byte[] value)
 
 
@@ -2209,7 +,7 @@ public static final http://docs.oracle.com/javase/8/docs/api/java/
 
 
 setValue
-public ColumnFamilyDescriptorBuilder setValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key,
+public ColumnFamilyDescriptorBuilder setValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String key,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String value)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamily

[27/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238

[07/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
index 6a0cad6..6fcf3a7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
@@ -158,7 +158,7 @@
 150// only one MasterCoprocessorHost 
instance in the master process
 151boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
 152  DEFAULT_COPROCESSORS_ENABLED);
-153LOG.info("System coprocessor loading 
is " + (coprocessorsEnabled ? "enabled" : "disabled"));
+153LOG.trace("System coprocessor loading 
is {}",  (coprocessorsEnabled ? "enabled" : "disabled"));
 154loadSystemCoprocessors(conf, 
MASTER_COPROCESSOR_CONF_KEY);
 155  }
 156

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
index 6a0cad6..6fcf3a7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
@@ -158,7 +158,7 @@
 150// only one MasterCoprocessorHost 
instance in the master process
 151boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
 152  DEFAULT_COPROCESSORS_ENABLED);
-153LOG.info("System coprocessor loading 
is " + (coprocessorsEnabled ? "enabled" : "disabled"));
+153LOG.trace("System coprocessor loading 
is {}",  (coprocessorsEnabled ? "enabled" : "disabled"));
 154loadSystemCoprocessors(conf, 
MASTER_COPROCESSOR_CONF_KEY);
 155  }
 156

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 6a0cad6..6fcf3a7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -158,7 +158,7 @@
 150// only one MasterCoprocessorHost 
instance in the master process
 151boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
 152  DEFAULT_COPROCESSORS_ENABLED);
-153LOG.info("System coprocessor loading 
is " + (coprocessorsEnabled ? "enabled" : "disabled"));
+153LOG.trace("System coprocessor loading 
is {}",  (coprocessorsEnabled ? "enabled" : "disabled"));
 154loadSystemCoprocessors(conf, 
MASTER_COPROCESSOR_CONF_KEY);
 155  }
 156

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 6a0cad6..6fcf3a7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -158,7 +158,7 @@
 150// only one MasterCoprocessorHost 
instance in the master process
 151boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
 152  DEFAULT_COPROCESSORS_ENABLED);
-153LOG.info("System coprocessor loading 
is " + (coprocessorsEnabled ? "enabled" : "disabled"));
+153LOG.trace("System coprocessor loading 
is {}",  (coprocessorsEnabled ? "enabled" : "disabled"));
 154loadSystemCoprocessors(conf, 
MASTER_COPROCESSOR_CONF_KEY);
 155  }
 156

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git 

[45/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
index 4eaf8fe..888136f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ColumnFamilyDescriptor, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable
 An ModifyableFamilyDescriptor contains information about a 
column family such as the
@@ -619,7 +619,7 @@ implements 
 
 name
-private final byte[] name
+private final byte[] name
 
 
 
@@ -628,7 +628,7 @@ implements 
 
 values
-private final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map values
+private final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map values
 
 
 
@@ -637,7 +637,7 @@ implements 
 
 configuration
-private final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String> configuration
+private final http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String> configuration
 A map which holds the configuration specific to the column 
family. The
  keys of the map have the same names as config keys and override the
  defaults with cf-specific settings. Example usage may be for compactions,
@@ -659,7 +659,7 @@ implements 
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-public ModifyableColumnFamilyDescriptor(byte[] name)
+public ModifyableColumnFamilyDescriptor(byte[] name)
 Construct a column descriptor specifying only the family 
name The other
  attributes are defaulted.
 
@@ -677,7 +677,7 @@ public 
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc)
+public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc)
 Constructor. Makes a deep copy of the supplied descriptor.
  TODO: make this private after the HCD is removed.
 
@@ -692,7 +692,7 @@ public 
 
 ModifyableColumnFamilyDescriptor
-private ModifyableColumnFamilyDescriptor(byte[] name,
+private ModifyableColumnFamilyDescriptor(byte[] name,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map values,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String> config)
 
@@ -711,7 +711,7 @@ public 
 
 getName
-public byte[] getName()
+public byte[] getName()
 
 Specified by:
 getName in
 interface ColumnFamilyDescriptor
@@ -726,7 +726,7 @@ public 
 
 getNameAsString
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getNameAsString()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getNameAsString()
 
 Speci

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
index 5844c3b..80259dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
@@ -159,7 +159,7 @@
 151}
 152Task task = 
findOrCreateOrphanTask(path);
 153if (task.isOrphan() && 
(task.incarnation.get() == 0)) {
-154  LOG.info("resubmitting unassigned 
orphan task " + path);
+154  LOG.info("Resubmitting unassigned 
orphan task " + path);
 155  // ignore failure to resubmit. The 
timeout-monitor will handle it later
 156  // albeit in a more crude fashion
 157  resubmitTask(path, task, FORCE);
@@ -210,7 +210,7 @@
 202  
SplitLogCounters.tot_mgr_resubmit_force.increment();
 203  version = -1;
 204}
-205LOG.info("resubmitting task " + 
path);
+205LOG.info("Resubmitting task " + 
path);
 206task.incarnation.incrementAndGet();
 207boolean result = resubmit(path, 
version);
 208if (!result) {
@@ -288,7 +288,7 @@
 280
SplitLogCounters.tot_mgr_rescan_deleted.increment();
 281  }
 282  
SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
-283  LOG.debug("deleted task without in 
memory state " + path);
+283  LOG.debug("Deleted task without in 
memory state " + path);
 284  return;
 285}
 286synchronized (task) {
@@ -336,13 +336,13 @@
 328  }
 329
 330  private void createNodeSuccess(String 
path) {
-331LOG.debug("put up splitlog task at 
znode " + path);
+331LOG.debug("Put up splitlog task at 
znode " + path);
 332getDataSetWatch(path, zkretries);
 333  }
 334
 335  private void createNodeFailure(String 
path) {
 336// TODO the Manager should split the 
log locally instead of giving up
-337LOG.warn("failed to create task node" 
+ path);
+337LOG.warn("Failed to create task node 
" + path);
 338setDone(path, FAILURE);
 339  }
 340
@@ -368,15 +368,15 @@
 360data = 
ZKMetadata.removeMetaData(data);
 361SplitLogTask slt = 
SplitLogTask.parseFrom(data);
 362if (slt.isUnassigned()) {
-363  LOG.debug("task not yet acquired " 
+ path + " ver = " + version);
+363  LOG.debug("Task not yet acquired " 
+ path + ", ver=" + version);
 364  handleUnassignedTask(path);
 365} else if (slt.isOwned()) {
 366  heartbeat(path, version, 
slt.getServerName());
 367} else if (slt.isResigned()) {
-368  LOG.info("task " + path + " entered 
state: " + slt.toString());
+368  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 369  resubmitOrFail(path, FORCE);
 370} else if (slt.isDone()) {
-371  LOG.info("task " + path + " entered 
state: " + slt.toString());
+371  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 372  if (taskFinisher != null && 
!ZKSplitLog.isRescanNode(watcher, path)) {
 373if 
(taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == 
Status.DONE) {
 374  setDone(path, SUCCESS);
@@ -387,7 +387,7 @@
 379setDone(path, SUCCESS);
 380  }
 381} else if (slt.isErr()) {
-382  LOG.info("task " + path + " entered 
state: " + slt.toString());
+382  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 383  resubmitOrFail(path, CHECK);
 384} else {
 385  LOG.error(HBaseMarkers.FATAL, 
"logic error - unexpected zk state for path = "
@@ -403,7 +403,7 @@
 395  }
 396
 397  private void 
getDataSetWatchFailure(String path) {
-398LOG.warn("failed to set data watch " 
+ path);
+398LOG.warn("Failed to set data watch " 
+ path);
 399setDone(path, FAILURE);
 400  }
 401
@@ -412,7 +412,7 @@
 404if (task == null) {
 405  if 
(!ZKSplitLog.isRescanNode(watcher, path)) {
 406
SplitLogCounters.tot_mgr_unacquired_orphan_done.increment();
-407LOG.debug("unacquired orphan task 
is done " + path);
+407LOG.debug("Unacquired orphan task 
is done " + path);
 408  }
 409} else {
 410  synchronized (task) {
@@ -449,7 +449,7 @@
 441
 442  private Task 
findOrCreateOrphanTask(String path) {
 443return 
computeIfAbsent(details.getTasks(), path, Task::new, () -> {
-444  LOG.info("creating orphan task " + 
path);
+444  LOG.info("Creating orphan task " + 
path);
 445  
SplitLogCounters.tot_mgr_orphan_task_acquired.increment();
 446});
 447  }
@@ -458,7 +458,7 @@
 450Task task = 
findOrCreateOrphanTask(path);
 451  

[38/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RemoteProcedureResolver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RemoteProcedureResolver.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RemoteProcedureResolver.html
index ee8aa36..8f7e01c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RemoteProcedureResolver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RemoteProcedureResolver.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static interface RSProcedureDispatcher.RemoteProcedureResolver
+private static interface RSProcedureDispatcher.RemoteProcedureResolver
 
 
 
@@ -164,7 +164,7 @@ var activeTableTab = "activeTableTab";
 
 
 dispatchOpenRequests
-void dispatchOpenRequests(MasterProcedureEnv env,
+void dispatchOpenRequests(MasterProcedureEnv env,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List operations)
 
 
@@ -174,7 +174,7 @@ var activeTableTab = "activeTableTab";
 
 
 dispatchCloseRequests
-void dispatchCloseRequests(MasterProcedureEnv env,
+void dispatchCloseRequests(MasterProcedureEnv env,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List operations)
 
 
@@ -184,7 +184,7 @@ var activeTableTab = "activeTableTab";
 
 
 dispatchServerOperations
-void dispatchServerOperations(MasterProcedureEnv env,
+void dispatchServerOperations(MasterProcedureEnv env,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List operations)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ServerOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ServerOperation.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ServerOperation.html
index e91e960..6227d95 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ServerOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ServerOperation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static final class RSProcedureDispatcher.ServerOperation
+public static final class RSProcedureDispatcher.ServerOperation
 extends RemoteProcedureDispatcher.RemoteOperation
 
 
@@ -224,7 +224,7 @@ extends 
 
 procId
-private final long procId
+private final long procId
 
 
 
@@ -233,7 +233,7 @@ extends 
 
 rsProcClass
-private final http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class rsProcClass
+private final http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class rsProcClass
 
 
 
@@ -242,7 +242,7 @@ extends 
 
 rsProcData
-private final byte[] rsProcData
+private final byte[] rsProcData
 
 
 
@@ -259,7 +259,7 @@ extends 
 
 ServerOperation
-public ServerOperation(RemoteProcedureDispatcher.RemoteProcedure remoteProcedure,
+public ServerOperation(RemoteProcedureDispatcher.RemoteProcedure remoteProcedure,
long procId,
http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class rsProcClass,
byte[] rsProcData)
@@ -279,7 +279,7 @@ extends 
 
 buildRequest
-public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest buildRequest()
+public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest buildRequest()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
index 4849b68..34132f7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
@@ -507,7 +507,7 @@ implements 
 

[01/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 250fddb76 -> 6674e3ab7


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
index 2939a56..681e263 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedStateManag


[15/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// create the environment
-262E env = createEnvironment(impl, 

[02/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
index 2939a56..681e263 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedStateManager().getSp


[32/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index 852f85c..174dfdd 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ReplicationSourceManager
+public class ReplicationSourceManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ReplicationListener
 This class is responsible to manage all the replication 
sources. There are two classes of
@@ -329,7 +329,7 @@ implements 
-All Methods Static Methods Instance Methods Concrete Methods 
+All Methods Instance Methods Concrete Methods 
 
 Modifier and Type
 Method and Description
@@ -538,29 +538,16 @@ implements 
-(package private) void
-scopeWALEdits(WALKey logKey,
- WALEdit logEdit) 
-
-
-(package private) static void
-scopeWALEdits(WALKey logKey,
- WALEdit logEdit,
- org.apache.hadoop.conf.Configuration conf)
-Utility method used to set the correct scopes on each log 
key.
-
-
-
 private void
 throwIOExceptionWhenFail(ReplicationSourceManager.ReplicationQueueOperation op) 
 
-
+
 private void
 transferQueues(ServerName deadRS)
 Transfer all the queues of the specified to this region 
server.
 
 
-
+
 (package private) void
 waitUntilCanBePushed(byte[] encodedName,
 long seq,
@@ -596,7 +583,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -605,7 +592,7 @@ implements 
 
 sources
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentMapString,ReplicationSourceInterface>
 sources
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentMapString,ReplicationSourceInterface>
 sources
 
 
 
@@ -614,7 +601,7 @@ implements 
 
 oldsources
-private final http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 oldsources
+private final http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 oldsources
 
 
 
@@ -623,7 +610,7 @@ implements 
 
 queueStorage
-private final ReplicationQueueStorage queueStorage
+private final ReplicationQueueStorage queueStorage
 
 
 
@@ -632,7 +619,7 @@ implements 
 
 replicationTracker
-private final ReplicationTracker replicationTracker
+private final ReplicationTracker replicationTracker
 
 
 
@@ -641,7 +628,7 @@ implements 
 
 replicationPeers
-private final ReplicationPeers replicationPeers
+private final ReplicationPeers replicationPeers
 
 
 
@@ -650,7 +637,7 @@ implements 
 
 clusterId
-private final http://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true";
 title="class or interface in java.util">UUID clusterId
+priv

[14/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// create the en

[50/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index b590002..9b2a580 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -560,806 +560,811 @@
 552return this;
 553  }
 554
-555  public ColumnFamilyDescriptorBuilder 
setValue(final Bytes key, final Bytes value) {
-556desc.setValue(key, value);
+555  public ColumnFamilyDescriptorBuilder 
setNewVersionBehavior(final boolean value) {
+556desc.setNewVersionBehavior(value);
 557return this;
 558  }
 559
-560  public ColumnFamilyDescriptorBuilder 
setValue(final byte[] key, final byte[] value) {
+560  public ColumnFamilyDescriptorBuilder 
setValue(final Bytes key, final Bytes value) {
 561desc.setValue(key, value);
 562return this;
 563  }
 564
-565  public ColumnFamilyDescriptorBuilder 
setValue(final String key, final String value) {
+565  public ColumnFamilyDescriptorBuilder 
setValue(final byte[] key, final byte[] value) {
 566desc.setValue(key, value);
 567return this;
 568  }
 569
-570  /**
-571   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
-572   * number of versions, compression 
settings, etc.
-573   *
-574   * It is used as input when creating a 
table or adding a column.
-575   * TODO: make this package-private 
after removing the HColumnDescriptor
-576   */
-577  @InterfaceAudience.Private
-578  public static class 
ModifyableColumnFamilyDescriptor
-579  implements ColumnFamilyDescriptor, 
Comparable {
-580
-581// Column family name
-582private final byte[] name;
-583
-584// Column metadata
-585private final Map 
values = new HashMap<>();
-586
-587/**
-588 * A map which holds the 
configuration specific to the column family. The
-589 * keys of the map have the same 
names as config keys and override the
-590 * defaults with cf-specific 
settings. Example usage may be for compactions,
-591 * etc.
-592 */
-593private final Map configuration = new HashMap<>();
-594
-595/**
-596 * Construct a column descriptor 
specifying only the family name The other
-597 * attributes are defaulted.
-598 *
-599 * @param name Column family name. 
Must be 'printable' -- digit or
-600 * letter -- and may not contain a 
:
-601 * TODO: make this private after the 
HCD is removed.
-602 */
-603@InterfaceAudience.Private
-604public 
ModifyableColumnFamilyDescriptor(final byte[] name) {
-605  this(isLegalColumnFamilyName(name), 
getDefaultValuesBytes(), Collections.emptyMap());
-606}
-607
-608/**
-609 * Constructor. Makes a deep copy of 
the supplied descriptor.
-610 * TODO: make this private after the 
HCD is removed.
-611 * @param desc The descriptor.
-612 */
-613@InterfaceAudience.Private
-614public 
ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) {
-615  this(desc.getName(), 
desc.getValues(), desc.getConfiguration());
-616}
-617
-618private 
ModifyableColumnFamilyDescriptor(byte[] name, Map values, 
Map config) {
-619  this.name = name;
-620  this.values.putAll(values);
-621  
this.configuration.putAll(config);
-622}
-623
-624@Override
-625public byte[] getName() {
-626  return Bytes.copy(name);
+570  public ColumnFamilyDescriptorBuilder 
setValue(final String key, final String value) {
+571desc.setValue(key, value);
+572return this;
+573  }
+574
+575  /**
+576   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
+577   * number of versions, compression 
settings, etc.
+578   *
+579   * It is used as input when creating a 
table or adding a column.
+580   * TODO: make this package-private 
after removing the HColumnDescriptor
+581   */
+582  @InterfaceAudience.Private
+583  public static class 
ModifyableColumnFamilyDescriptor
+584  implements ColumnFamilyDescriptor, 
Comparable {
+585
+586// Column family name
+587private final byte[] name;
+588
+589// Column metadata
+590private final Map 
values = new HashMap<>();
+591
+592/**
+593 * A map which holds the 
configuration specific to the column family. The
+594 * keys of the map have the same 
names as config keys and override the
+595 * defaults with cf-specific 
settings. Example usage may be for compactions,
+596 * etc.
+597 */
+598private fi

[37/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 082a2a2..6cba39a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class ProcedureExecutor.WorkerThread
+private final class ProcedureExecutor.WorkerThread
 extends ProcedureExecutor.StoppableThread
 
 
@@ -271,7 +271,7 @@ extends 
 
 executionStartTime
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicLong executionStartTime
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicLong executionStartTime
 
 
 
@@ -280,7 +280,7 @@ extends 
 
 activeProcedure
-private Procedure activeProcedure
+private Procedure activeProcedure
 
 
 
@@ -297,7 +297,7 @@ extends 
 
 WorkerThread
-public WorkerThread(http://docs.oracle.com/javase/8/docs/api/java/lang/ThreadGroup.html?is-external=true";
 title="class or interface in java.lang">ThreadGroup group)
+public WorkerThread(http://docs.oracle.com/javase/8/docs/api/java/lang/ThreadGroup.html?is-external=true";
 title="class or interface in java.lang">ThreadGroup group)
 
 
 
@@ -314,7 +314,7 @@ extends 
 
 sendStopSignal
-public void sendStopSignal()
+public void sendStopSignal()
 
 Specified by:
 sendStopSignal in
 class ProcedureExecutor.StoppableThread
@@ -327,7 +327,7 @@ extends 
 
 run
-public void run()
+public void run()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--";
 title="class or interface in java.lang">run in 
interface http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable
@@ -342,7 +342,7 @@ extends 
 
 toString
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true#toString--";
 title="class or interface in java.lang">toString in 
class http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true";
 title="class or interface in java.lang">Thread
@@ -355,7 +355,7 @@ extends 
 
 getCurrentRunTime
-public long getCurrentRunTime()
+public long getCurrentRunTime()
 
 Returns:
 the time since the current procedure is running
@@ -368,7 +368,7 @@ extends 
 
 keepAlive
-private boolean keepAlive(long lastUpdate)
+private boolean keepAlive(long lastUpdate)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 07a08b8..f9f5705 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -753,7 +753,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 completed
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentHashMapLong,ProcedureExecutor.CompletedProcedureRetainer>
 completed
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentHashMapLong,ProcedureExecutor.CompletedProcedureRetainer>
 completed
 Map the the procId returned by submitProcedure(), the 
Root-ProcID, to the Procedure.
  Once a Root-Procedure completes (success or failure), the result will be 
added to this map.
  The user of ProcedureExecutor should call getResult(procId) to get the 
result.
@@ -765,7 +765,7 @@ extends 

[22/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index bb263a4..203e85c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"6519b98ac3115c4442a2778f6ed7b39ce5cd3b83";
+011  public static final String revision = 
"170ffbba683217bdb30e5c99f0e728e0dc660d56";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Sat 
Feb  3 14:41:05 UTC 2018";
+013  public static final String date = "Sun 
Feb  4 14:41:34 UTC 2018";
 014  public static final String url = 
"git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "e1f78921fcd876d508017ada55edc99a";
+015  public static final String srcChecksum 
= "3c3a6a55ea36b8cbac6c726d5fe311de";
 016}
 
 



[11/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html
index 32c21a4..92a291e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcConnection.html
@@ -259,107 +259,105 @@
 251  }
 252
 253  private void connect() {
-254if (LOG.isDebugEnabled()) {
-255  LOG.debug("Connecting to " + 
remoteId.address);
-256}
-257
-258this.channel = new 
Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
-259
.option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
-260
.option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
-261
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
-262.handler(new 
BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
-263
.remoteAddress(remoteId.address).connect().addListener(new 
ChannelFutureListener() {
-264
-265  @Override
-266  public void 
operationComplete(ChannelFuture future) throws Exception {
-267Channel ch = 
future.channel();
-268if (!future.isSuccess()) {
-269  failInit(ch, 
toIOE(future.cause()));
-270  
rpcClient.failedServers.addToFailedServers(remoteId.address, future.cause());
-271  return;
-272}
-273
ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
-274if (useSasl) {
-275  saslNegotiate(ch);
-276} else {
-277  // send the connection 
header to server
-278  
ch.write(connectionHeaderWithLength.retainedDuplicate());
-279  established(ch);
-280}
-281  }
-282}).channel();
-283  }
-284
-285  private void write(Channel ch, final 
Call call) {
-286
ch.writeAndFlush(call).addListener(new ChannelFutureListener() {
-287
-288  @Override
-289  public void 
operationComplete(ChannelFuture future) throws Exception {
-290// Fail the call if we failed to 
write it out. This usually because the channel is
-291// closed. This is needed because 
we may shutdown the channel inside event loop and
-292// there may still be some 
pending calls in the event loop queue after us.
-293if (!future.isSuccess()) {
-294  
call.setException(toIOE(future.cause()));
-295}
-296  }
-297});
-298  }
-299
-300  @Override
-301  public synchronized void 
sendRequest(final Call call, HBaseRpcController hrc) throws IOException {
-302if (reloginInProgress) {
-303  throw new IOException("Can not send 
request because relogin is in progress.");
-304}
-305hrc.notifyOnCancel(new 
RpcCallback() {
-306
-307  @Override
-308  public void run(Object parameter) 
{
-309setCancelled(call);
-310synchronized (this) {
-311  if (channel != null) {
-312
channel.pipeline().fireUserEventTriggered(new CallEvent(CANCELLED, call));
-313  }
-314}
-315  }
-316}, new CancellationCallback() {
-317
-318  @Override
-319  public void run(boolean cancelled) 
throws IOException {
-320if (cancelled) {
-321  setCancelled(call);
-322} else {
-323  if (channel == null) {
-324connect();
-325  }
-326  scheduleTimeoutTask(call);
-327  final Channel ch = channel;
-328  // We must move the whole 
writeAndFlush call inside event loop otherwise there will be a
-329  // race condition.
-330  // In netty's 
DefaultChannelPipeline, it will find the first outbound handler in the
-331  // current thread and then 
schedule a task to event loop which will start the process from
-332  // that outbound handler. It is 
possible that the first handler is
-333  // BufferCallBeforeInitHandler 
when we call writeAndFlush here, but the connection is set
-334  // up at the same time so in 
the event loop thread we remove the
-335  // BufferCallBeforeInitHandler, 
and then our writeAndFlush task comes, still calls the
-336  // write method of 
BufferCallBeforeInitHandler.
-337  // This may be considered as a 
bug of netty, but anyway there is a work around so let's
-338  // fix it by ourselves first.
-339  if 
(ch.eventLoop().inEventLoop()) {
-340write(ch, call);
-341  } else {
-342ch.eventLoop().execute(new 
Runnable() {
-343
-344  @Override
-345  public void run() {
-346write(ch, call);
-347  }
-348});
-349  }
-350}
-351 

[12/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// create the environment
-262E env = createEnvironment(impl, 
priority, loadSequence.incrementAndGet(), conf);
-263assert env instanceof 
BaseEnviro

[43/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
index d56fed1..6cf5aef 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
@@ -672,7 +672,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 legacyWarning
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetClass> legacyWarning
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetClass> legacyWarning
 Used to limit legacy handling to once per Coprocessor class 
per classloader.
 
 
@@ -742,7 +742,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 load
-public E load(org.apache.hadoop.fs.Path path,
+public E load(org.apache.hadoop.fs.Path path,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className,
   int priority,
   org.apache.hadoop.conf.Configuration conf)
@@ -765,7 +765,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 load
-public E load(org.apache.hadoop.fs.Path path,
+public E load(org.apache.hadoop.fs.Path path,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className,
   int priority,
   org.apache.hadoop.conf.Configuration conf,
@@ -790,7 +790,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 load
-public void load(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass,
+public void load(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass,
  int priority,
  org.apache.hadoop.conf.Configuration conf)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -806,7 +806,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 checkAndLoadInstance
-public E checkAndLoadInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass,
+public E checkAndLoadInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass,
   int priority,
   org.apache.hadoop.conf.Configuration conf)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -828,7 +828,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createEnvironment
-public abstract E createEnvironment(C instance,
+public abstract E createEnvironment(C instance,
 int priority,
 int sequence,
 
org.apache.hadoop.conf.Configuration conf)
@@ -841,7 +841,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 checkAndGetInstance
-public abstract C checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
+public abstract C checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/InstantiationException.html?is-external=true";
 title="class or interface in java.lang">InstantiationException,
   http://docs.oracle.com/javase/8/docs/api/java/lang/IllegalAccessException.html?is-external=true";
 title="class or interface in java.lang">IllegalAccessException
 Called when a new Coprocessor class needs to be loa

[26/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239@Override
-1240public boolean vis

[24/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
index ad601c4..53e455f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239@Override
-1240public boolean visit(Result 
r

[46/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
index d0b68a0..0846203 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -2099,7 +2099,7 @@ public static 
 
 getTableStates
-public static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map getTableStates(Connection conn)
+public static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map getTableStates(Connection conn)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Fetch table states from META table
 
@@ -2118,7 +2118,7 @@ public static 
 
 updateTableState
-public static void updateTableState(Connection conn,
+public static void updateTableState(Connection conn,
 TableName tableName,
 TableState.State actual)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -2139,7 +2139,7 @@ public static 
 getTableState
 @Nullable
-public static TableState getTableState(Result r)
+public static TableState getTableState(Result r)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Decode table state from META Result.
  Should contain cell from HConstants.TABLE_FAMILY
@@ -2159,7 +2159,7 @@ public static 
 
 getRegionCount
-public static int getRegionCount(org.apache.hadoop.conf.Configuration c,
+public static int getRegionCount(org.apache.hadoop.conf.Configuration c,
  TableName tableName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Count regions in hbase:meta for passed 
table.
@@ -2180,7 +2180,7 @@ public static 
 
 getRegionCount
-public static int getRegionCount(Connection connection,
+public static int getRegionCount(Connection connection,
  TableName tableName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Count regions in hbase:meta for passed 
table.
@@ -2201,7 +2201,7 @@ public static 
 
 makePutFromRegionInfo
-public static Put makePutFromRegionInfo(RegionInfo regionInfo)
+public static Put makePutFromRegionInfo(RegionInfo regionInfo)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Generates and returns a Put containing the region into for 
the catalog table
 
@@ -2216,7 +2216,7 @@ public static 
 
 makePutFromRegionInfo
-public static Put makePutFromRegionInfo(RegionInfo regionInfo,
+public static Put makePutFromRegionInfo(RegionInfo regionInfo,
 long ts)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Generates and returns a Put containing the region into for 
the catalog table
@@ -2232,7 +2232,7 @@ public static 
 
 makeDeleteFromRegionInfo
-public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo)
+public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo)
 Generates and returns a Delete containing the region info 
for the catalog
  table
 
@@ -2243,7 +2243,7 @@ public static 
 
 makeDeleteFromRegionInfo
-public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo,
+public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo,
   long ts)
 Generates and returns a Delete containing the region info 
for the catalog
  table
@@ -2255,7 +2255,7 @@ public static 
 
 makeBarrierPut
-public static Put makeBarrierPut(byte[] encodedRegionName,
+public static Put makeBarrierPut(byte[] encodedRegionName,
  long seq,
  byte[] tableName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -2271,7

[08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 7515d7b..3c4825d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -762,7 +762,7 @@
 754boolean wasUp = 
this.clusterStatusTracker.isClusterUp();
 755if (!wasUp) 
this.clusterStatusTracker.setClusterUp();
 756
-757LOG.info("Server active/primary 
master=" + this.serverName +
+757LOG.info("Active/primary master=" + 
this.serverName +
 758", sessionid=0x" +
 759
Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
 760", setting cluster-up flag (Was=" 
+ wasUp + ")");
@@ -1161,7 +1161,7 @@
 1153   startProcedureExecutor();
 1154
 1155   // Start log cleaner thread
-1156   int cleanerInterval = 
conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
+1156   int cleanerInterval = 
conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
 1157   this.logCleaner =
 1158  new LogCleaner(cleanerInterval,
 1159 this, conf, 
getMasterWalManager().getFileSystem(),
@@ -1227,2368 +1227,2369 @@
 1219procedureExecutor = new 
ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
 1220
configurationManager.registerObserver(procEnv);
 1221
-1222final int numThreads = 
conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1223
Math.max(Runtime.getRuntime().availableProcessors(),
-1224  
MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1225final boolean abortOnCorruption = 
conf.getBoolean(
-1226
MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1227
MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1228procedureStore.start(numThreads);
-1229procedureExecutor.start(numThreads, 
abortOnCorruption);
-1230
procEnv.getRemoteDispatcher().start();
-1231  }
-1232
-1233  private void stopProcedureExecutor() 
{
-1234if (procedureExecutor != null) {
-1235  
configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1236  
procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1237  procedureExecutor.stop();
-1238  procedureExecutor.join();
-1239  procedureExecutor = null;
-1240}
-1241
-1242if (procedureStore != null) {
-1243  
procedureStore.stop(isAborted());
-1244  procedureStore = null;
-1245}
-1246  }
-1247
-1248  private void stopChores() {
-1249if (this.expiredMobFileCleanerChore 
!= null) {
-1250  
this.expiredMobFileCleanerChore.cancel(true);
-1251}
-1252if (this.mobCompactChore != null) 
{
-1253  
this.mobCompactChore.cancel(true);
-1254}
-1255if (this.balancerChore != null) {
-1256  this.balancerChore.cancel(true);
-1257}
-1258if (this.normalizerChore != null) 
{
-1259  
this.normalizerChore.cancel(true);
-1260}
-1261if (this.clusterStatusChore != null) 
{
-1262  
this.clusterStatusChore.cancel(true);
-1263}
-1264if (this.catalogJanitorChore != 
null) {
-1265  
this.catalogJanitorChore.cancel(true);
-1266}
-1267if (this.clusterStatusPublisherChore 
!= null){
-1268  
clusterStatusPublisherChore.cancel(true);
-1269}
-1270if (this.mobCompactThread != null) 
{
-1271  this.mobCompactThread.close();
-1272}
-1273
-1274if (this.quotaObserverChore != null) 
{
-1275  quotaObserverChore.cancel();
-1276}
-1277if (this.snapshotQuotaChore != null) 
{
-1278  snapshotQuotaChore.cancel();
-1279}
-1280  }
-1281
-1282  /**
-1283   * @return Get remote side's 
InetAddress
-1284   */
-1285  InetAddress getRemoteInetAddress(final 
int port,
-1286  final long serverStartCode) throws 
UnknownHostException {
-1287// Do it out here in its own little 
method so can fake an address when
-1288// mocking up in tests.
-1289InetAddress ia = 
RpcServer.getRemoteIp();
-1290
-1291// The call could be from the local 
regionserver,
-1292// in which case, there is no remote 
address.
-1293if (ia == null && 
serverStartCode == startcode) {
-1294  InetSocketAddress isa = 
rpcServices.getSocketAddress();
-1295  if (isa != null && 
isa.getPort() == port) {
-1296ia = isa.getAddress();
-1297  }
-1298}
-1299return ia;
-1300  }
-1301
-1302  /**
-1303   * @return Maximum time we should run 
balancer for
-1304   */
-1305  private int getMaxBalancingTime() {
-1306int maxBalancingTime = 
getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1307if (maxBalancingTime == -1) {
-1308  // if max balancing time isn't 
set, defaulting it to period time
-1309  ma

[05/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
index 2939a56..681e263 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedStateManager().getSplitLogManagerCoordination(); -141 } -142 -143 pri


[39/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html 
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
index 0e05a6e..f51f582 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.Action.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static interface CleanerChore.Action
+private static interface CleanerChore.Action
 
 
 
@@ -149,7 +149,7 @@ var activeTableTab = "activeTableTab";
 
 
 act
-T act()
+T act()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
 
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
index 338c3f6..dfa4ac0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class CleanerChore.CleanerTask
+private class CleanerChore.CleanerTask
 extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true";
 title="class or interface in java.util.concurrent">RecursiveTaskBoolean>
 
 
@@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 dir
-private final org.apache.hadoop.fs.Path dir
+private final org.apache.hadoop.fs.Path dir
 
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 root
-private final boolean root
+private final boolean root
 
 
 
@@ -283,7 +283,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 CleanerTask
-CleanerTask(org.apache.hadoop.fs.FileStatus dir,
+CleanerTask(org.apache.hadoop.fs.FileStatus dir,
 boolean root)
 
 
@@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 CleanerTask
-CleanerTask(org.apache.hadoop.fs.Path dir,
+CleanerTask(org.apache.hadoop.fs.Path dir,
 boolean root)
 
 
@@ -311,7 +311,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 compute
-protected http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true";
 title="class or interface in java.lang">Boolean compute()
+protected http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true";
 title="class or interface in java.lang">Boolean compute()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true#compute--";
 title="class or interface in java.util.concurrent">compute in 
class http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true";
 title="class or interface in java.util.concurrent">RecursiveTaskBoolean>
@@ -324,7 +324,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 getFilteredStatus
-private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List getFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicate function)
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List getFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicate function)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Get FileStatus with filter.
  Pay attention that FSUtils #listStatusWithStatusFilter would return null,
@@ -345,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/R
 
 
 deleteAction
-private boolean deleteAction(CleanerChore.Action

[16/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// create the environment
-262E env = createEnvironment(impl, 
priority, load

[48/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index fab5084..b1630e2 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -286,10 +286,10 @@
  Warnings
  Errors
 
-3524
+3527
 0
 0
-16593
+16583
 
 Files
 
@@ -2287,7 +2287,7 @@
 org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
 0
 0
-16
+15
 
 org/apache/hadoop/hbase/coprocessor/CoprocessorService.java
 0
@@ -4857,7 +4857,7 @@
 org/apache/hadoop/hbase/master/TableStateManager.java
 0
 0
-10
+9
 
 org/apache/hadoop/hbase/master/TestActiveMasterManager.java
 0
@@ -5187,7 +5187,7 @@
 org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 0
 0
-2
+4
 
 org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
 0
@@ -5667,4643 +5667,4648 @@
 org/apache/hadoop/hbase/net/Address.java
 0
 0
-3
+4
 
+org/apache/hadoop/hbase/net/TestAddress.java
+0
+0
+2
+
 org/apache/hadoop/hbase/nio/ByteBuff.java
 0
 0
 24
-
+
 org/apache/hadoop/hbase/nio/MultiByteBuff.java
 0
 0
 29
-
+
 org/apache/hadoop/hbase/nio/SingleByteBuff.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/procedure/Procedure.java
 0
 0
 14
-
+
 org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/procedure/ProcedureCoordinatorRpcs.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/procedure/ProcedureManagerHost.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure/ProcedureMember.java
 0
 0
 16
-
+
 org/apache/hadoop/hbase/procedure/ProcedureMemberRpcs.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure/Subprocedure.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/procedure/TestProcedureCoordinator.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/procedure/TestProcedureDescriber.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure/TestProcedureMember.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/procedure/TestZKProcedure.java
 0
 0
 29
-
+
 org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/BadProcedureException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/LockAndQueue.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/LockedResource.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/procedure2/Procedure.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureDeque.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 0
 0
 29
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/RootProcedureState.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/procedure2/TestProcedureToString.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/procedure2/store/ProcedureStor

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239

[03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
index 2939a56..681e263 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedState


[25/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239@

[49/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/apidocs/src-html/org/apache/hadoop/hbase/net/Address.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/net/Address.html 
b/apidocs/src-html/org/apache/hadoop/hbase/net/Address.html
index 733f195..66aba38 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/net/Address.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/net/Address.html
@@ -25,74 +25,93 @@
 017 */
 018package org.apache.hadoop.hbase.net;
 019
-020import 
org.apache.yetus.audience.InterfaceAudience;
-021
-022import 
org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
-023
-024/**
-025 * An immutable type to hold a hostname 
and port combo, like an Endpoint
-026 * or java.net.InetSocketAddress (but 
without danger of our calling
-027 * resolve -- we do NOT want a resolve 
happening every time we want
-028 * to hold a hostname and port combo). 
This class is also <>.
-029 * 

In implementation this class is a facade over Guava's {@link HostAndPort}. -030 * We cannot have Guava classes in our API hence this Type. -031 */ -032@InterfaceAudience.Public -033public class Address implements Comparable

{ -034 private HostAndPort hostAndPort; -035 -036 private Address(HostAndPort hostAndPort) { -037this.hostAndPort = hostAndPort; -038 } -039 -040 public static Address fromParts(String hostname, int port) { -041return new Address(HostAndPort.fromParts(hostname, port)); -042 } -043 -044 public static Address fromString(String hostnameAndPort) { -045return new Address(HostAndPort.fromString(hostnameAndPort)); -046 } -047 -048 public String getHostname() { -049return this.hostAndPort.getHost(); -050 } -051 -052 public int getPort() { -053return this.hostAndPort.getPort(); -054 } -055 -056 @Override -057 public String toString() { -058return this.hostAndPort.toString(); -059 } -060 -061 @Override -062 // Don't use HostAndPort equals... It is wonky including -063 // ipv6 brackets -064 public boolean equals(Object other) { -065if (this == other) { -066 return true; -067} -068if (other instanceof Address) { -069 Address that = (Address)other; -070 return this.getHostname().equals(that.getHostname()) && -071 this.getPort() == that.getPort(); -072} -073return false; -074 } -075 -076 @Override -077 public int hashCode() { -078return this.getHostname().hashCode() ^ getPort(); -079 } -080 -081 @Override -082 public int compareTo(Address that) { -083int compare = this.getHostname().compareTo(that.getHostname()); -084if (compare != 0) return compare; -085return this.getPort() - that.getPort(); -086 } -087} +020import org.apache.commons.lang.StringUtils; +021import org.apache.yetus.audience.InterfaceAudience; +022 +023import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; +024 +025/** +026 * An immutable type to hold a hostname and port combo, like an Endpoint +027 * or java.net.InetSocketAddress (but without danger of our calling +028 * resolve -- we do NOT want a resolve happening every time we want +029 * to hold a hostname and port combo). This class is also <>. +030 *

In implementation this class is a facade over Guava's {@link HostAndPort}. +031 * We cannot have Guava classes in our API hence this Type. +032 */ +033@InterfaceAudience.Public +034public class Address implements Comparable

{ +035 private HostAndPort hostAndPort; +036 +037 private Address(HostAndPort hostAndPort) { +038this.hostAndPort = hostAndPort; +039 } +040 +041 public static Address fromParts(String hostname, int port) { +042return new Address(HostAndPort.fromParts(hostname, port)); +043 } +044 +045 public static Address fromString(String hostnameAndPort) { +046return new Address(HostAndPort.fromString(hostnameAndPort)); +047 } +048 +049 public String getHostname() { +050return this.hostAndPort.getHost(); +051 } +052 +053 public int getPort() { +054return this.hostAndPort.getPort(); +055 } +056 +057 @Override +058 public String toString() { +059return this.hostAndPort.toString(); +060 } +061 +062 /** +063 * If hostname is a.b.c and the port is 123, return a:123 instead of a.b.c:123. +064 * @return if host looks like it is resolved -- not an IP -- then strip the domain portion +065 * otherwise returns same as {@link #toString()}} +066 */ +067 public String toStringWithoutDomain() { +068String hostname = getHostname(); +069String [] parts = hostname.split("\\."); +070if (parts.length > 1) { +071 for (String part: parts) { +072if (!StringUtils.isNumeric(part)) { +073 return Address.fromParts(parts[0], getPort()).toString(); +074} +075 } +076} +077return toString(); +078 } +079 +080 @Override +081 // Don't use HostAndPort equals... It i

[06/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
index 2939a56..681e263 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
@@ -61,602 +61,608 @@
 053import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import 
org.apache.hadoop.hbase.util.FSUtils;
-056import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting 
to the available region servers.
-064 * Coordination happens via coordination 
engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race 
to grab a task.
-066 *
-067 * 

SplitLogManager monitors the tasks that it creates using the -068 * timeoutMonitor thread. If a task's progress is slow then -069 * {@link SplitLogManagerCoordination#checkTasks} will take away the -070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} -071 * and the task will be up for grabs again. When the task is done then it is -072 * deleted by SplitLogManager. -073 * -074 *

Clients call {@link #splitLogDistributed(Path)} to split a region server's -075 * log files. The caller thread waits in this method until all the log files -076 * have been split. -077 * -078 *

All the coordination calls made by this class are asynchronous. This is mainly -079 * to help reduce response time seen by the callers. -080 * -081 *

There is race in this design between the SplitLogManager and the -082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality -083 * already been completed by a SplitLogWorker. We rely on the idempotency of -084 * the log splitting task for correctness. -085 * -086 *

It is also assumed that every log splitting task is unique and once -087 * completed (either with success or with error) it will be not be submitted -088 * again. If a task is resubmitted then there is a risk that old "delete task" -089 * can delete the re-submission. -090 */ -091@InterfaceAudience.Private -092public class SplitLogManager { -093 private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class); -094 -095 private final MasterServices server; -096 -097 private final Configuration conf; -098 private final ChoreService choreService; -099 -100 public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min -101 -102 private long unassignedTimeout; -103 private long lastTaskCreateTime = Long.MAX_VALUE; -104 -105 @VisibleForTesting -106 final ConcurrentMap tasks = new ConcurrentHashMap<>(); -107 private TimeoutMonitor timeoutMonitor; -108 -109 private volatile Set deadWorkers = null; -110 private final Object deadWorkersLock = new Object(); -111 -112 /** -113 * Its OK to construct this object even when region-servers are not online. It does lookup the -114 * orphan tasks in coordination engine but it doesn't block waiting for them to be done. 
-115 * @param master the master services -116 * @param conf the HBase configuration -117 * @throws IOException -118 */ -119 public SplitLogManager(MasterServices master, Configuration conf) -120 throws IOException { -121this.server = master; -122this.conf = conf; -123this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); -124if (server.getCoordinatedStateManager() != null) { -125 SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); -126 Set failedDeletions = Collections.synchronizedSet(new HashSet()); -127 SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); -128 coordination.setDetails(details); -129 coordination.init(); -130} -131this.unassignedTimeout = -132 conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); -133this.timeoutMonitor = -134new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000), -135master); -136 choreService.scheduleChore(timeoutMonitor); -137 } -138 -139 private SplitLogManagerCoordination getSplitLogManagerCoordination() { -140return server.getCoordinatedState


[17/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// cre

[21/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
index b590002..9b2a580 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -560,806 +560,811 @@
 552return this;
 553  }
 554
-555  public ColumnFamilyDescriptorBuilder 
setValue(final Bytes key, final Bytes value) {
-556desc.setValue(key, value);
+555  public ColumnFamilyDescriptorBuilder 
setNewVersionBehavior(final boolean value) {
+556desc.setNewVersionBehavior(value);
 557return this;
 558  }
 559
-560  public ColumnFamilyDescriptorBuilder 
setValue(final byte[] key, final byte[] value) {
+560  public ColumnFamilyDescriptorBuilder 
setValue(final Bytes key, final Bytes value) {
 561desc.setValue(key, value);
 562return this;
 563  }
 564
-565  public ColumnFamilyDescriptorBuilder 
setValue(final String key, final String value) {
+565  public ColumnFamilyDescriptorBuilder 
setValue(final byte[] key, final byte[] value) {
 566desc.setValue(key, value);
 567return this;
 568  }
 569
-570  /**
-571   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
-572   * number of versions, compression 
settings, etc.
-573   *
-574   * It is used as input when creating a 
table or adding a column.
-575   * TODO: make this package-private 
after removing the HColumnDescriptor
-576   */
-577  @InterfaceAudience.Private
-578  public static class 
ModifyableColumnFamilyDescriptor
-579  implements ColumnFamilyDescriptor, 
Comparable {
-580
-581// Column family name
-582private final byte[] name;
-583
-584// Column metadata
-585private final Map 
values = new HashMap<>();
-586
-587/**
-588 * A map which holds the 
configuration specific to the column family. The
-589 * keys of the map have the same 
names as config keys and override the
-590 * defaults with cf-specific 
settings. Example usage may be for compactions,
-591 * etc.
-592 */
-593private final Map configuration = new HashMap<>();
-594
-595/**
-596 * Construct a column descriptor 
specifying only the family name The other
-597 * attributes are defaulted.
-598 *
-599 * @param name Column family name. 
Must be 'printable' -- digit or
-600 * letter -- and may not contain a 
:
-601 * TODO: make this private after the 
HCD is removed.
-602 */
-603@InterfaceAudience.Private
-604public 
ModifyableColumnFamilyDescriptor(final byte[] name) {
-605  this(isLegalColumnFamilyName(name), 
getDefaultValuesBytes(), Collections.emptyMap());
-606}
-607
-608/**
-609 * Constructor. Makes a deep copy of 
the supplied descriptor.
-610 * TODO: make this private after the 
HCD is removed.
-611 * @param desc The descriptor.
-612 */
-613@InterfaceAudience.Private
-614public 
ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) {
-615  this(desc.getName(), 
desc.getValues(), desc.getConfiguration());
-616}
-617
-618private 
ModifyableColumnFamilyDescriptor(byte[] name, Map values, 
Map config) {
-619  this.name = name;
-620  this.values.putAll(values);
-621  
this.configuration.putAll(config);
-622}
-623
-624@Override
-625public byte[] getName() {
-626  return Bytes.copy(name);
+570  public ColumnFamilyDescriptorBuilder 
setValue(final String key, final String value) {
+571desc.setValue(key, value);
+572return this;
+573  }
+574
+575  /**
+576   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
+577   * number of versions, compression 
settings, etc.
+578   *
+579   * It is used as input when creating a 
table or adding a column.
+580   * TODO: make this package-private 
after removing the HColumnDescriptor
+581   */
+582  @InterfaceAudience.Private
+583  public static class 
ModifyableColumnFamilyDescriptor
+584  implements ColumnFamilyDescriptor, 
Comparable {
+585
+586// Column family name
+587private final byte[] name;
+588
+589// Column metadata
+590private final Map 
values = new HashMap<>();
+591
+592/**
+593 * A map which holds the 
configuration specific to the column family. The
+594 * keys of the map have 

[23/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
index ad601c4..53e455f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239@Override
-1240public boolean visit(Result 
rowResult) throws IOException {
-1241 

[51/51] [partial] hbase-site git commit: Published site at .

Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/6674e3ab
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/6674e3ab
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/6674e3ab

Branch: refs/heads/asf-site
Commit: 6674e3ab76ca2c8b867fc92b94e07154326b8f7b
Parents: 250fddb
Author: jenkins 
Authored: Sun Feb 4 15:13:52 2018 +
Committer: jenkins 
Committed: Sun Feb 4 15:13:52 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apidocs/index-all.html  | 6 +
 .../client/ColumnFamilyDescriptorBuilder.html   |37 +-
 .../ColumnFamilyDescriptorBuilder.html  |18 +-
 .../org/apache/hadoop/hbase/net/Address.html|41 +-
 .../org/apache/hadoop/hbase/ChoreService.html   | 2 +-
 .../client/ColumnFamilyDescriptorBuilder.html   |  1471 +-
 .../org/apache/hadoop/hbase/net/Address.html|   155 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 24468 -
 checkstyle.rss  |56 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 1 +
 devapidocs/allclasses-noframe.html  | 1 +
 devapidocs/constant-values.html |10 +-
 devapidocs/index-all.html   |26 +-
 .../MetaTableAccessor.CloseableVisitor.html | 2 +-
 .../MetaTableAccessor.CollectAllVisitor.html| 6 +-
 .../MetaTableAccessor.CollectingVisitor.html|12 +-
 .../MetaTableAccessor.DefaultVisitorBase.html   | 8 +-
 .../MetaTableAccessor.TableVisitorBase.html | 8 +-
 .../hadoop/hbase/MetaTableAccessor.Visitor.html | 4 +-
 .../apache/hadoop/hbase/MetaTableAccessor.html  |   108 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 ...uilder.ModifyableColumnFamilyDescriptor.html |   176 +-
 .../client/ColumnFamilyDescriptorBuilder.html   |37 +-
 .../ColumnFamilyDescriptorBuilder.html  |18 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 ...essorHost.EnvironmentPriorityComparator.html | 6 +-
 .../CoprocessorHost.ObserverGetter.html | 2 +-
 .../CoprocessorHost.ObserverOperation.html  |16 +-
 ...ocessorHost.ObserverOperationWithResult.html |18 +-
 ...ssorHost.ObserverOperationWithoutResult.html |12 +-
 .../hbase/coprocessor/CoprocessorHost.html  |38 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/ipc/NettyRpcConnection.html| 4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   272 +-
 .../SplitLogManager.ResubmitDirective.html  |10 +-
 .../hbase/master/SplitLogManager.Task.html  |32 +-
 .../hbase/master/SplitLogManager.TaskBatch.html |14 +-
 .../SplitLogManager.TerminationStatus.html  |18 +-
 .../master/SplitLogManager.TimeoutMonitor.html  | 8 +-
 .../hadoop/hbase/master/SplitLogManager.html|54 +-
 ...signmentManager.RegionInTransitionChore.html | 6 +-
 ...ssignmentManager.RegionInTransitionStat.html |40 +-
 .../master/assignment/AssignmentManager.html|   246 +-
 .../master/cleaner/CleanerChore.Action.html | 4 +-
 .../cleaner/CleanerChore.CleanerTask.html   |18 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 ...rocedureDispatcher.AbstractRSRemoteCall.html |22 +-
 ...ocedureDispatcher.CloseRegionRemoteCall.html |14 +-
 ...ispatcher.CompatRemoteProcedureResolver.html |16 +-
 ...eDispatcher.ExecuteProceduresRemoteCall.html |20 +-
 ...rocedureDispatcher.OpenRegionRemoteCall.html |12 +-
 ...rocedureDispatcher.RegionCloseOperation.html |16 +-
 ...ProcedureDispatcher.RegionOpenOperation.html |16 +-
 .../RSProcedureDispatcher.RegionOperation.html  | 8 +-
 ...edureDispatcher.RemoteProcedureResolver.html | 8 +-
 .../RSProcedureDispatcher.ServerOperation.html  |12 +-
 .../master/procedure/RSProcedureDispatcher.html |10 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/net/Address.html|45 +-
 .../org/apache/hadoop

[35/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index 99f1fad..747c594 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RSRpcServices.ScannerListener
+private class RSRpcServices.ScannerListener
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements LeaseListener
 Instantiated as a scanner lease. If the lease times out, 
the scanner is
@@ -210,7 +210,7 @@ implements 
 
 scannerName
-private final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String scannerName
+private final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String scannerName
 
 
 
@@ -227,7 +227,7 @@ implements 
 
 ScannerListener
-ScannerListener(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String n)
+ScannerListener(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String n)
 
 
 
@@ -244,7 +244,7 @@ implements 
 
 leaseExpired
-public void leaseExpired()
+public void leaseExpired()
 Description copied from 
interface: LeaseListener
 When a lease expires, this method is called.
 



[41/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
 
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
index 6e416db..225ac26 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum SplitLogManager.ResubmitDirective
+public static enum SplitLogManager.ResubmitDirective
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum
 
 
@@ -210,7 +210,7 @@ the order they are declared.
 
 
 CHECK
-public static final SplitLogManager.ResubmitDirective CHECK
+public static final SplitLogManager.ResubmitDirective CHECK
 
 
 
@@ -219,7 +219,7 @@ the order they are declared.
 
 
 FORCE
-public static final SplitLogManager.ResubmitDirective FORCE
+public static final SplitLogManager.ResubmitDirective FORCE
 
 
 
@@ -236,7 +236,7 @@ the order they are declared.
 
 
 values
-public static SplitLogManager.ResubmitDirective[] values()
+public static SplitLogManager.ResubmitDirective[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -256,7 +256,7 @@ for (SplitLogManager.ResubmitDirective c : 
SplitLogManager.ResubmitDirective.val
 
 
 valueOf
-public static SplitLogManager.ResubmitDirective valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static SplitLogManager.ResubmitDirective valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.Task.html 
b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
index c563b64..a3bb42d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static class SplitLogManager.Task
+public static class SplitLogManager.Task
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 in memory state of an active task.
 
@@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 last_update
-public volatile long last_update
+public volatile long last_update
 
 
 
@@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 last_version
-public volatile int last_version
+public volatile int last_version
 
 
 
@@ -271,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cur_worker_name
-public volatile ServerName cur_worker_name
+public volatile ServerName cur_worker_name
 
 
 
@@ -280,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 batch
-public volatile SplitLogManager.TaskBatch 
batch
+public volatile SplitLogManager.TaskBatch 
batch
 
 
 
@@ -289,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 status
-public volatile SplitLogManager.TerminationStatus status
+public volatile SplitLogManager.TerminationStatus status
 
 
 
@@ -298,7 +298,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 incarnation
-public volatile http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger incarnation
+public volatile http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger incarnation
 
 
 
@@ -307,7 +307,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 unforcedResubmits
-public final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/At

[33/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index fe6532c..90511ca 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -705,19 +705,19 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.ScanType
 org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.BloomType
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.html
index a9b8590..1a58d86 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.html
@@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-MetricsWAL
+MetricsWAL, ReplicationSourceWALActionListener
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/wal/class-use/WALActionsListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/class-use/WALActionsListener.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/class-use/WALActionsListener.html
index df45e2f..3fb814d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/class-use/WALActionsListener.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/class-use/WALActionsListener.html
@@ -87,6 +87,10 @@
  
 
 
+org.apache.hadoop.hbase.replication.regionserver
+ 
+
+
 org.apache.hadoop.hbase.wal
  
 
@@ -192,6 +196,26 @@
 
 
 
+
+
+
+Uses of WALActionsListener in org.apache.hadoop.hbase.replication.regionserver
+
+Classes in org.apache.hadoop.hbase.replication.regionserver
 that implement WALActionsListener 
+
+Modifier and Type
+Class and Description
+
+
+
+(package private) class 
+ReplicationSourceWALActionListener
+Used to receive new wals.
+
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-use.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-use.html
index 88796c2..08b3a68 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-use.html
@@ -97,6 +97,10 @@
  
 
 
+org.apache.hadoop.hbase.replication.regionserver
+ 
+
+
 org.apache.hadoop.hbase.wal
  
 
@@ -272,6 +276,23 @@
 
 
 
+
+
+
+
+Classes in org.apache.hadoop.hbase.regionserver.wal
 used by org.apache.hadoop.hbase.replication.regionserver 
+
+Class and Description
+
+
+
+WALActionsListener
+Get notification of WAL events.
+
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-

[34/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index c381304..0eecb74 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RSRpcServices
+public class RSRpcServices
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements HBaseRPCErrorHandler, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface,
 PriorityFunction, ConfigurationObserver
 Implements the regionserver RPC services.
@@ -926,7 +926,7 @@ implements 
 
 LOG
-protected static final org.slf4j.Logger LOG
+protected static final org.slf4j.Logger LOG
 
 
 
@@ -935,7 +935,7 @@ implements 
 
 REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
 RPC scheduler to use for the region server.
 
 See Also:
@@ -949,7 +949,7 @@ implements 
 
 REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
 Minimum allowable time limit delta (in milliseconds) that 
can be enforced during scans. This
  configuration exists to prevent the scenario where a time limit is specified 
to be so
  restrictive that the time limit is reached immediately (before any cells are 
scanned).
@@ -965,7 +965,7 @@ implements 
 
 DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
 Default value of REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
 
 See Also:
@@ -979,7 +979,7 @@ implements 
 
 BATCH_ROWS_THRESHOLD_NAME
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BATCH_ROWS_THRESHOLD_NAME
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BATCH_ROWS_THRESHOLD_NAME
 Number of rows in a batch operation above which a warning 
will be logged.
 
 See Also:
@@ -993,7 +993,7 @@ implements 
 
 BATCH_ROWS_THRESHOLD_DEFAULT
-static final int BATCH_ROWS_THRESHOLD_DEFAULT
+static final int BATCH_ROWS_THRESHOLD_DEFAULT
 Default value of BATCH_ROWS_THRESHOLD_NAME
 
 See Also:
@@ -1007,7 +1007,7 @@ implements 
 
 RESERVOIR_ENABLED_KEY
-protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RESERVOIR_ENABLED_KEY
+protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RESERVOIR_ENABLED_KEY
 
 See Also:
 Constant
 Field Values
@@ -1020,7 +1020,7 @@ implements 
 
 requestCount
-final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">LongAdder requestCount
+final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">LongAdder requestCount
 
 
 
@@ -1029,7 +1029,7 @@ implements 
 
 rpcGetRequestCount
-final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcGetRequestCount
+final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcGetRequestCount
 
 
 
@@ -1038,7 +1038,7 @@ implements 
 
 rpcScanRequestCount
-final http://doc

[28/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239

[47/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 53a7406..5161981 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2018 The Apache Software Foundation
 
-  File: 3524,
- Errors: 16593,
+  File: 3527,
+ Errors: 16583,
  Warnings: 0,
  Infos: 0
   
@@ -6411,7 +6411,7 @@ under the License.
   0
 
 
-  10
+  9
 
   
   
@@ -6803,7 +6803,7 @@ under the License.
   0
 
 
-  14
+  1
 
   
   
@@ -11143,7 +11143,7 @@ under the License.
   0
 
 
-  2
+  4
 
   
   
@@ -21083,7 +21083,7 @@ under the License.
   0
 
 
-  16
+  15
 
   
   
@@ -22684,6 +22684,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.net.TestAddress.java";>org/apache/hadoop/hbase/net/TestAddress.java
+
+
+  0
+
+
+  0
+
+
+  2
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer.java";>org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
 
@@ -26352,6 +26366,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.visibility.VisibilityLabelsWithDeletesTestBase.java";>org/apache/hadoop/hbase/security/visibility/VisibilityLabelsWithDeletesTestBase.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.TableDescriptors.java";>org/apache/hadoop/hbase/TableDescriptors.java
 
 
@@ -39078,6 +39106,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALActionListener.java";>org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.querymatcher.TestNewVersionBehaviorTracker.java";>org/apache/hadoop/hbase/regionserver/querymatcher/TestNewVersionBehaviorTracker.java
 
 
@@ -39857,7 +39899,7 @@ under the License.
   0
 
 
-  3
+  4
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/coc.html
--
diff --git a/coc.html b/coc.html
index 62e95ee..3b1d91e 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-03
+  Last Published: 
2018-02-04
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 5606bc6..ff7bfb6 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @

[19/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
index 5844c3b..80259dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
@@ -159,7 +159,7 @@
 151}
 152Task task = 
findOrCreateOrphanTask(path);
 153if (task.isOrphan() && 
(task.incarnation.get() == 0)) {
-154  LOG.info("resubmitting unassigned 
orphan task " + path);
+154  LOG.info("Resubmitting unassigned 
orphan task " + path);
 155  // ignore failure to resubmit. The 
timeout-monitor will handle it later
 156  // albeit in a more crude fashion
 157  resubmitTask(path, task, FORCE);
@@ -210,7 +210,7 @@
 202  
SplitLogCounters.tot_mgr_resubmit_force.increment();
 203  version = -1;
 204}
-205LOG.info("resubmitting task " + 
path);
+205LOG.info("Resubmitting task " + 
path);
 206task.incarnation.incrementAndGet();
 207boolean result = resubmit(path, 
version);
 208if (!result) {
@@ -288,7 +288,7 @@
 280
SplitLogCounters.tot_mgr_rescan_deleted.increment();
 281  }
 282  
SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
-283  LOG.debug("deleted task without in 
memory state " + path);
+283  LOG.debug("Deleted task without in 
memory state " + path);
 284  return;
 285}
 286synchronized (task) {
@@ -336,13 +336,13 @@
 328  }
 329
 330  private void createNodeSuccess(String 
path) {
-331LOG.debug("put up splitlog task at 
znode " + path);
+331LOG.debug("Put up splitlog task at 
znode " + path);
 332getDataSetWatch(path, zkretries);
 333  }
 334
 335  private void createNodeFailure(String 
path) {
 336// TODO the Manager should split the 
log locally instead of giving up
-337LOG.warn("failed to create task node" 
+ path);
+337LOG.warn("Failed to create task node 
" + path);
 338setDone(path, FAILURE);
 339  }
 340
@@ -368,15 +368,15 @@
 360data = 
ZKMetadata.removeMetaData(data);
 361SplitLogTask slt = 
SplitLogTask.parseFrom(data);
 362if (slt.isUnassigned()) {
-363  LOG.debug("task not yet acquired " 
+ path + " ver = " + version);
+363  LOG.debug("Task not yet acquired " 
+ path + ", ver=" + version);
 364  handleUnassignedTask(path);
 365} else if (slt.isOwned()) {
 366  heartbeat(path, version, 
slt.getServerName());
 367} else if (slt.isResigned()) {
-368  LOG.info("task " + path + " entered 
state: " + slt.toString());
+368  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 369  resubmitOrFail(path, FORCE);
 370} else if (slt.isDone()) {
-371  LOG.info("task " + path + " entered 
state: " + slt.toString());
+371  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 372  if (taskFinisher != null && 
!ZKSplitLog.isRescanNode(watcher, path)) {
 373if 
(taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == 
Status.DONE) {
 374  setDone(path, SUCCESS);
@@ -387,7 +387,7 @@
 379setDone(path, SUCCESS);
 380  }
 381} else if (slt.isErr()) {
-382  LOG.info("task " + path + " entered 
state: " + slt.toString());
+382  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 383  resubmitOrFail(path, CHECK);
 384} else {
 385  LOG.error(HBaseMarkers.FATAL, 
"logic error - unexpected zk state for path = "
@@ -403,7 +403,7 @@
 395  }
 396
 397  private void 
getDataSetWatchFailure(String path) {
-398LOG.warn("failed to set data watch " 
+ path);
+398LOG.warn("Failed to set data watch " 
+ path);
 399setDone(path, FAILURE);
 400  }
 401
@@ -412,7 +412,7 @@
 404if (task == null) {
 405  if 
(!ZKSplitLog.isRescanNode(watcher, path)) {
 406
SplitLogCounters.tot_mgr_unacquired_orphan_done.increment();
-407LOG.debug("unacquired orphan task 
is done " + path);
+407LOG.debug("Unacquired orphan task 
is done " + path);
 408  }
 409} else {
 410  synchronized (task) {
@@ -449,7 +449,7 @@
 441
 442  private Task 
findOrCreateOrphanTask(String path) {
 443return 
computeIfAbsent(details.getTasks(), path, Task::new, () -> {
-444  LOG.info("creating orphan task " + 
path);
+444  LOG.info("Creating orphan task " + 
path);
 445  
SplitLogCounters.tot_mgr_orphan_task_acquired.increment();
 446});
 447  }
@@ -458,7 +458,7 @@
 450Task task = 
findOrCreateOrphanTask(path);
 451  

[31/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.html
new file mode 100644
index 000..c32f3bb
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALActionListener.html
@@ -0,0 +1,439 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+ReplicationSourceWALActionListener (Apache HBase 3.0.0-SNAPSHOT 
API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":9,"i3":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.replication.regionserver
+Class 
ReplicationSourceWALActionListener
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALActionListener
+
+
+
+
+
+
+
+All Implemented Interfaces:
+WALActionsListener
+
+
+
+@InterfaceAudience.Private
+class ReplicationSourceWALActionListener
+extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+implements WALActionsListener
+Used to receive new wals.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.conf.Configuration
+conf 
+
+
+private ReplicationSourceManager
+manager 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+ReplicationSourceWALActionListener(org.apache.hadoop.conf.Configuration conf,
+  ReplicationSourceManager manager) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Static Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+void
+postLogRoll(org.apache.hadoop.fs.Path oldPath,
+   org.apache.hadoop.fs.Path newPath)
+The WAL has been rolled.
+
+
+
+void
+preLogRoll(org.apache.hadoop.fs.Path oldPath,
+  org.apache.hadoop.fs.Path newPath)
+The WAL is going to be rolled.
+
+
+
+(package private) static void
+scopeWALEdits(WALKey logKey,
+ WALEdit logEdit,
+ org.apache.hadoop.conf.Configuration conf)
+Utility method used to set the correct scopes on each log 
key.
+
+
+
+void
+visitLogEntryBeforeWrite(WALKey logKey,
+WALEdit logEdit) 
+
+
+
+
+
+
+Methods inherited from class java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or in

[42/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index f19ffcc..49b9aea 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -2583,7 +2583,7 @@ implements 
 
 stopProcedureExecutor
-private void stopProcedureExecutor()
+private void stopProcedureExecutor()
 
 
 
@@ -2592,7 +2592,7 @@ implements 
 
 stopChores
-private void stopChores()
+private void stopChores()
 
 
 
@@ -2601,7 +2601,7 @@ implements 
 
 getRemoteInetAddress
-http://docs.oracle.com/javase/8/docs/api/java/net/InetAddress.html?is-external=true";
 title="class or interface in java.net">InetAddress getRemoteInetAddress(int port,
+http://docs.oracle.com/javase/8/docs/api/java/net/InetAddress.html?is-external=true";
 title="class or interface in java.net">InetAddress getRemoteInetAddress(int port,
  long serverStartCode)
   throws http://docs.oracle.com/javase/8/docs/api/java/net/UnknownHostException.html?is-external=true";
 title="class or interface in java.net">UnknownHostException
 
@@ -2618,7 +2618,7 @@ implements 
 
 getMaxBalancingTime
-private int getMaxBalancingTime()
+private int getMaxBalancingTime()
 
 Returns:
 Maximum time we should run balancer for
@@ -2631,7 +2631,7 @@ implements 
 
 getMaxRegionsInTransition
-private int getMaxRegionsInTransition()
+private int getMaxRegionsInTransition()
 
 Returns:
 Maximum number of regions in transition
@@ -2644,7 +2644,7 @@ implements 
 
 balanceThrottling
-private void balanceThrottling(long nextBalanceStartTime,
+private void balanceThrottling(long nextBalanceStartTime,
int maxRegionsInTransition,
long cutoffTime)
 It first sleep to the next balance plan start time. 
Meanwhile, throttling by the max
@@ -2663,7 +2663,7 @@ implements 
 
 balance
-public boolean balance()
+public boolean balance()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2677,7 +2677,7 @@ implements 
 
 balance
-public boolean balance(boolean force)
+public boolean balance(boolean force)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2691,7 +2691,7 @@ implements 
 
 getRegionNormalizer
-public RegionNormalizer getRegionNormalizer()
+public RegionNormalizer getRegionNormalizer()
 
 Specified by:
 getRegionNormalizer in
 interface MasterServices
@@ -2706,7 +2706,7 @@ implements 
 
 normalizeRegions
-public boolean normalizeRegions()
+public boolean normalizeRegions()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Perform normalization of cluster (invoked by RegionNormalizerChore).
 
@@ -2725,7 +2725,7 @@ implements 
 
 getClientIdAuditPrefix
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getClientIdAuditPrefix()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getClientIdAuditPrefix()
 
 Specified by:
 getClientIdAuditPrefix in
 interface MasterServices
@@ -2740,7 +2740,7 @@ implements 
 
 setCatalogJanitorEnabled
-public void setCatalogJanitorEnabled(boolean b)
+public void setCatalogJanitorEnabled(boolean b)
 Switch for the background CatalogJanitor thread.
  Used for testing.  The thread will continue to run.  It will just be a noop
  if disabled.
@@ -2756,7 +2756,7 @@ implements 
 
 mergeRegions
-public long mergeRegions(RegionInfo[] regionsToMerge,
+public long mergeRegions(RegionInfo[] regionsToMerge,
  boolean forcible,
  long nonceGroup,
  long nonce)
@@ -2784,7 +2784,7 @@ implements 
 
 splitRegion
-public long splitRegion(RegionInfo regionInfo,
+public long splitRegion(RegionInfo regionInfo,
 byte[] splitRow,
 long nonceGroup,
 long nonce)
@@ -2812,7 +2812,7 @@ implements 
 
 move
-public void move(byte[] encodedRegionName,
+public void move(byte[] encodedRegionName,
  byte[] destServerName)
   throws HBaseIOException
 
@@ -2827,7 +2827,7 @@ implements 
 
 createTable
-public long createTable(TableDescriptor tableDescriptor,
+public long createTable(TableDescriptor tableDescriptor,
 byte[][] splitKeys,

  1   2   >