hbase git commit: HBASE-19096 Add RowMutations batch support in AsyncTable

2017-11-28 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8688da9e9 -> 0c4c39553


HBASE-19096 Add RowMutations batch support in AsyncTable

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c4c3955
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c4c3955
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c4c3955

Branch: refs/heads/branch-2
Commit: 0c4c3955380e1927311a8f4b092e23532d2e795f
Parents: 8688da9
Author: Jerry He 
Authored: Tue Nov 28 18:41:23 2017 -0800
Committer: Jerry He 
Committed: Tue Nov 28 18:49:08 2017 -0800

--
 .../client/AsyncBatchRpcRetryingCaller.java |  36 +++--
 .../apache/hadoop/hbase/client/AsyncTable.java  |  12 +-
 .../hbase/client/MultiServerCallable.java   |  62 +++--
 .../hbase/shaded/protobuf/RequestConverter.java | 136 +++
 .../hbase/client/TestAsyncTableBatch.java   |  19 ++-
 5 files changed, 161 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c4c3955/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 2ae68c4..52eb821 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,7 +59,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -232,27 +232,19 @@ class AsyncBatchRpcRetryingCaller<T> {
   }
 
   private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-      List<CellScannable> cells) throws IOException {
+      List<CellScannable> cells, Map<Integer, Integer> rowMutationsIndexMap) throws IOException {
     ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
     ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
     ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
     ClientProtos.MutationProto.Builder mutationBuilder = ClientProtos.MutationProto.newBuilder();
     for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
-      // TODO: remove the extra for loop as we will iterate it in mutationBuilder.
-      if (!multiRequestBuilder.hasNonceGroup()) {
-        for (Action action : entry.getValue().actions) {
-          if (action.hasNonce()) {
-            multiRequestBuilder.setNonceGroup(conn.getNonceGenerator().getNonceGroup());
-            break;
-          }
-        }
-      }
-      regionActionBuilder.clear();
-      regionActionBuilder.setRegion(
-        RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey()));
-      regionActionBuilder = RequestConverter.buildNoDataRegionAction(entry.getKey(),
-        entry.getValue().actions, cells, regionActionBuilder, actionBuilder, mutationBuilder);
-      multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+      long nonceGroup = conn.getNonceGenerator().getNonceGroup();
+      // multiRequestBuilder will be populated with region actions.
+      // rowMutationsIndexMap will be non-empty after the call if there is RowMutations in the
+      // action list.
+      RequestConverter.buildNoDataRegionActions(entry.getKey(),
+        entry.getValue().actions, cells, multiRequestBuilder, regionActionBuilder, actionBuilder,
+        mutationBuilder, nonceGroup, rowMutationsIndexMap);
     }
     return multiRequestBuilder.build();
   }
@@ -337,8 +329,12 @@ class AsyncBatchRpcRetryingCaller<T> {
       }
       ClientProtos.MultiRequest req;
       List<CellScannable> cells = new ArrayList<>();
+      // Map from a created RegionAction to the original index for a RowMutations 

hbase git commit: HBASE-19096 Add RowMutations batch support in AsyncTable

2017-11-28 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 93b91e2cc -> e67a3699c


HBASE-19096 Add RowMutations batch support in AsyncTable

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e67a3699
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e67a3699
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e67a3699

Branch: refs/heads/master
Commit: e67a3699c463a9f222e5d1319d35994fea2a153d
Parents: 93b91e2
Author: Jerry He 
Authored: Tue Nov 28 18:41:23 2017 -0800
Committer: Jerry He 
Committed: Tue Nov 28 18:42:17 2017 -0800

--
 .../client/AsyncBatchRpcRetryingCaller.java |  36 +++--
 .../apache/hadoop/hbase/client/AsyncTable.java  |  12 +-
 .../hbase/client/MultiServerCallable.java   |  62 +++--
 .../hbase/shaded/protobuf/RequestConverter.java | 136 +++
 .../hbase/client/TestAsyncTableBatch.java   |  19 ++-
 5 files changed, 161 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e67a3699/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 2ae68c4..52eb821 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,7 +59,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -232,27 +232,19 @@ class AsyncBatchRpcRetryingCaller<T> {
   }
 
   private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-      List<CellScannable> cells) throws IOException {
+      List<CellScannable> cells, Map<Integer, Integer> rowMutationsIndexMap) throws IOException {
     ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
     ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
     ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
     ClientProtos.MutationProto.Builder mutationBuilder = ClientProtos.MutationProto.newBuilder();
     for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
-      // TODO: remove the extra for loop as we will iterate it in mutationBuilder.
-      if (!multiRequestBuilder.hasNonceGroup()) {
-        for (Action action : entry.getValue().actions) {
-          if (action.hasNonce()) {
-            multiRequestBuilder.setNonceGroup(conn.getNonceGenerator().getNonceGroup());
-            break;
-          }
-        }
-      }
-      regionActionBuilder.clear();
-      regionActionBuilder.setRegion(
-        RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey()));
-      regionActionBuilder = RequestConverter.buildNoDataRegionAction(entry.getKey(),
-        entry.getValue().actions, cells, regionActionBuilder, actionBuilder, mutationBuilder);
-      multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+      long nonceGroup = conn.getNonceGenerator().getNonceGroup();
+      // multiRequestBuilder will be populated with region actions.
+      // rowMutationsIndexMap will be non-empty after the call if there is RowMutations in the
+      // action list.
+      RequestConverter.buildNoDataRegionActions(entry.getKey(),
+        entry.getValue().actions, cells, multiRequestBuilder, regionActionBuilder, actionBuilder,
+        mutationBuilder, nonceGroup, rowMutationsIndexMap);
     }
     return multiRequestBuilder.build();
   }
@@ -337,8 +329,12 @@ class AsyncBatchRpcRetryingCaller<T> {
       }
       ClientProtos.MultiRequest req;
       List<CellScannable> cells = new ArrayList<>();
+      // Map from a created RegionAction to the original index for a RowMutations within

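The user-visible effect of the two commits above is that a RowMutations can now be mixed into an AsyncTable batch alongside ordinary Gets, Puts and Deletes. A minimal sketch of that usage against an HBase 2.0 async client; the table name, column family and row keys are illustrative assumptions, not taken from the patch:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncRowMutationsBatchSketch {
  public static void main(String[] args) throws Exception {
    byte[] cf = Bytes.toBytes("cf");
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
      AsyncTable<?> table = conn.getTable(TableName.valueOf("test"));

      // An atomic group of mutations against row1.
      RowMutations rm = new RowMutations(Bytes.toBytes("row1"));
      rm.add(new Put(Bytes.toBytes("row1")).addColumn(cf, Bytes.toBytes("A"), Bytes.toBytes("a")));
      rm.add(new Delete(Bytes.toBytes("row1")).addColumns(cf, Bytes.toBytes("B")));

      // Mixed into the same batch as ordinary operations against other rows;
      // the async batch path had no RowMutations support before this change.
      List<Row> actions = new ArrayList<>();
      actions.add(rm);
      actions.add(new Get(Bytes.toBytes("row2")));
      actions.add(new Put(Bytes.toBytes("row3")).addColumn(cf, Bytes.toBytes("A"), Bytes.toBytes("a")));

      List<CompletableFuture<Object>> futures = table.batch(actions);
      CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
    }
  }
}

Each returned future completes independently, so per-action failures can be inspected without failing the whole batch.
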
hbase git commit: HBASE-17049 Do not issue sync request when there are still entries in ringbuffer

2017-11-28 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e946d9d84 -> 8688da9e9


HBASE-17049 Do not issue sync request when there are still entries in ringbuffer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8688da9e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8688da9e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8688da9e

Branch: refs/heads/branch-2
Commit: 8688da9e9c4b54d5ccd22bc10eab5ac873325522
Parents: e946d9d
Author: zhangduo 
Authored: Fri Nov 24 21:26:30 2017 +0800
Committer: zhangduo 
Committed: Wed Nov 29 10:19:53 2017 +0800

--
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java   | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8688da9e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d4e113a..9baf803 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -463,12 +463,10 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       }
       return;
     }
-    // we have some unsynced data but haven't reached the batch size yet
-    if (!syncFutures.isEmpty()
-        && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) {
-      // we have at least one sync request
-      sync(writer);
-    }
+    // reach here means that we have some unsynced data but haven't reached the batch size yet
+    // but we will not issue a sync directly here even if there are sync requests because we may
+    // have some new data in the ringbuffer, so let's just return here and delay the decision of
+    // whether to issue a sync in the caller method.
   }
 
   private void consume() {
@@ -526,6 +524,12 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     // give up scheduling the consumer task.
     // 3. we set consumerScheduled to false and also give up scheduling consumer task.
     if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) {
+      // we will give up consuming so if there are some unsynced data we need to issue a sync.
+      if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() &&
+          syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) {
+        // no new data in the ringbuffer and we have at least one sync request
+        sync(writer);
+      }
       return;
     } else {
       // maybe someone has grabbed this before us



hbase git commit: HBASE-17049 Do not issue sync request when there are still entries in ringbuffer

2017-11-28 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master f6582400b -> 93b91e2cc


HBASE-17049 Do not issue sync request when there are still entries in ringbuffer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/93b91e2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/93b91e2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/93b91e2c

Branch: refs/heads/master
Commit: 93b91e2cc69d7d8a8175b0059d92543929b0eabb
Parents: f658240
Author: zhangduo 
Authored: Fri Nov 24 21:26:30 2017 +0800
Committer: zhangduo 
Committed: Wed Nov 29 10:19:42 2017 +0800

--
 .../hadoop/hbase/regionserver/wal/AsyncFSWAL.java   | 16 ++--
 1 file changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/93b91e2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d4e113a..9baf803 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -463,12 +463,10 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       }
       return;
     }
-    // we have some unsynced data but haven't reached the batch size yet
-    if (!syncFutures.isEmpty()
-        && syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) {
-      // we have at least one sync request
-      sync(writer);
-    }
+    // reach here means that we have some unsynced data but haven't reached the batch size yet
+    // but we will not issue a sync directly here even if there are sync requests because we may
+    // have some new data in the ringbuffer, so let's just return here and delay the decision of
+    // whether to issue a sync in the caller method.
   }
 
   private void consume() {
@@ -526,6 +524,12 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     // give up scheduling the consumer task.
     // 3. we set consumerScheduled to false and also give up scheduling consumer task.
    if (waitingConsumePayloadsGatingSequence.get() == waitingConsumePayloads.getCursor()) {
+      // we will give up consuming so if there are some unsynced data we need to issue a sync.
+      if (writer.getLength() > fileLengthAtLastSync && !syncFutures.isEmpty() &&
+          syncFutures.last().getTxid() > highestProcessedAppendTxidAtLastSync) {
+        // no new data in the ringbuffer and we have at least one sync request
+        sync(writer);
+      }
       return;
     } else {
       // maybe someone has grabbed this before us


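Taken together, the two hunks move the sync decision: appendAndSync() now only appends and returns, and consume() issues the sync once the ring buffer has drained, so a burst of appends pays for a single sync. A standalone model of that decision follows; the method and parameter names are hypothetical stand-ins, not the real AsyncFSWAL fields:

import java.util.NavigableSet;
import java.util.TreeSet;

public class SyncDecisionSketch {

  // Sync only when: nothing is left to consume in the ring buffer, the writer holds
  // unsynced bytes, and at least one caller is still waiting on a txid newer than the
  // last synced one. Otherwise defer and let the next consume() pass batch more appends.
  static boolean shouldSyncOnGiveUp(long gatingSequence, long ringBufferCursor,
      long writerLength, long fileLengthAtLastSync,
      NavigableSet<Long> pendingSyncTxids, long highestProcessedAppendTxidAtLastSync) {
    boolean ringBufferDrained = gatingSequence == ringBufferCursor;
    boolean hasUnsyncedData = writerLength > fileLengthAtLastSync;
    boolean hasWaiter = !pendingSyncTxids.isEmpty()
        && pendingSyncTxids.last() > highestProcessedAppendTxidAtLastSync;
    return ringBufferDrained && hasUnsyncedData && hasWaiter;
  }

  public static void main(String[] args) {
    NavigableSet<Long> pending = new TreeSet<>();
    pending.add(7L);
    // Entries still queued (cursor ahead of the gating sequence): defer the sync.
    System.out.println(shouldSyncOnGiveUp(5, 9, 2048, 1024, pending, 3)); // false
    // Ring buffer drained: issue the sync now so the waiter is not starved.
    System.out.println(shouldSyncOnGiveUp(9, 9, 2048, 1024, pending, 3)); // true
  }
}
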

hbase git commit: HBASE-19368 [nightly] Make xml test non-voting in branch-1.2

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 41b487795 -> 4fe4d755c


HBASE-19368 [nightly] Make xml test non-voting in branch-1.2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4fe4d755
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4fe4d755
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4fe4d755

Branch: refs/heads/branch-1
Commit: 4fe4d755ce5007ab9160995355a510cb3062dee6
Parents: 41b4877
Author: Michael Stack 
Authored: Tue Nov 28 13:32:26 2017 -0800
Committer: Michael Stack 
Committed: Tue Nov 28 16:38:40 2017 -0800

--
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4fe4d755/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 1f91582..c475430 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -49,7 +49,7 @@ pipeline {
     // output from surefire; sadly the archive function in yetus only works on file names.
     ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
     // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
-    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
+    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
     BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
     EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
   }



hbase git commit: HBASE-19368 [nightly] Make xml test non-voting in branch-1.2

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 4face2b66 -> 5f0219b86


HBASE-19368 [nightly] Make xml test non-voting in branch-1.2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f0219b8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f0219b8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f0219b8

Branch: refs/heads/branch-1.4
Commit: 5f0219b86de69cdcf42e0e9f909bc5fd10757635
Parents: 4face2b
Author: Michael Stack 
Authored: Tue Nov 28 13:32:26 2017 -0800
Committer: Michael Stack 
Committed: Tue Nov 28 16:38:16 2017 -0800

--
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f0219b8/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 94616f7..995f67c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -49,7 +49,7 @@ pipeline {
     // output from surefire; sadly the archive function in yetus only works on file names.
     ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
     // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
-    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
+    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
     BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
     EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
   }



hbase git commit: HBASE-19368 [nightly] Make xml test non-voting in branch-1.2

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b1912790f -> 604266f7b


HBASE-19368 [nightly] Make xml test non-voting in branch-1.2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/604266f7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/604266f7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/604266f7

Branch: refs/heads/branch-1.3
Commit: 604266f7b75927704a520b3ac905327312383e9e
Parents: b191279
Author: Michael Stack 
Authored: Tue Nov 28 13:32:26 2017 -0800
Committer: Michael Stack 
Committed: Tue Nov 28 16:37:41 2017 -0800

--
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/604266f7/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 94616f7..995f67c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -49,7 +49,7 @@ pipeline {
     // output from surefire; sadly the archive function in yetus only works on file names.
     ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
     // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
-    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
+    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
     BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
     EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
   }



hbase git commit: HBASE-19242 Add MOB compact support for AsyncAdmin

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 520b9efc2 -> f6582400b


HBASE-19242 Add MOB compact support for AsyncAdmin

Signed-off-by: Michael Stack 
Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6582400
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6582400
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6582400

Branch: refs/heads/master
Commit: f6582400bec7a9b2450437efbfb7139f212e5631
Parents: 520b9ef
Author: Balazs Meszaros 
Authored: Thu Nov 23 14:42:39 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 15:03:50 2017 -0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   | 130 +--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  69 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  24 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  13 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 232 ---
 .../apache/hadoop/hbase/client/RegionInfo.java  |  11 +
 .../hbase/client/TestAsyncRegionAdminApi.java   |  38 +++
 7 files changed, 341 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f6582400/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 6f1190e..d9f8e899 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -872,6 +872,33 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Compact a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void compact(TableName tableName, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
+   * Compact a column family within a table.  Asynchronous operation in that this method
+   * requests that a Compaction run and then it returns. It does not wait on the
+   * completion of Compaction (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param columnFamily column family within a table
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
    * Major compact a table. Asynchronous operation in that this method requests
    * that a Compaction run and then it returns. It does not wait on the completion of Compaction
    * (it can take a while).
@@ -916,6 +943,33 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Major compact a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void majorCompact(TableName tableName, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
+   * Major compact a column family within a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param columnFamily column family within a table
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
    * Compact all regions on the region server. Asynchronous operation in that this method requests
    * that a Compaction run and then 

hbase git commit: HBASE-19242 Add MOB compact support for AsyncAdmin

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 144795684 -> e946d9d84


HBASE-19242 Add MOB compact support for AsyncAdmin

Signed-off-by: Michael Stack 
Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e946d9d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e946d9d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e946d9d8

Branch: refs/heads/branch-2
Commit: e946d9d841a72d41c0c7458667c7fedc495bd306
Parents: 1447956
Author: Balazs Meszaros 
Authored: Thu Nov 23 14:42:39 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 15:04:56 2017 -0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   | 130 +--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  69 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  24 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  13 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 232 ---
 .../apache/hadoop/hbase/client/RegionInfo.java  |  11 +
 .../hbase/client/TestAsyncRegionAdminApi.java   |  38 +++
 7 files changed, 341 insertions(+), 176 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e946d9d8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 6f1190e..d9f8e899 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -872,6 +872,33 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Compact a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void compact(TableName tableName, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
+   * Compact a column family within a table.  Asynchronous operation in that this method
+   * requests that a Compaction run and then it returns. It does not wait on the
+   * completion of Compaction (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param columnFamily column family within a table
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
    * Major compact a table. Asynchronous operation in that this method requests
    * that a Compaction run and then it returns. It does not wait on the completion of Compaction
    * (it can take a while).
@@ -916,6 +943,33 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Major compact a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void majorCompact(TableName tableName, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
+   * Major compact a column family within a table.  Asynchronous operation in that this method requests that a
+   * Compaction run and then it returns. It does not wait on the completion of Compaction
+   * (it can take a while).
+   *
+   * @param tableName table to compact
+   * @param columnFamily column family within a table
+   * @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
+   * @throws IOException if not a mob column family or if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
+    throws IOException, InterruptedException;
+
+  /**
    * Compact all regions on the region server. Asynchronous operation in that this method requests
    * that a Compaction run and 

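With these overloads in place, a MOB compaction can be requested through the same Admin entry points as a normal compaction; the matching AsyncAdmin methods added by the same patch expose the operation in non-blocking form. A minimal sketch of the synchronous side, assuming the Admin interface shown in the diff above; the configuration, table name and column family are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MobCompactSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("mob_table"); // illustrative table name
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Request a compaction of the MOB data for the whole table; per the javadoc above,
      // the call returns once the request is accepted and does not wait for completion.
      admin.compact(table, CompactType.MOB);
      // Or major-compact just one MOB-enabled column family.
      admin.majorCompact(table, Bytes.toBytes("f"), CompactType.MOB);
    }
  }
}
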
hbase git commit: HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 24d82195c -> 41b487795


HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/41b48779
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/41b48779
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/41b48779

Branch: refs/heads/branch-1
Commit: 41b4877950fc3ff5cda10b1b25c398d75eab5677
Parents: 24d8219
Author: libisthanks 
Authored: Thu Nov 9 10:53:22 2017 +0800
Committer: Michael Stack 
Committed: Tue Nov 28 14:56:38 2017 -0800

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hbase/client/ClientSideRegionScanner.java   |   2 +
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 +++
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 +++
 .../hbase/mapreduce/TableMapReduceUtil.java |  40 +++
 .../mapreduce/TableSnapshotInputFormat.java |  24 +++-
 .../mapreduce/TableSnapshotInputFormatImpl.java | 112 +--
 .../hadoop/hbase/util/RegionSplitter.java   |  70 
 .../mapred/TestTableSnapshotInputFormat.java|  41 ---
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  35 --
 .../hadoop/hbase/util/TestRegionSplitter.java   |  22 +++-
 12 files changed, 381 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/41b48779/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..2df1c4b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
       org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
+        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
       /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions;
 
       org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions,
+        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else {
       throw new IllegalArgumentException("Unrecognized mapreduce implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/41b48779/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index df118fa..c54baec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -55,6 +55,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
+    htd.setReadOnly(true);
+
     // open region from the snapshot directory
     this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/41b48779/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index d5f225f..476c1a7 100644
--- 

hbase git commit: HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 6aaa9dcfd -> 4face2b66


HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4face2b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4face2b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4face2b6

Branch: refs/heads/branch-1.4
Commit: 4face2b662c5cee216ce7b710116bad23e8dfccc
Parents: 6aaa9dc
Author: libisthanks 
Authored: Thu Nov 9 10:53:22 2017 +0800
Committer: Michael Stack 
Committed: Tue Nov 28 14:56:06 2017 -0800

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hbase/client/ClientSideRegionScanner.java   |   2 +
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 +++
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 +++
 .../hbase/mapreduce/TableMapReduceUtil.java |  40 +++
 .../mapreduce/TableSnapshotInputFormat.java |  24 +++-
 .../mapreduce/TableSnapshotInputFormatImpl.java | 112 +--
 .../hadoop/hbase/util/RegionSplitter.java   |  70 
 .../mapred/TestTableSnapshotInputFormat.java|  41 ---
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  35 --
 .../hadoop/hbase/util/TestRegionSplitter.java   |  22 +++-
 12 files changed, 381 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4face2b6/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..2df1c4b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
       org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
+        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
       /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions;
 
       org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions,
+        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else {
       throw new IllegalArgumentException("Unrecognized mapreduce implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/4face2b6/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index df118fa..c54baec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -55,6 +55,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
+    htd.setReadOnly(true);
+
     // open region from the snapshot directory
     this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4face2b6/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index d5f225f..476c1a7 100644
--- 

hbase git commit: HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 dca65353d -> b1912790f


HBASE-18090 Improve TableSnapshotInputFormat to allow multiple mappers per region

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1912790
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1912790
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1912790

Branch: refs/heads/branch-1.3
Commit: b1912790f9c057e51fd7e6dde924bfd0f6e784cc
Parents: dca6535
Author: libisthanks 
Authored: Thu Nov 9 10:53:22 2017 +0800
Committer: Michael Stack 
Committed: Tue Nov 28 14:50:40 2017 -0800

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hbase/client/ClientSideRegionScanner.java   |   2 +
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 +++
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 +++
 .../hbase/mapreduce/TableMapReduceUtil.java |  40 +++
 .../mapreduce/TableSnapshotInputFormat.java |  24 +++-
 .../mapreduce/TableSnapshotInputFormatImpl.java | 112 +--
 .../hadoop/hbase/util/RegionSplitter.java   |  70 
 .../mapred/TestTableSnapshotInputFormat.java|  41 ---
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  35 --
 .../hadoop/hbase/util/TestRegionSplitter.java   |  20 
 12 files changed, 380 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1912790/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..2df1c4b 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
       org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
+        tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
       /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
       int expectedNumSplits = numRegions;
 
       org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions,
+        tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, numRegions, 1,
         expectedNumSplits, false);
     } else {
       throw new IllegalArgumentException("Unrecognized mapreduce implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1912790/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index dde2f10..ef89c32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -56,6 +56,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
+    htd.setReadOnly(true);
+
     // open region from the snapshot directory
     this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1912790/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index d5f225f..476c1a7 100644
--- 

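For users, the improvement surfaces as extra arguments on the snapshot job setup in TableMapReduceUtil (together with the RegionSplitter changes in the diffstat), so a single region can feed more than one mapper. A sketch of how a job might ask for several input splits per region; the trailing split-algorithm and splits-per-region arguments, the snapshot name and the restore directory are assumptions about the new overload, not copied from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJobSketch {

  // Trivial mapper just to make the job definition complete.
  public static class NoOpMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-snapshot-sketch");
    job.setJarByClass(SnapshotScanJobSketch.class);

    // Hypothetical snapshot name and restore dir; the last two arguments (split
    // algorithm, splits per region) are the assumed new knob: 8 mappers per region.
    TableMapReduceUtil.initTableSnapshotMapperJob("my_snapshot", new Scan(),
        NoOpMapper.class, ImmutableBytesWritable.class, NullWritable.class, job,
        true, new Path("/tmp/snapshot-restore"),
        new RegionSplitter.UniformSplit(), 8);

    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
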
hbase git commit: HBASE-19368 [nightly] Make xml test non-voting in branch-1.2

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 e1d695038 -> 690367fc4


HBASE-19368 [nightly] Make xml test non-voting in branch-1.2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/690367fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/690367fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/690367fc

Branch: refs/heads/branch-1.2
Commit: 690367fc4cc5092fa62b2f08dc3a86803fba5122
Parents: e1d6950
Author: Michael Stack 
Authored: Tue Nov 28 13:32:26 2017 -0800
Committer: Michael Stack 
Committed: Tue Nov 28 13:32:26 2017 -0800

--
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/690367fc/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 94616f7..995f67c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -49,7 +49,7 @@ pipeline {
     // output from surefire; sadly the archive function in yetus only works on file names.
     ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
     // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
-    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
+    TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
     BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
     EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
   }



hbase git commit: HBASE-19363 Tests under TestCheckAndMutate are identical

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4fef4cfc3 -> 144795684


HBASE-19363 Tests under TestCheckAndMutate are identical

Remove testCheckAndMutateUsingNewComparisonOperatorInstead

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14479568
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14479568
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14479568

Branch: refs/heads/branch-2
Commit: 1447956846ae77d3e40cc3ca4285b92cfda12a32
Parents: 4fef4cf
Author: Peter Somogyi 
Authored: Tue Nov 28 14:55:01 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 12:19:13 2017 -0800

--
 .../hadoop/hbase/client/TestCheckAndMutate.java | 39 
 1 file changed, 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14479568/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
index 5c39925..5e0791a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,17 +45,11 @@ public class TestCheckAndMutate {
   @Rule
   public TestName name = new TestName();
 
-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 TEST_UTIL.startMiniCluster();
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
 TEST_UTIL.shutdownMiniCluster();
@@ -154,36 +147,4 @@ public class TestCheckAndMutate {
 }
   }
 
-  @Test
-  public void testCheckAndMutateUsingNewComparisonOperatorInstead() throws Throwable {
-try (Table table = createTable()) {
-  // put one row
-  putOneRow(table);
-  // get row back and assert the values
-  getOneRowAndAssertAllExist(table);
-
-  // put the same row again with C column deleted
-  RowMutations rm = makeRowMutationsWithColumnCDeleted();
-  boolean res = table.checkAndMutate(ROWKEY, FAMILY, Bytes.toBytes("A"),
-CompareOperator.EQUAL, Bytes.toBytes("a"), rm);
-  assertTrue(res);
-
-  // get row back and assert the values
-  getOneRowAndAssertAllButCExist(table);
-
-  //Test that we get a region level exception
-  try {
-rm = getBogusRowMutations();
-        table.checkAndMutate(ROWKEY, FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL,
-          Bytes.toBytes("a"), rm);
-fail("Expected NoSuchColumnFamilyException");
-  } catch (RetriesExhaustedWithDetailsException e) {
-try {
-  throw e.getCause(0);
-} catch (NoSuchColumnFamilyException e1) {
-  // expected
-}
-  }
-}
-  }
 }



hbase git commit: HBASE-19363 Tests under TestCheckAndMutate are identical

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master b5a01685f -> 520b9efc2


HBASE-19363 Tests under TestCheckAndMutate are identical

Remove testCheckAndMutateUsingNewComparisonOperatorInstead

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/520b9efc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/520b9efc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/520b9efc

Branch: refs/heads/master
Commit: 520b9efc2de34b3b02ee86bad7ccb6e169788fc1
Parents: b5a0168
Author: Peter Somogyi 
Authored: Tue Nov 28 14:55:01 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 12:18:51 2017 -0800

--
 .../hadoop/hbase/client/TestCheckAndMutate.java | 39 
 1 file changed, 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/520b9efc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
index 5c39925..5e0791a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -46,17 +45,11 @@ public class TestCheckAndMutate {
   @Rule
   public TestName name = new TestName();
 
-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 TEST_UTIL.startMiniCluster();
   }
 
-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
 TEST_UTIL.shutdownMiniCluster();
@@ -154,36 +147,4 @@ public class TestCheckAndMutate {
 }
   }
 
-  @Test
-  public void testCheckAndMutateUsingNewComparisonOperatorInstead() throws Throwable {
-try (Table table = createTable()) {
-  // put one row
-  putOneRow(table);
-  // get row back and assert the values
-  getOneRowAndAssertAllExist(table);
-
-  // put the same row again with C column deleted
-  RowMutations rm = makeRowMutationsWithColumnCDeleted();
-  boolean res = table.checkAndMutate(ROWKEY, FAMILY, Bytes.toBytes("A"),
-CompareOperator.EQUAL, Bytes.toBytes("a"), rm);
-  assertTrue(res);
-
-  // get row back and assert the values
-  getOneRowAndAssertAllButCExist(table);
-
-  //Test that we get a region level exception
-  try {
-rm = getBogusRowMutations();
-        table.checkAndMutate(ROWKEY, FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL,
-          Bytes.toBytes("a"), rm);
-fail("Expected NoSuchColumnFamilyException");
-  } catch (RetriesExhaustedWithDetailsException e) {
-try {
-  throw e.getCause(0);
-} catch (NoSuchColumnFamilyException e1) {
-  // expected
-}
-  }
-}
-  }
 }



hbase-thirdparty git commit: HBASE-19247 ADDENDUM - update .so relocation

2017-11-28 Thread mdrob
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master 3d4bc1986 -> 74176f758


HBASE-19247 ADDENDUM - update .so relocation


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/74176f75
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/74176f75
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/74176f75

Branch: refs/heads/master
Commit: 74176f7586287dd63399644c3028eab4dc697688
Parents: 3d4bc19
Author: Mike Drob 
Authored: Wed Nov 15 12:26:01 2017 -0600
Committer: Mike Drob 
Committed: Tue Nov 28 14:04:20 2017 -0600

--
 hbase-shaded-netty/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/74176f75/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index 8f915f2..59ee5f9 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -125,8 +125,8 @@
 
 
-
+
 
 



hbase git commit: HBASE-19267 Remove compiler-plugin mapping executions as it breaks Java8 detection

2017-11-28 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 12efae482 -> 4fef4cfc3


HBASE-19267 Remove compiler-plugin mapping executions as it breaks Java8 detection

It seems like the original reason this execution filter was added is no
longer an issue for 2.0. In fact, these entries preclude Eclipse from
correctly using the Java8 source/target version that we have specified
(which creates numerous compilation errors in Eclipse).

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4fef4cfc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4fef4cfc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4fef4cfc

Branch: refs/heads/branch-2
Commit: 4fef4cfc300b18879bdfae746aaaf623da1b0928
Parents: 12efae4
Author: Josh Elser 
Authored: Wed Nov 15 14:42:29 2017 -0500
Committer: Josh Elser 
Committed: Tue Nov 28 14:54:32 2017 -0500

--
 hbase-backup/pom.xml  | 13 -
 hbase-client/pom.xml  | 13 -
 hbase-common/pom.xml  | 13 -
 hbase-examples/pom.xml| 14 --
 hbase-external-blockcache/pom.xml | 13 -
 hbase-hadoop-compat/pom.xml   | 13 -
 hbase-hadoop2-compat/pom.xml  | 13 -
 hbase-http/pom.xml| 13 -
 hbase-it/pom.xml  | 13 -
 hbase-mapreduce/pom.xml   | 13 -
 hbase-replication/pom.xml | 13 -
 hbase-rsgroup/pom.xml | 13 -
 hbase-server/pom.xml  | 13 -
 hbase-shell/pom.xml   | 13 -
 hbase-thrift/pom.xml  | 13 -
 hbase-zookeeper/pom.xml   | 13 -
 16 files changed, 209 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4fef4cfc/hbase-backup/pom.xml
--
diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
index feec397..1b79624 100644
--- a/hbase-backup/pom.xml
+++ b/hbase-backup/pom.xml
@@ -61,19 +61,6 @@
   
 
   
-
-  
-org.apache.maven.plugins
-maven-compiler-plugin
-[3.2,)
-
-  compile
-
-  
-  
-
-  
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fef4cfc/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 0d386e2..da392f2 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -65,19 +65,6 @@
   
 
   
-
-  
-org.apache.maven.plugins
-maven-compiler-plugin
-[3.2,)
-
-  compile
-
-  
-  
-
-  
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fef4cfc/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 7e2a4f0..559a795 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -181,19 +181,6 @@
 
   
 
-
-  
-org.apache.maven.plugins
-maven-compiler-plugin
-[3.2,)
-
-  compile
-
-  
-  
-
-  
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fef4cfc/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index ff67b4d..d0ad170 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -48,7 +48,6 @@
   
   
 maven-surefire-plugin
-${surefire.version}
 
   
@@ -97,19 +96,6 @@
 
   
 
-
-  
-org.apache.maven.plugins
-maven-compiler-plugin
-[3.2,)
-
-  compile
-
-

hbase git commit: HBASE-19351 Deprecated is missing in Table implementations

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8b6f305ac -> 12efae482


HBASE-19351 Deprecated is missing in Table implementations

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12efae48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12efae48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12efae48

Branch: refs/heads/branch-2
Commit: 12efae4829830f920c5a7d47ea00f8f3c9c7c342
Parents: 8b6f305
Author: Peter Somogyi 
Authored: Mon Nov 27 17:09:56 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 11:18:07 2017 -0800

--
 .../java/org/apache/hadoop/hbase/client/HTable.java  |  8 ++--
 .../hadoop/hbase/rest/client/RemoteHTable.java   | 15 ---
 .../hadoop/hbase/regionserver/RegionAsTable.java |  7 +++
 .../TestRegionServerReadRequestMetrics.java  |  5 +++--
 4 files changed, 28 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12efae48/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 61d955f..6030f86 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -233,6 +233,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public HTableDescriptor getTableDescriptor() throws IOException {
 HTableDescriptor htd = HBaseAdmin.getHTableDescriptor(tableName, 
connection, rpcCallerFactory,
   rpcControllerFactory, operationTimeoutMs, readRpcTimeoutMs);
@@ -745,7 +746,7 @@ public class HTable implements Table {
   final byte [] family, final byte [] qualifier, final byte [] value,
   final Put put)
   throws IOException {
-return checkAndPut(row, family, qualifier, CompareOp.EQUAL, value, put);
+return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, 
put);
   }
 
   private boolean doCheckAndPut(final byte [] row, final byte [] family,
@@ -773,6 +774,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndPut(final byte [] row, final byte [] family,
   final byte [] qualifier, final CompareOp compareOp, final byte [] value,
   final Put put)
@@ -799,7 +801,7 @@ public class HTable implements Table {
   @Override
   public boolean checkAndDelete(final byte [] row, final byte [] family, final 
byte [] qualifier,
   final byte [] value, final Delete delete) throws IOException {
-return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, 
delete);
+return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, 
value, delete);
   }
 
   private boolean doCheckAndDelete(final byte [] row, final byte [] family,
@@ -845,6 +847,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndDelete(final byte [] row, final byte [] family,
   final byte [] qualifier, final CompareOp compareOp, final byte [] value,
   final Delete delete)
@@ -919,6 +922,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndMutate(final byte [] row, final byte [] family, final 
byte [] qualifier,
 final CompareOp compareOp, final byte [] value, final RowMutations rm)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/12efae48/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 1eaaa65..39eb0a9 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -251,6 +251,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
+  @Deprecated
   public HTableDescriptor getTableDescriptor() throws IOException {
 StringBuilder sb = new StringBuilder();
 sb.append('/');
@@ -702,6 +703,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
+  @Deprecated
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
   CompareOp compareOp, byte[] value, Put put) throws IOException {
 throw new 

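A minimal, self-contained sketch of the deprecate-and-delegate pattern the HTable and RemoteHTable hunks above follow when steering callers from CompareOp to CompareOperator: the old overload stays for source compatibility, gains @Deprecated, and forwards to the replacement. The class and enums below are simplified stand-ins, not the real HBase client types.

// Hypothetical illustration only; LegacyCompareOp and NewCompareOperator stand in
// for CompareFilter.CompareOp and CompareOperator.
public class DeprecationExample {

  /** Stand-in for the legacy comparison enum. */
  enum LegacyCompareOp { EQUAL, NOT_EQUAL }

  /** Stand-in for the newer comparison enum. */
  enum NewCompareOperator { EQUAL, NOT_EQUAL }

  /** Old API: kept so existing callers still compile, but flagged for removal. */
  @Deprecated
  public boolean checkAndPut(byte[] row, LegacyCompareOp op, byte[] value) {
    // Translate the legacy enum and delegate to the replacement overload.
    return checkAndPut(row, NewCompareOperator.valueOf(op.name()), value);
  }

  /** New API: the only place that holds the actual logic. */
  public boolean checkAndPut(byte[] row, NewCompareOperator op, byte[] value) {
    // The real client would issue an RPC here; this sketch just checks its inputs.
    return row != null && value != null;
  }

  public static void main(String[] args) {
    DeprecationExample table = new DeprecationExample();
    @SuppressWarnings("deprecation")
    boolean ok = table.checkAndPut(new byte[] {1}, LegacyCompareOp.EQUAL, new byte[] {2});
    System.out.println("checkAndPut via deprecated overload: " + ok);
  }
}
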
hbase git commit: HBASE-19351 Deprecated is missing in Table implementations

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master b75510284 -> b5a01685f


HBASE-19351 Deprecated is missing in Table implementations

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5a01685
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5a01685
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5a01685

Branch: refs/heads/master
Commit: b5a01685f4aff0d97ec53a6eca26bd37dd9d9037
Parents: b755102
Author: Peter Somogyi 
Authored: Mon Nov 27 17:09:56 2017 +0100
Committer: Michael Stack 
Committed: Tue Nov 28 11:17:35 2017 -0800

--
 .../java/org/apache/hadoop/hbase/client/HTable.java  |  8 ++--
 .../hadoop/hbase/rest/client/RemoteHTable.java   | 15 ---
 .../hadoop/hbase/regionserver/RegionAsTable.java |  7 +++
 .../TestRegionServerReadRequestMetrics.java  |  5 +++--
 4 files changed, 28 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5a01685/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 61d955f..6030f86 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -233,6 +233,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public HTableDescriptor getTableDescriptor() throws IOException {
 HTableDescriptor htd = HBaseAdmin.getHTableDescriptor(tableName, 
connection, rpcCallerFactory,
   rpcControllerFactory, operationTimeoutMs, readRpcTimeoutMs);
@@ -745,7 +746,7 @@ public class HTable implements Table {
   final byte [] family, final byte [] qualifier, final byte [] value,
   final Put put)
   throws IOException {
-return checkAndPut(row, family, qualifier, CompareOp.EQUAL, value, put);
+return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, 
put);
   }
 
   private boolean doCheckAndPut(final byte [] row, final byte [] family,
@@ -773,6 +774,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndPut(final byte [] row, final byte [] family,
   final byte [] qualifier, final CompareOp compareOp, final byte [] value,
   final Put put)
@@ -799,7 +801,7 @@ public class HTable implements Table {
   @Override
   public boolean checkAndDelete(final byte [] row, final byte [] family, final 
byte [] qualifier,
   final byte [] value, final Delete delete) throws IOException {
-return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, 
delete);
+return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, 
value, delete);
   }
 
   private boolean doCheckAndDelete(final byte [] row, final byte [] family,
@@ -845,6 +847,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndDelete(final byte [] row, final byte [] family,
   final byte [] qualifier, final CompareOp compareOp, final byte [] value,
   final Delete delete)
@@ -919,6 +922,7 @@ public class HTable implements Table {
* {@inheritDoc}
*/
   @Override
+  @Deprecated
   public boolean checkAndMutate(final byte [] row, final byte [] family, final 
byte [] qualifier,
 final CompareOp compareOp, final byte [] value, final RowMutations rm)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5a01685/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 1eaaa65..39eb0a9 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -251,6 +251,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
+  @Deprecated
   public HTableDescriptor getTableDescriptor() throws IOException {
 StringBuilder sb = new StringBuilder();
 sb.append('/');
@@ -702,6 +703,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
+  @Deprecated
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
   CompareOp compareOp, byte[] value, Put put) throws IOException {
 throw new 

[2/2] hbase git commit: HBASE-19348 Fix error-prone errors for branch-1

2017-11-28 Thread apurtell
HBASE-19348 Fix error-prone errors for branch-1

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24d82195
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24d82195
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24d82195

Branch: refs/heads/branch-1
Commit: 24d82195cb82bd90755408b85d70c57cf4b3ff2f
Parents: f7f425e
Author: Chia-Ping Tsai 
Authored: Tue Nov 28 03:46:14 2017 +0800
Committer: Andrew Purtell 
Committed: Mon Nov 27 18:29:11 2017 -0800

--
 ...MasterAnnotationReadingPriorityFunction.java |  10 +-
 .../master/procedure/ServerCrashProcedure.java  |   4 +-
 .../master/snapshot/CloneSnapshotHandler.java   |   1 -
 .../org/apache/hadoop/hbase/TestCompare.java|   1 +
 .../hadoop/hbase/TestHRegionLocation.java   |   1 +
 .../hbase/TestIPv6NIOServerSocketChannel.java   |   2 +-
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   4 +-
 .../hadoop/hbase/client/TestMetaScanner.java|   1 +
 .../client/TestScannersFromClientSide2.java |   1 +
 .../coprocessor/TestCoprocessorInterface.java   |   4 +-
 .../apache/hadoop/hbase/filter/TestFilter.java  |   1 +
 .../TestMasterOperationsForRegionReplicas.java  |   3 +-
 .../procedure/TestMasterProcedureScheduler.java |   2 +
 .../procedure/TestWALProcedureStoreOnHDFS.java  | 141 +--
 14 files changed, 85 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/24d82195/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index 1e6dade..dc5d824 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -70,12 +70,10 @@ public class MasterAnnotationReadingPriorityFunction 
extends AnnotationReadingPr
   RegionServerStatusProtos.ReportRegionStateTransitionRequest
   tRequest = 
(RegionServerStatusProtos.ReportRegionStateTransitionRequest) param;
   for (RegionServerStatusProtos.RegionStateTransition rst : 
tRequest.getTransitionList()) {
-if (rst.getRegionInfoList() != null) {
-  for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
-TableName tn = ProtobufUtil.toTableName(info.getTableName());
-if (tn.isSystemTable()) {
-  return HConstants.SYSTEMTABLE_QOS;
-}
+for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
+  TableName tn = ProtobufUtil.toTableName(info.getTableName());
+  if (tn.isSystemTable()) {
+return HConstants.SYSTEMTABLE_QOS;
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/24d82195/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 3463000..1fbc428 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -183,8 +183,8 @@ implements ServerProcedureInterface {
   LOG.trace(state);
 }
 // Keep running count of cycles
-if (state.ordinal() != this.previousState) {
-  this.previousState = state.ordinal();
+if (state.getNumber() != this.previousState) {
+  this.previousState = state.getNumber();
   this.cycles = 0;
 } else {
   this.cycles++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/24d82195/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index 6f8bcd4..f4a6c95 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -123,7 +123,6 

[1/2] hbase git commit: HBASE-19348 Fix error-prone errors for branch-1

2017-11-28 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f7f425e53 -> 24d82195c
  refs/heads/branch-1.4 36fae801a -> 6aaa9dcfd


HBASE-19348 Fix error-prone errors for branch-1

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6aaa9dcf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6aaa9dcf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6aaa9dcf

Branch: refs/heads/branch-1.4
Commit: 6aaa9dcfd6a349b9c5315309ec7d38849bc3e124
Parents: 36fae80
Author: Chia-Ping Tsai 
Authored: Tue Nov 28 03:46:14 2017 +0800
Committer: Andrew Purtell 
Committed: Mon Nov 27 18:29:02 2017 -0800

--
 ...MasterAnnotationReadingPriorityFunction.java |  10 +-
 .../master/procedure/ServerCrashProcedure.java  |   4 +-
 .../master/snapshot/CloneSnapshotHandler.java   |   1 -
 .../org/apache/hadoop/hbase/TestCompare.java|   1 +
 .../hadoop/hbase/TestHRegionLocation.java   |   1 +
 .../hbase/TestIPv6NIOServerSocketChannel.java   |   2 +-
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   4 +-
 .../hadoop/hbase/client/TestMetaScanner.java|   1 +
 .../client/TestScannersFromClientSide2.java |   1 +
 .../coprocessor/TestCoprocessorInterface.java   |   4 +-
 .../apache/hadoop/hbase/filter/TestFilter.java  |   1 +
 .../TestMasterOperationsForRegionReplicas.java  |   3 +-
 .../procedure/TestMasterProcedureScheduler.java |   2 +
 .../procedure/TestWALProcedureStoreOnHDFS.java  | 141 +--
 14 files changed, 85 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6aaa9dcf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index 1e6dade..dc5d824 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -70,12 +70,10 @@ public class MasterAnnotationReadingPriorityFunction 
extends AnnotationReadingPr
   RegionServerStatusProtos.ReportRegionStateTransitionRequest
   tRequest = 
(RegionServerStatusProtos.ReportRegionStateTransitionRequest) param;
   for (RegionServerStatusProtos.RegionStateTransition rst : 
tRequest.getTransitionList()) {
-if (rst.getRegionInfoList() != null) {
-  for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
-TableName tn = ProtobufUtil.toTableName(info.getTableName());
-if (tn.isSystemTable()) {
-  return HConstants.SYSTEMTABLE_QOS;
-}
+for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
+  TableName tn = ProtobufUtil.toTableName(info.getTableName());
+  if (tn.isSystemTable()) {
+return HConstants.SYSTEMTABLE_QOS;
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6aaa9dcf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 3463000..1fbc428 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -183,8 +183,8 @@ implements ServerProcedureInterface {
   LOG.trace(state);
 }
 // Keep running count of cycles
-if (state.ordinal() != this.previousState) {
-  this.previousState = state.ordinal();
+if (state.getNumber() != this.previousState) {
+  this.previousState = state.getNumber();
   this.cycles = 0;
 } else {
   this.cycles++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6aaa9dcf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index 6f8bcd4..f4a6c95 100644
--- 

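One of the error-prone fixes above replaces state.ordinal() with state.getNumber() in ServerCrashProcedure. Here is a hedged sketch of why that matters for protobuf-style enums; the enum below is a hand-written stand-in, not the generated procedure state enum.

// ordinal() is only the declaration position and silently changes if constants are
// reordered or inserted, while an explicit protobuf-style field number is part of
// the wire contract and stays stable.
public class EnumNumberExample {

  /** Stand-in for a protobuf-generated enum that carries explicit field numbers. */
  enum CrashState {
    SERVER_CRASH_START(1),
    SERVER_CRASH_SPLIT_LOGS(4),   // numbers can be sparse...
    SERVER_CRASH_ASSIGN(8),
    SERVER_CRASH_FINISH(100);     // ...and far away from the ordinal

    private final int number;

    CrashState(int number) {
      this.number = number;
    }

    int getNumber() {
      return number;
    }
  }

  public static void main(String[] args) {
    for (CrashState s : CrashState.values()) {
      System.out.printf("%-25s ordinal=%d number=%d%n", s, s.ordinal(), s.getNumber());
    }
    // Tracking progress by ordinal() would break if the enum grows or is reordered;
    // tracking by getNumber() stays tied to the declared value.
  }
}
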
hbase git commit: HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

2017-11-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 e77c57874 -> e1d695038


HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of 
deadlock (Allan Yang)

This patch plus a sorting of the batch (HBASE-17924) fixes a regression
in Increment/CheckAndPut-style operations.

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1d69503
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1d69503
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1d69503

Branch: refs/heads/branch-1.2
Commit: e1d6950381daeb46f0218fc1de3256e88cfc0a1f
Parents: e77c578
Author: Michael Stack 
Authored: Tue Nov 28 09:14:58 2017 -0800
Committer: Michael Stack 
Committed: Tue Nov 28 09:15:04 2017 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |  59 ++--
 .../hadoop/hbase/regionserver/Region.java   |   4 +-
 .../hadoop/hbase/client/TestMultiParallel.java  | 141 +++
 .../hbase/regionserver/TestAtomicOperation.java |   9 ++
 4 files changed, 203 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d69503/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0e73d1c..30202a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3051,18 +3051,29 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   continue;
 }
 
+
+//HBASE-18233
 // If we haven't got any rows in our batch, we should block to
-// get the next one.
+// get the next one's read lock. We need at least one row to mutate.
+// If we have got rows, do not block when lock is not available,
+// so that we can fail fast and go on with the rows with locks in
+// the batch. By doing this, we can reduce contention and prevent
+// possible deadlocks.
+// The unfinished rows in the batch will be detected in batchMutate,
+// and it will try to finish them by calling doMiniBatchMutation again.
+boolean shouldBlock = numReadyToWrite == 0;
 RowLock rowLock = null;
 try {
-  rowLock = getRowLock(mutation.getRow(), true);
+  rowLock = getRowLock(mutation.getRow(), true, shouldBlock);
 } catch (IOException ioe) {
   LOG.warn("Failed getting lock in batch put, row="
 + Bytes.toStringBinary(mutation.getRow()), ioe);
 }
 if (rowLock == null) {
-  // We failed to grab another lock
-  break; // stop acquiring more rows for this batch
+  // We failed to grab another lock. Stop acquiring more rows for this
+  // batch and go on with the gotten ones
+  break;
+
 } else {
   acquiredRowLocks.add(rowLock);
 }
@@ -5138,7 +5149,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* Get an exclusive ( write lock ) lock on a given row.
* @param row Which row to lock.
   * @return A locked RowLock. The lock is exclusive and already acquired.
-   * @throws IOException
+   * @throws IOException if any error occurred
*/
   public RowLock getRowLock(byte[] row) throws IOException {
 return getRowLock(row, false);
@@ -5152,9 +5163,28 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* started (the calling thread has already acquired the region-close-guard 
lock).
* @param row The row actions will be performed against
* @param readLock is the lock reader or writer. True indicates that a 
non-exclusive
-   * lock is requested
+   *  lock is requested
+   * @return A locked RowLock.
+   * @throws IOException if any error occurred
*/
   public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
+return getRowLock(row, readLock, true);
+  }
+
+  /**
+   *
+   * Get a row lock for the specified row. All locks are reentrant.
+   *
+   * Before calling this function make sure that a region operation has 
already been
+   * started (the calling thread has already acquired the region-close-guard 
lock).
+   * @param row The row actions will be performed against
+   * @param readLock is the lock reader or writer. True indicates that a 
non-exclusive
+   *  lock is requested
+   * @param waitForLock whether should wait for 

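The comments added to doMiniBatchMutation above describe the HBASE-18233 strategy: block only for the first row lock so the batch always makes progress, and try the remaining rows without waiting so one contended row cannot stall or deadlock the whole batch. Below is a standalone sketch of that locking strategy using java.util.concurrent locks; it illustrates the idea only and is not the real HRegion.getRowLock implementation.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class BatchLockSketch {

  // Acquire locks for as many rows as possible: wait for the first row, fail fast
  // on the rest. Returns the indexes that were actually locked.
  static List<Integer> acquireBatch(List<ReentrantLock> rowLocks) throws InterruptedException {
    List<Integer> acquired = new ArrayList<>();
    for (int i = 0; i < rowLocks.size(); i++) {
      boolean shouldBlock = acquired.isEmpty();           // need at least one row to mutate
      boolean got = shouldBlock
          ? rowLocks.get(i).tryLock(30, TimeUnit.SECONDS) // block for the first row
          : rowLocks.get(i).tryLock();                    // do not wait for the others
      if (!got) {
        break;  // stop acquiring; the caller retries the unfinished rows later
      }
      acquired.add(i);
    }
    return acquired;
  }

  public static void main(String[] args) throws InterruptedException {
    List<ReentrantLock> locks =
        Arrays.asList(new ReentrantLock(), new ReentrantLock(), new ReentrantLock());
    CountDownLatch rowTwoHeld = new CountDownLatch(1);
    Thread otherHandler = new Thread(() -> {
      locks.get(2).lock();                                // another handler holds row 2
      rowTwoHeld.countDown();
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {
        Thread.currentThread().interrupt();
      }
      locks.get(2).unlock();
    });
    otherHandler.start();
    rowTwoHeld.await();

    List<Integer> acquired = acquireBatch(locks);
    System.out.println("acquired row indexes: " + acquired); // expect [0, 1]
    for (int i : acquired) {
      locks.get(i).unlock();                              // release what this batch took
    }
    otherHandler.join();
  }
}
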
hbase-site git commit: INFRA-10751 Empty commit

2017-11-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b9722a17b -> 83b248d3f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/83b248d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/83b248d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/83b248d3

Branch: refs/heads/asf-site
Commit: 83b248d3f3e904ae63c310ac6da1e7f643c603cc
Parents: b9722a1
Author: jenkins 
Authored: Tue Nov 28 15:18:05 2017 +
Committer: jenkins 
Committed: Tue Nov 28 15:18:05 2017 +

--

--




[12/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() 
 !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable()  
!hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * pThis method could take 
some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Mapbyte[], 
ListHStoreFile close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */
-1392  public 

[09/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() 
 !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable()  
!hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * pThis method could take 
some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Mapbyte[], 
ListHStoreFile close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */

[06/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() 
 !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable()  
!hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null 
 isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * pThis method could take 
some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Mapbyte[], 
ListHStoreFile close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */
-1392  public 

[48/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 9137fb5..7445164 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1691,10 +1691,16 @@
 
 addFront(Procedure)
 - Method in class org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler
 
+addFront(Iterator<Procedure>)
 - Method in class org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler
+
 addFront(Procedure)
 - Method in interface org.apache.hadoop.hbase.procedure2.ProcedureScheduler
 
 Inserts the specified element at the front of this 
queue.
 
+addFront(Iterator<Procedure>)
 - Method in interface org.apache.hadoop.hbase.procedure2.ProcedureScheduler
+
+Inserts all elements in the iterator at the front of this 
queue.
+
 addGauge(String,
 Gauge?, MetricsRecordBuilder) - Method in class 
org.apache.hadoop.hbase.metrics.impl.HBaseMetrics2HadoopMetricsAdapter
 
 addGeneralBloomFilter(BloomFilterWriter)
 - Method in interface org.apache.hadoop.hbase.io.hfile.HFile.Writer
@@ -10178,6 +10184,11 @@
 
 checkConfiguredWALEntryFilters(ReplicationPeerConfig)
 - Method in class org.apache.hadoop.hbase.master.replication.ReplicationManager
 
+checkCoprocessorWithService(ListMasterCoprocessor,
 Class?) - Method in class org.apache.hadoop.hbase.master.MasterRpcServices
+
+Determines if there is a coprocessor implementation in the 
provided argument which extends
+ or implements the provided service.
+
 checkCoveringPermission(User,
 AccessController.OpType, RegionCoprocessorEnvironment, byte[], Mapbyte[], 
? extends Collection?, long, Permission.Action...) - 
Method in class org.apache.hadoop.hbase.security.access.AccessController
 
 Determine if cell ACLs covered by the operation grant 
access.
@@ -45041,7 +45052,7 @@
 
 getRegionState(String)
 - Method in class org.apache.hadoop.hbase.master.assignment.RegionStates
 
-getRegionState(Result,
 int) - Method in class org.apache.hadoop.hbase.master.assignment.RegionStateStore
+getRegionState(Result,
 int) - Static method in class 
org.apache.hadoop.hbase.master.assignment.RegionStateStore
 
 Pull the region state from a catalog table Result.
 
@@ -47126,6 +47137,8 @@
 
 getSizeOfCleaners()
 - Method in class org.apache.hadoop.hbase.master.cleaner.LogCleaner
 
+getSizeOfLatestPath()
 - Method in class org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager
+
 getSizeOfLogQueue()
 - Method in class org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource
 
 getSizeOfLogQueue()
 - Method in interface org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource
@@ -48376,7 +48389,10 @@
 Return a Set of filters supported by the Filter 
Language
 
 getSuspendedProcedures()
 - Method in class org.apache.hadoop.hbase.procedure2.ProcedureEvent
-
+
+Access to suspendedProcedures is 'synchronized' on this 
object, but it's fine to return it
+ here for tests.
+
 getSyncedEntries()
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore.SyncMetrics
 
 getSyncedPerSec()
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore.SyncMetrics
@@ -51813,6 +51829,11 @@
 
 hasAccess(User,
 TableName, Permission.Action) - Method in class 
org.apache.hadoop.hbase.security.access.TableAuthManager
 
+hasAccessControlServiceCoprocessor(MasterCoprocessorHost)
 - Method in class org.apache.hadoop.hbase.master.MasterRpcServices
+
+Determines if there is a MasterCoprocessor deployed which 
implements
+ AccessControlProtos.AccessControlService.Interface.
+
 hasActiveMaster()
 - Method in class org.apache.hadoop.hbase.master.ActiveMasterManager
 
 hasAdministratorAccess(ServletContext,
 HttpServletRequest, HttpServletResponse) - Static method in class 
org.apache.hadoop.hbase.http.HttpServer
@@ -52725,6 +52746,11 @@
 
 Indicates whether the current mob ref cell has a valid 
value.
 
+hasVisibilityLabelsServiceCoprocessor(MasterCoprocessorHost)
 - Method in class org.apache.hadoop.hbase.master.MasterRpcServices
+
+Determines if there is a MasterCoprocessor deployed which 
implements
+ VisibilityLabelsProtos.VisibilityLabelsService.Interface.
+
 hasWait()
 - Method in class org.apache.hadoop.hbase.regionserver.ServerNonceManager.OperationContext
 
 haveSameParent(Procedure?,
 Procedure?) - Static method in class 
org.apache.hadoop.hbase.procedure2.Procedure
@@ -65149,6 +65175,8 @@
 
 LOG - 
Static variable in class org.apache.hadoop.hbase.procedure2.Procedure
 
+LOG
 - Static variable in class org.apache.hadoop.hbase.procedure2.ProcedureEvent
+
 LOG
 - Static variable in class org.apache.hadoop.hbase.procedure2.ProcedureExecutor.CompletedProcedureCleaner
 
 LOG
 - Static variable in class org.apache.hadoop.hbase.procedure2.ProcedureExecutor
@@ -78068,11 +78096,11 @@ service.
 
 postClearCompactionQueues()
 - Method in 

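The index entries above also list a new bulk addFront(Iterator<Procedure>) on ProcedureScheduler, described as inserting all elements in the iterator at the front of the queue. A simplified, hypothetical sketch of what that usually means for ordering, using a plain deque of strings rather than the HBase scheduler:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.Iterator;

public class AddFrontSketch {

  private final Deque<String> queue = new ArrayDeque<>();

  void add(String item) {
    queue.addLast(item);
  }

  // Push a whole iterator of items to the head of the queue while keeping their
  // relative order, so they run before anything that was already queued.
  void addFront(Iterator<String> items) {
    Deque<String> tmp = new ArrayDeque<>();
    while (items.hasNext()) {
      tmp.addLast(items.next());
    }
    // Insert in reverse so the iterator's order is preserved at the head.
    for (Iterator<String> it = tmp.descendingIterator(); it.hasNext();) {
      queue.addFirst(it.next());
    }
  }

  public static void main(String[] args) {
    AddFrontSketch scheduler = new AddFrontSketch();
    scheduler.add("old-1");
    scheduler.add("old-2");
    scheduler.addFront(Arrays.asList("urgent-1", "urgent-2").iterator());
    System.out.println(scheduler.queue);  // [urgent-1, urgent-2, old-1, old-2]
  }
}
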
[19/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
index 0b9d890..d852fd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
@@ -155,25 +155,25 @@
 147  }
 148
 149  public boolean 
waitInitialized(Procedure proc) {
-150return 
procSched.waitEvent(master.getInitializedEvent(), proc);
+150return 
master.getInitializedEvent().suspendIfNotReady(proc);
 151  }
 152
 153  public boolean 
waitServerCrashProcessingEnabled(Procedure proc) {
 154if (master instanceof HMaster) {
-155  return 
procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), 
proc);
+155  return 
((HMaster)master).getServerCrashProcessingEnabledEvent().suspendIfNotReady(proc);
 156}
 157return false;
 158  }
 159
 160  public boolean 
waitFailoverCleanup(Procedure proc) {
-161return 
procSched.waitEvent(master.getAssignmentManager().getFailoverCleanupEvent(), 
proc);
+161return 
master.getAssignmentManager().getFailoverCleanupEvent().suspendIfNotReady(proc);
 162  }
 163
 164  public void 
setEventReady(ProcedureEvent event, boolean isReady) {
 165if (isReady) {
-166  procSched.wakeEvent(event);
+166  event.wake(procSched);
 167} else {
-168  procSched.suspendEvent(event);
+168  event.suspend();
 169}
 170  }
 171

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
index 0b9d890..d852fd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
@@ -155,25 +155,25 @@
 147  }
 148
 149  public boolean 
waitInitialized(Procedure proc) {
-150return 
procSched.waitEvent(master.getInitializedEvent(), proc);
+150return 
master.getInitializedEvent().suspendIfNotReady(proc);
 151  }
 152
 153  public boolean 
waitServerCrashProcessingEnabled(Procedure proc) {
 154if (master instanceof HMaster) {
-155  return 
procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), 
proc);
+155  return 
((HMaster)master).getServerCrashProcessingEnabledEvent().suspendIfNotReady(proc);
 156}
 157return false;
 158  }
 159
 160  public boolean 
waitFailoverCleanup(Procedure proc) {
-161return 
procSched.waitEvent(master.getAssignmentManager().getFailoverCleanupEvent(), 
proc);
+161return 
master.getAssignmentManager().getFailoverCleanupEvent().suspendIfNotReady(proc);
 162  }
 163
 164  public void 
setEventReady(ProcedureEvent event, boolean isReady) {
 165if (isReady) {
-166  procSched.wakeEvent(event);
+166  event.wake(procSched);
 167} else {
-168  procSched.suspendEvent(event);
+168  event.suspend();
 169}
 170  }
 171

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
index e36d2ac..6175ecc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
@@ -26,298 +26,281 @@
 018
 019package 
org.apache.hadoop.hbase.procedure2;
 020
-021import 
java.util.concurrent.locks.Condition;
-022import 
java.util.concurrent.locks.ReentrantLock;
-023import java.util.concurrent.TimeUnit;
-024
-025import org.apache.commons.logging.Log;
-026import 
org.apache.commons.logging.LogFactory;
-027import 
org.apache.yetus.audience.InterfaceAudience;
-028
-029@InterfaceAudience.Private
-030public abstract class 
AbstractProcedureScheduler implements ProcedureScheduler {
-031  private static final Log LOG = 
LogFactory.getLog(AbstractProcedureScheduler.class);
-032  private final ReentrantLock 
schedulerLock = new ReentrantLock();
-033  private final 

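The MasterProcedureEnv hunks above replace procSched.waitEvent(event, proc) and procSched.wakeEvent(event) with event.suspendIfNotReady(proc) and event.wake(procSched), moving the suspend and wake bookkeeping onto the event itself. A self-contained sketch of that shape, with simplified stand-in types rather than the real procedure-v2 framework:

import java.util.ArrayDeque;
import java.util.Deque;

public class ProcedureEventSketch {

  /** Stand-in for a procedure; only carries a name for printing. */
  static class Procedure {
    final String name;
    Procedure(String name) { this.name = name; }
    @Override public String toString() { return name; }
  }

  /** Stand-in for the scheduler's runnable queue. */
  static class Scheduler {
    final Deque<Procedure> runnables = new ArrayDeque<>();
    void addFront(Procedure p) { runnables.addFirst(p); }
  }

  /** The event owns its suspended queue instead of the scheduler tracking it. */
  static class ProcedureEvent {
    private boolean ready;
    private final Deque<Procedure> suspended = new ArrayDeque<>();

    /** Returns true if the procedure was parked because the event is not ready yet. */
    synchronized boolean suspendIfNotReady(Procedure proc) {
      if (!ready) {
        suspended.addLast(proc);
        return true;
      }
      return false;
    }

    /** Marks the event not ready again (mirrors event.suspend() in the diff). */
    synchronized void suspend() {
      ready = false;
    }

    /** Marks the event ready and hands everything that was waiting back to the scheduler. */
    synchronized void wake(Scheduler scheduler) {
      ready = true;
      while (!suspended.isEmpty()) {
        scheduler.addFront(suspended.pollLast());
      }
    }
  }

  public static void main(String[] args) {
    Scheduler sched = new Scheduler();
    ProcedureEvent initialized = new ProcedureEvent();
    Procedure proc = new Procedure("assign-meta");

    if (initialized.suspendIfNotReady(proc)) {
      System.out.println(proc + " parked until the event is ready");
    }
    initialized.wake(sched);  // event becomes ready, waiters are requeued
    System.out.println("runnable again: " + sched.runnables);
  }
}
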
[51/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/b9722a17
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/b9722a17
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/b9722a17

Branch: refs/heads/asf-site
Commit: b9722a17b358503ee42f082e83b5f48e2fe5a588
Parents: 9f95f79
Author: jenkins 
Authored: Tue Nov 28 15:17:24 2017 +
Committer: jenkins 
Committed: Tue Nov 28 15:17:24 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 .../hbase/mapreduce/TableMapReduceUtil.html | 6 +-
 .../hbase/mapreduce/TableMapReduceUtil.html |   459 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 41576 +
 checkstyle.rss  |32 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html |12 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |77 +-
 .../hadoop/hbase/backup/package-tree.html   | 6 +-
 .../hadoop/hbase/class-use/ServerName.html  |26 +
 .../hadoop/hbase/client/class-use/Result.html   | 2 +-
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../hbase/coprocessor/MasterObserver.html   |22 +-
 .../class-use/MasterCoprocessor.html|17 +
 .../class-use/MasterCoprocessorEnvironment.html | 4 +-
 .../coprocessor/class-use/ObserverContext.html  | 4 +-
 .../ExampleMasterObserverWithMetrics.html   | 2 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hbase/mapreduce/TableMapReduceUtil.html |16 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hbase/master/MasterCoprocessorHost.html |20 +-
 .../MasterRpcServices.BalanceSwitchMode.html| 6 +-
 .../hadoop/hbase/master/MasterRpcServices.html  |   428 +-
 ...signmentManager.RegionInTransitionChore.html | 6 +-
 ...ssignmentManager.RegionInTransitionStat.html |40 +-
 .../master/assignment/AssignmentManager.html|   188 +-
 .../RegionStateStore.RegionStateVisitor.html| 4 +-
 .../master/assignment/RegionStateStore.html |60 +-
 .../RegionStates.AssignmentProcedureEvent.html  | 2 +-
 .../RegionStates.ServerReportEvent.html | 2 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../master/class-use/MasterCoprocessorHost.html |23 +
 .../master/class-use/RegionState.State.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../procedure/MasterProcedureScheduler.html | 2 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../procedure2/AbstractProcedureScheduler.html  |   225 +-
 .../hadoop/hbase/procedure2/ProcedureEvent.html |   144 +-
 .../hbase/procedure2/ProcedureScheduler.html|   162 +-
 .../procedure2/SimpleProcedureScheduler.html| 2 +-
 .../class-use/AbstractProcedureScheduler.html   |30 +
 .../hbase/procedure2/class-use/Procedure.html   |38 +-
 .../procedure2/class-use/ProcedureDeque.html| 5 +-
 .../procedure2/class-use/ProcedureEvent.html|47 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../hbase/quotas/MasterSpaceQuotaObserver.html  | 2 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../HRegion.BatchOperation.Visitor.html | 4 +-
 .../regionserver/HRegion.BatchOperation.html|78 +-
 .../regionserver/HRegion.BulkLoadListener.html  | 8 +-
 .../HRegion.FlushResult.Result.html |10 +-
 .../hbase/regionserver/HRegion.FlushResult.html | 8 +-
 .../HRegion.MutationBatchOperation.html |44 +-
 .../regionserver/HRegion.RegionScannerImpl.html |90 +-
 .../HRegion.ReplayBatchOperation.html   |32 +-
 .../regionserver/HRegion.RowLockContext.html|28 +-
 .../hbase/regionserver/HRegion.RowLockImpl.html |16 +-
 .../hadoop/hbase/regionserver/HRegion.html  |   468 +-
 .../hadoop/hbase/regionserver/package-tree.html |18 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 

[34/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index a1e5ded..e18c93f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ReplicationSourceManager
+public class ReplicationSourceManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ReplicationListener
 This class is responsible to manage all the replication
@@ -411,53 +411,57 @@ implements 
+(package private) int
+getSizeOfLatestPath()
+
+
 ReplicationSourceInterface
 getSource(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Get the normal source for a given peer
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListReplicationSourceInterface
 getSources()
 Get a list of all the normal sources of this rs
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getStats()
 Get a string representation of all the sources' 
metrics
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
 getTotalBufferUsed()
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true;
 title="class or interface in java.util">SortedSethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getWALs()
 Get a copy of the wals of the first source on this rs
 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true;
 title="class or interface in java.util">SortedSethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getWalsByIdRecoveredQueues()
 Get a copy of the wals of the recovered sources on this 
rs
 
 
-
+
 (package private) void
 init()
 Adds a normal source per registered peer cluster and tries 
to process all
  old region server wal queues
 
 
-
+
 void
 join()
 Terminate the replication on this region server
 
 
-
+
 void
 logPositionAndCleanOldLogs(org.apache.hadoop.fs.Pathlog,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 

[17/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
-1309    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-1310      new Throwable("LOGGING: REMOVE"));
-1311    // REMOVE BELOW
-1312    LOG.info("DEBUG LIST ALL FILES");
-1313    for (HStore store : this.stores.values()) {
-1314      LOG.info("store " + store.getColumnFamilyName());
-1315      for (HStoreFile sf : store.getStorefiles()) {
-1316        LOG.info(sf.toStringDetailed());
-1317      }
-1318    }
-1319    return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324    if (!isAvailable()) {
-1325      LOG.debug("Region " + this
-1326          + " is not mergeable because it is closing or closed");
-1327      return false;
-1328    }
-1329    if (hasReferences()) {
-1330      LOG.debug("Region " + this
-1331          + " is not mergeable because it has references");
-1332      return false;
-1333    }
-1334
-1335    return true;
+1308    return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313    if (!isAvailable()) {
+1314      LOG.debug("Region " + this
+1315          + " is not mergeable because it is closing or closed");
+1316      return false;
+1317    }
+1318    if (hasReferences()) {
+1319      LOG.debug("Region " + this
+1320          + " is not mergeable because it has references");
+1321      return false;
+1322    }
+1323
+1324    return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328    synchronized(this.writestate) {
+1329      return this.writestate.writesEnabled;
+1330    }
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl getMVCC() {
+1335    return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339    synchronized(this.writestate) {
-1340      return this.writestate.writesEnabled;
-1341    }
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl getMVCC() {
-1346    return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351    return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340    return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348      // This scan can read even uncommitted transactions
+1349      return Long.MAX_VALUE;
+1350    }
+1351    return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359      // This scan can read even uncommitted transactions
-1360      return Long.MAX_VALUE;
-1361    }
-1362    return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean isLoadingCfsOnDemandDefault() {
-1366    return this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage files that the HRegion's component
-1377   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1378   * vector if already closed and null if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1382   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386    return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new Object();
-1390
-1391  /** Conf key for the periodic flush interval */
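The close() javadoc above spells out a three-way contract: an empty map when the region was already closed, null when the region judged it should not close, and DroppedSnapshotException when the caller must abort. A hedged sketch of a caller honouring that contract; the closeAndCheck helper is hypothetical, and the package names used in the imports should be checked against your HBase version.

import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

public class RegionCloseHelper {
  // Hypothetical helper illustrating the documented return values of HRegion#close().
  static boolean closeAndCheck(HRegion region) throws IOException {
    try {
      Map<byte[], List<HStoreFile>> files = region.close();
      if (files == null) {
        // The region judged that it should not close right now.
        return false;
      }
      // Empty map: already closed. Non-empty: the store files the region was using.
      return true;
    } catch (DroppedSnapshotException e) {
      // Per the javadoc, a snapshot was not persisted; the hosting server MUST abort.
      throw e;
    }
  }
}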

[02/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
index c552d8a..a792ab2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
@@ -48,922 +48,928 @@
 040import 
java.util.concurrent.ThreadPoolExecutor;
 041import java.util.concurrent.TimeUnit;
 042import 
java.util.concurrent.atomic.AtomicLong;
-043
-044import org.apache.commons.logging.Log;
-045import 
org.apache.commons.logging.LogFactory;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor;
-051import org.apache.hadoop.hbase.Server;
-052import 
org.apache.hadoop.hbase.TableDescriptors;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import 
org.apache.hadoop.hbase.client.Connection;
-056import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-057import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-058import 
org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
-059import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-060import 
org.apache.hadoop.hbase.replication.ReplicationException;
-061import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-062import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-063import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-064import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-065import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-066import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
-067import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Pair;
-070import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-071
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-073import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-074
-075/**
-076 * This class is responsible to manage all the replication
-077 * sources. There are two classes of sources:
-078 * <ul>
-079 * <li> Normal sources are persistent and one per peer cluster</li>
-080 * <li> Old sources are recovered from a failed region server and our
-081 * only goal is to finish replicating the WAL queue it had up in ZK</li>
-082 * </ul>
-083 *
-084 * When a region server dies, this class uses a watcher to get notified and it
-085 * tries to grab a lock in order to transfer all the queues in a local
-086 * old source.
-087 *
-088 * This class implements the ReplicationListener interface so that it can track changes in
-089 * replication state.
-090 */
-091@InterfaceAudience.Private
-092public class ReplicationSourceManager implements ReplicationListener {
-093  private static final Log LOG =
-094      LogFactory.getLog(ReplicationSourceManager.class);
-095  // List of all the sources that read this RS's logs
-096  private final List<ReplicationSourceInterface> sources;
-097  // List of all the sources we got from died RSs
-098  private final List<ReplicationSourceInterface> oldsources;
-099  private final ReplicationQueues replicationQueues;
-100  private final ReplicationTracker replicationTracker;
-101  private final ReplicationPeers replicationPeers;
-102  // UUID for this cluster
-103  private final UUID clusterId;
-104  // All about stopping
-105  private final Server server;
-106  // All logs we are currently tracking
-107  // Index structure of the map is: peer_id->logPrefix/logGroup->logs
-108  private final Map<String, Map<String, SortedSet<String>>> walsById;
-109  // Logs for recovered sources we are currently tracking
-110  private final Map<String, Map<String, SortedSet<String>>> walsByIdRecoveredQueues;
-111  private final Configuration conf;
-112  private final FileSystem fs;
-113  // The paths to the latest log of each wal group, for new coming peers
-114  private Set<Path> latestPaths;
-115  // Path to the wals directories
-116  private final Path logDir;
-117  // Path to the wal archive
-118  private final Path oldLogDir;
-119  private final WALFileLengthProvider walFileLengthProvider;
-120  // The number of ms that we wait before moving znodes, HBASE-3596
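The "Index structure of the map" comment above (peer_id->logPrefix/logGroup->logs) maps directly onto nested JDK collections. A standalone toy illustrating that shape; the peer, group and wal sample values are invented, and this is not ReplicationSourceManager code.

import java.util.Map;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class WalIndexSketch {
  public static void main(String[] args) {
    // peer_id -> wal group (log prefix) -> wal file names
    Map<String, Map<String, SortedSet<String>>> walsById = new TreeMap<>();
    walsById
        .computeIfAbsent("peer1", p -> new TreeMap<>())
        .computeIfAbsent("rs1.example.com%2C16020%2C1511900000000", g -> new TreeSet<>())
        .add("rs1.example.com%2C16020%2C1511900000000.1511901111111");
    System.out.println(walsById);
  }
}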

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html

[43/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index efa1c09..e8c7a75 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1290,7 +1290,7 @@ implements 
 
 UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
-private static final UnassignProcedure[] UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
+private static final UnassignProcedure[] UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
 
 
 
@@ -1299,7 +1299,7 @@ implements 
 
 pendingAssignQueue
-private final ArrayList<RegionStates.RegionStateNode> pendingAssignQueue
+private final ArrayList<RegionStates.RegionStateNode> pendingAssignQueue
 
 
 
@@ -1308,7 +1308,7 @@ implements 
 
 assignQueueLock
-private final ReentrantLock assignQueueLock
+private final ReentrantLock assignQueueLock
 
 
 
@@ -1317,7 +1317,7 @@ implements 
 
 assignQueueFullCond
-private final Condition assignQueueFullCond
+private final Condition assignQueueFullCond
 
 
 
@@ -1657,7 +1657,7 @@ implements 
 
 setMetaInitialized
-private void setMetaInitialized(RegionInfo metaRegionInfo,
+private void setMetaInitialized(RegionInfo metaRegionInfo,
                                 boolean isInitialized)
 
 
@@ -1667,7 +1667,7 @@ implements 
 
 getMetaInitializedEvent
-private ProcedureEvent getMetaInitializedEvent(RegionInfo metaRegionInfo)
+private ProcedureEvent getMetaInitializedEvent(RegionInfo metaRegionInfo)
 
 
 
@@ -1676,7 +1676,7 @@ implements 
 
 waitMetaLoaded
-public boolean waitMetaLoaded(Procedure proc)
+public boolean waitMetaLoaded(Procedure proc)
 
 
 
@@ -1685,7 +1685,7 @@ implements 
 
 wakeMetaLoadedEvent
-protected void wakeMetaLoadedEvent()
+protected void wakeMetaLoadedEvent()
 
 
 
@@ -1694,7 +1694,7 @@ implements 
 
 isMetaLoaded
-public boolean isMetaLoaded()
+public boolean isMetaLoaded()
 
 
 
@@ -1703,7 +1703,7 @@ implements 
 
 assignMeta
-public void assignMeta(RegionInfo metaRegionInfo)
+public void assignMeta(RegionInfo metaRegionInfo)
                 throws IOException
 
 Throws:
@@ -1717,7 +1717,7 @@ implements 
 
 assignMeta
-public void assignMeta(RegionInfo metaRegionInfo,
+public void assignMeta(RegionInfo metaRegionInfo,
                        ServerName serverName)
                 throws IOException
 
@@ -1732,7 +1732,7 @@ implements 
 
 checkIfShouldMoveSystemRegionAsync
-public void checkIfShouldMoveSystemRegionAsync()
+public void checkIfShouldMoveSystemRegionAsync()
 Start a new thread to check if there are region servers whose versions are higher than others.
  If so, move all system table regions to RS with the highest version to keep compatibility.
  The reason is, RS in new version may not be able to access RS in old version when there are
@@ -1745,7 +1745,7 @@ implements 
 
 getCarryingSystemTables
-private List<RegionInfo> getCarryingSystemTables(ServerName serverName)
+private List<RegionInfo> getCarryingSystemTables(ServerName serverName)
 
 
 
@@ -1754,7 +1754,7 @@ implements 
 
 assign
-public void assign(RegionInfo regionInfo)
+public void assign(RegionInfo regionInfo)
                 throws IOException
 
 Throws:
@@ -1768,7 +1768,7 @@ implements 
 
 assign
-public void assign(RegionInfo regionInfo,
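checkIfShouldMoveSystemRegionAsync() is described above as moving system table regions to the region server with the highest version. The selection step can be pictured with the toy below; the naive dot-separated version comparison and the sample server/version data are assumptions for illustration only, not the AssignmentManager implementation.

import java.util.HashMap;
import java.util.Map;

public class HighestVersionPicker {
  // Naive comparison of dot-separated version strings, e.g. "2.0.0" vs "1.4.9".
  static int compareVersions(String a, String b) {
    String[] pa = a.split("\\.");
    String[] pb = b.split("\\.");
    for (int i = 0; i < Math.max(pa.length, pb.length); i++) {
      int va = i < pa.length ? Integer.parseInt(pa[i]) : 0;
      int vb = i < pb.length ? Integer.parseInt(pb[i]) : 0;
      if (va != vb) {
        return Integer.compare(va, vb);
      }
    }
    return 0;
  }

  public static void main(String[] args) {
    Map<String, String> rsVersions = new HashMap<>();
    rsVersions.put("rs1", "1.4.9");
    rsVersions.put("rs2", "2.0.0");
    rsVersions.put("rs3", "2.0.0");

    String highest = "0";
    for (String v : rsVersions.values()) {
      if (compareVersions(v, highest) > 0) {
        highest = v;
      }
    }
    // Servers below 'highest' that carry system regions would be candidates to move.
    for (Map.Entry<String, String> e : rsVersions.entrySet()) {
      if (compareVersions(e.getValue(), highest) < 0) {
        System.out.println("would move system regions off " + e.getKey()
            + " (version " + e.getValue() + ")");
      }
    }
  }
}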

[38/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 78fa83c..c5d91ff 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class HRegion.BatchOperation<T>
+private abstract static class HRegion.BatchOperation<T>
 extends Object
 Class that tracks the progress of a batch of operations, accumulating status codes and tracking
  the index at which processing is proceeding. These batch operations may get split into
@@ -411,7 +411,7 @@ extends Object
 
 
 operations
-protected final T[] operations
+protected final T[] operations
 
 
 
@@ -420,7 +420,7 @@ extends Object
 
 
 retCodeDetails
-protected final OperationStatus[] retCodeDetails
+protected final OperationStatus[] retCodeDetails
 
 
 
@@ -429,7 +429,7 @@ extends Object
 
 
 walEditsFromCoprocessors
-protected final WALEdit[] walEditsFromCoprocessors
+protected final WALEdit[] walEditsFromCoprocessors
 
 
 
@@ -438,7 +438,7 @@ extends Object
 
 
 familyCellMaps
-protected final Map<byte[],List<Cell>>[] familyCellMaps
+protected final Map<byte[],List<Cell>>[] familyCellMaps
 
 
 
@@ -447,7 +447,7 @@ extends Object
 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -456,7 +456,7 @@ extends Object
 
 
 nextIndexToProcess
-protected int nextIndexToProcess
+protected int nextIndexToProcess
 
 
 
@@ -465,7 +465,7 @@ extends Object
 
 
 observedExceptions
-protected final HRegion.ObservedExceptionsInBatch observedExceptions
+protected final HRegion.ObservedExceptionsInBatch observedExceptions
 
 
 
@@ -474,7 +474,7 @@ extends Object
 
 
 durability
-protected Durability durability
+protected Durability durability
 
 
 
@@ -483,7 +483,7 @@ extends Object
 
 
 atomic
-protected boolean atomic
+protected boolean atomic
 
 
 
@@ -502,7 +502,7 @@ extends Object
 
 
 BatchOperation
-public BatchOperation(HRegion region,
+public BatchOperation(HRegion region,
                       T[] operations)
 
 
@@ -520,7 +520,7 @@ extends Object
 
 
 visitBatchOperations
-public void visitBatchOperations(boolean pendingOnly,
+public void visitBatchOperations(boolean pendingOnly,
                                  int lastIndexExclusive,
                                  HRegion.BatchOperation.Visitor visitor)
                           throws IOException
@@ -537,7 +537,7 @@ extends Object
 
 
 getMutation
-public abstract Mutation getMutation(int index)
+public abstract Mutation getMutation(int index)
 
 
 
@@ -546,7 +546,7 @@ extends Object
 
 
 getNonceGroup
-public abstract long getNonceGroup(int index)
+public abstract long getNonceGroup(int index)
 
 
 
@@ -555,7 +555,7 @@ extends Object
 
 
 getNonce
-public abstract long getNonce(int index)
+public abstract long getNonce(int index)
 
 
 
@@ -564,7 +564,7 @@ extends Object
 
 
 getMutationsForCoprocs
-public abstract Mutation[] getMutationsForCoprocs()
+public abstract Mutation[] getMutationsForCoprocs()
 This method is potentially expensive and useful mostly for non-replay 
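visitBatchOperations(pendingOnly, lastIndexExclusive, visitor), together with nextIndexToProcess and retCodeDetails, suggests an index-driven visitor loop. The sketch below re-creates that shape on local stand-in types (Visitor, Status, ToyBatch) purely for illustration; it is not the HRegion.BatchOperation implementation.

import java.io.IOException;

public class BatchVisitSketch {
  interface Visitor {
    // Return false to stop visiting early, mirroring the visitor idiom described above.
    boolean visit(int index) throws IOException;
  }

  enum Status { NOT_RUN, SUCCESS, FAILURE }

  static class ToyBatch {
    final Status[] retCodeDetails;
    int nextIndexToProcess = 0;

    ToyBatch(int size) {
      retCodeDetails = new Status[size];
      java.util.Arrays.fill(retCodeDetails, Status.NOT_RUN);
    }

    // Visit operations in [nextIndexToProcess, lastIndexExclusive); optionally only pending ones.
    void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive, Visitor visitor)
        throws IOException {
      for (int i = nextIndexToProcess; i < lastIndexExclusive; i++) {
        if (!pendingOnly || retCodeDetails[i] == Status.NOT_RUN) {
          if (!visitor.visit(i)) {
            break;
          }
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    ToyBatch batch = new ToyBatch(3);
    batch.visitBatchOperations(true, 3, index -> {
      batch.retCodeDetails[index] = Status.SUCCESS;
      return true;
    });
    System.out.println(java.util.Arrays.toString(batch.retCodeDetails));
  }
}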

[25/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 51844c3..bbd7a35 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -55,2130 +55,2170 @@
 047import 
org.apache.hadoop.hbase.client.TableState;
 048import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
 049import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-050import 
org.apache.hadoop.hbase.errorhandling.ForeignException;
-051import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-052import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-053import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-054import 
org.apache.hadoop.hbase.ipc.QosPriority;
-055import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-056import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-057import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-058import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-059import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-060import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
-061import 
org.apache.hadoop.hbase.mob.MobUtils;
-062import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-063import 
org.apache.hadoop.hbase.procedure2.LockType;
-064import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-065import 
org.apache.hadoop.hbase.procedure2.Procedure;
-066import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-067import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-068import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-069import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-070import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-071import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-072import 
org.apache.hadoop.hbase.replication.ReplicationException;
-073import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-074import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-075import 
org.apache.hadoop.hbase.security.User;
-076import 
org.apache.hadoop.hbase.security.access.AccessController;
-077import 
org.apache.hadoop.hbase.security.visibility.VisibilityController;
-078import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-079import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-080import 
org.apache.hadoop.hbase.util.Bytes;
-081import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-082import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-083import 
org.apache.hadoop.hbase.util.Pair;
-084import 
org.apache.yetus.audience.InterfaceAudience;
-085import 
org.apache.zookeeper.KeeperException;
-086import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-087import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-088import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;

[26/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 51844c3..bbd7a35 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html

[30/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
index c5ba779..3ca0cce 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
@@ -551,7 +551,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTab
 leAction, postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPe
 er, postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures, postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove
 , postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetTableQuota, postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 
 postUpdateReplicationPeerConfig, preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign,
 preBalance, preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTable,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace, preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterStatus,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListSnapshot, preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline, preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRequestLock,
 preRestoreSnapshot, preSetNamespaceQuota,
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota, preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 p
 reStopMaster, preTableFlush,
 preTruncateTable,
 preTruncateTableAction,
 preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, 

[39/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.html
index b80877b..70e564b 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/MasterSpaceQuotaObserver.html
@@ -256,7 +256,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 post
 Balance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot, postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postStartMaster,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure, preAddReplicationPeer,
 preAddRSGroup,
 preAssign,
 preBalance,
 preBalanceRSGroup, preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTable,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot, preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTable,
 preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterStatus,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTable,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preStopMaster,
 preTableFlush,
 preTruncateTable,
 preTruncateTableAction,
 preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 post
 Balance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetTableDescri
 ptors, postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot, postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup
 , postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAct
 ion, postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 

[40/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
index ae180ba..ae2d87c 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public interface ProcedureScheduler
+public interface ProcedureScheduler
 Keep track of the runnable procedures
 
 
@@ -134,103 +134,82 @@ public interface 
 
 void
+addFront(Iterator<Procedure> procedureIterator)
+Inserts all elements in the iterator at the front of this queue.
+
+
+
+void
 addFront(Procedure proc)
 Inserts the specified element at the front of this queue.
 
 
-
+
 void
 clear()
 Clear current state of scheduler such that it is equivalent to newly created scheduler.
 
 
-
+
 void
 completionCleanup(Procedure proc)
 The procedure in execution completed.
 
 
-
+
 LockedResource
 getLockResource(LockedResourceType resourceType,
                 String resourceName)
 
-
+
 List<LockedResource>
 getLocks()
 List lock queues.
 
 
-
+
 boolean
 hasRunnables()
 
-
+
 Procedure
 poll()
 Fetch one Procedure from the queue
 
 
-
+
 Procedure
 poll(long timeout,
      TimeUnit unit)
 Fetch one Procedure from the queue
 
 
-
+
 void
 signalAll()
 In case the class is blocking on poll() waiting for items to be added,
  this method should awake poll() and poll() should return.
 
 
-
+
 int
 size()
 Returns the number of elements in this queue.
 
 
-
+
 void
 start()
 Start the scheduler
 
 
-
+
 void
 stop()
 Stop the scheduler
 
 
-
-void
-suspendEvent(ProcedureEvent event)
-Mark the event as not ready.
-
-
 
-boolean
-waitEvent(ProcedureEvent event,
-          Procedure procedure)
-Suspend the procedure if the event is not ready yet.
-
-
-
-void
-wakeEvent(ProcedureEvent event)
-Wake every procedure waiting for the specified event
- (By design each event has only one "wake" caller)
-
-
-
-void
-wakeEvents(int count,
-           ProcedureEvent... events)
-Wake every procedure waiting for the specified events.
-
-
-
 void
 yield(Procedure proc)
 The procedure can't run at the moment.
@@ -257,7 +236,7 @@ public interface 
 
 start
-void start()
+void start()
 Start the scheduler
 
 
@@ -267,7 +246,7 @@ public interface 
 
 stop
-void stop()
+void stop()
 Stop the scheduler
 
 
@@ -277,7 +256,7 @@ public interface 
 
 signalAll
-void signalAll()
+void signalAll()
 In case the class is blocking on poll() waiting for items to be added,
  this method should awake poll() and poll() should return.
 
@@ -288,7 +267,7 @@ public interface 
 
 addFront
-void addFront(Procedure proc)
+void addFront(Procedure proc)
 Inserts the specified element at the front of this queue.
 
 Parameters:
@@ -296,13 +275,23 @@ public interface 
+
+
+
+
+addFront
+void addFront(Iterator<Procedure> procedureIterator)
+Inserts all elements in the iterator at the front of this queue.
+
+
 
 
 
 
 
 addBack
-void addBack(Procedure proc)
+void addBack(Procedure proc)
 Inserts the specified element at the end of this queue.
 
 Parameters:
@@ -316,7 +305,7 @@ public interface 
 
 yield
-void yield(Procedure proc)
+void yield(Procedure proc)
 The procedure can't run at the moment.
  add it back to the queue, giving priority to someone else.
 
@@ -331,7 +320,7 @@ public interface 
 
 completionCleanup
-void completionCleanup(Procedure proc)
+void completionCleanup(Procedure proc)
 The procedure in execution completed.
  This can be implemented to perform cleanups.
 
@@ -346,7 +335,7 @@ public interface 
 
 hasRunnables
-boolean hasRunnables()
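The summary above adds an addFront(Iterator<Procedure>) overload beside the existing addFront(Procedure). A minimal in-memory sketch of those two operations over a deque, only to show what "insert at the front" means; FrontQueueSketch is not the real ProcedureScheduler, and preserving iterator order at the head is an assumption of this sketch.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

public class FrontQueueSketch<P> {
  private final Deque<P> runnables = new ArrayDeque<>();

  // Insert one element at the front of the queue.
  public void addFront(P proc) {
    runnables.addFirst(proc);
  }

  // Insert all elements from the iterator at the front, keeping their iteration order at the head.
  public void addFront(Iterator<P> it) {
    List<P> batch = new ArrayList<>();
    while (it.hasNext()) {
      batch.add(it.next());
    }
    // Add in reverse so that the first element of the batch ends up at the very front.
    for (int i = batch.size() - 1; i >= 0; i--) {
      runnables.addFirst(batch.get(i));
    }
  }

  // Fetch the next runnable element, or null if none (loosely mirroring poll()).
  public P poll() {
    return runnables.pollFirst();
  }

  public static void main(String[] args) {
    FrontQueueSketch<String> q = new FrontQueueSketch<>();
    q.addFront("c");
    q.addFront(java.util.Arrays.asList("a", "b").iterator());
    System.out.println(q.poll() + " " + q.poll() + " " + q.poll()); // prints: a b c
  }
}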

[04/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html

[50/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 0fc0bb8..8343584 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3436
+3437
 0
 0
-20809
+20812
 
 Files
 
@@ -5994,6891 +5994,6896 @@
  0  1
+org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java  0  0  1
 org/apache/hadoop/hbase/master/TestMasterFailover.java  0  0  2
 org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java  0  0  6
 org/apache/hadoop/hbase/master/TestMasterFileSystem.java  0  0  3
 org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java  0  0  1
 org/apache/hadoop/hbase/master/TestMasterMetrics.java  0  0  5
 org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java  0  0  3
 org/apache/hadoop/hbase/master/TestMasterNoCluster.java  0  0  20
 org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java  0  0  5
 org/apache/hadoop/hbase/master/TestMasterQosFunction.java  0  0  1
 org/apache/hadoop/hbase/master/TestMasterShutdown.java  0  0  1
 org/apache/hadoop/hbase/master/TestMasterStatusServlet.java  0  0  4
 org/apache/hadoop/hbase/master/TestMasterTransitions.java  0  0  3
 org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java  0  0  2
 org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java  0  0  1
 org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java  0  0  1
 org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java  0  0  3
 org/apache/hadoop/hbase/master/TestRegionPlacement.java  0  0  21
 org/apache/hadoop/hbase/master/TestRegionPlacement2.java  0  0  2
 org/apache/hadoop/hbase/master/TestRegionState.java  0  0  2
 org/apache/hadoop/hbase/master/TestRestartCluster.java  0  0  2
 org/apache/hadoop/hbase/master/TestRollingRestart.java  0  0  1
 org/apache/hadoop/hbase/master/TestSplitLogManager.java  0  0  7
 org/apache/hadoop/hbase/master/TestTableStateManager.java  0  0  1
 org/apache/hadoop/hbase/master/TestWarmupRegion.java  0  0  19
 org/apache/hadoop/hbase/master/assignment/AssignProcedure.java  0  0  4
 org/apache/hadoop/hbase/master/assignment/AssignmentManager.java  0  0  31
 org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java  0  0  14
 org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java  0  0  31
 org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java  0  0  74
 org/apache/hadoop/hbase/master/assignment/MockMasterServices.java  0  0  2
-org/apache/hadoop/hbase/master/assignment/RegionStateStore.java  0  0  8
+org/apache/hadoop/hbase/master/assignment/RegionStateStore.java  0  0  7
 org/apache/hadoop/hbase/master/assignment/RegionStates.java  0  0  25
 org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java  0  0  5
 org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java  0  0  74
 org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java  0  0  15
 org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java  0  0  1
 org/apache/hadoop/hbase/master/assignment/TestRegionStates.java  0  0  1
 org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java  0  0  3
 org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java  0  0  2
 org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java  0  0  4
 org/apache/hadoop/hbase/master/assignment/Util.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/BalancerChore.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java  0  0  75
 org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java  0  0  61
 org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java  0  0  2
 org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java  0  0  3
 org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java  0  0  2
 org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java  0  0  7
 org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java  0  0  2
 org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java  0  0  10
 org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java  0  0  1
 org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java  0  0  33
 org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java  0  0  33
 

[03/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 29ea7b3..6ed75c9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -1313,7093 +1313,7082 @@
 
   @Override
   public boolean isSplittable() {
-    boolean result = isAvailable() && !hasReferences();
-    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-      new Throwable("LOGGING: REMOVE"));
-    // REMOVE BELOW
-    LOG.info("DEBUG LIST ALL FILES");
-    for (HStore store : this.stores.values()) {
-      LOG.info("store " + store.getColumnFamilyName());
-      for (HStoreFile sf : store.getStorefiles()) {
-        LOG.info(sf.toStringDetailed());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public boolean isMergeable() {
-    if (!isAvailable()) {
-      LOG.debug("Region " + this
-          + " is not mergeable because it is closing or closed");
-      return false;
-    }
-    if (hasReferences()) {
-      LOG.debug("Region " + this
-          + " is not mergeable because it has references");
-      return false;
-    }
-
-    return true;
+    return isAvailable() && !hasReferences();
+  }
+
+  @Override
+  public boolean isMergeable() {
+    if (!isAvailable()) {
+      LOG.debug("Region " + this
+          + " is not mergeable because it is closing or closed");
+      return false;
+    }
+    if (hasReferences()) {
+      LOG.debug("Region " + this
+          + " is not mergeable because it has references");
+      return false;
+    }
+
+    return true;
+  }
+
+  public boolean areWritesEnabled() {
+    synchronized(this.writestate) {
+      return this.writestate.writesEnabled;
+    }
+  }
+
+  @VisibleForTesting
+  public MultiVersionConcurrencyControl getMVCC() {
+    return mvcc;
   }
 
-  public boolean areWritesEnabled() {
-    synchronized(this.writestate) {
-      return this.writestate.writesEnabled;
-    }
-  }
-
-  @VisibleForTesting
-  public MultiVersionConcurrencyControl getMVCC() {
-    return mvcc;
-  }
-
-  @Override
-  public long getMaxFlushedSeqId() {
-    return maxFlushedSeqId;
+  @Override
+  public long getMaxFlushedSeqId() {
+    return maxFlushedSeqId;
+  }
+
+  /**
+   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+   */
+  public long getReadPoint(IsolationLevel isolationLevel) {
+    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+      // This scan can read even uncommitted transactions
+      return Long.MAX_VALUE;
+    }
+    return mvcc.getReadPoint();
   }
 
-  /**
-   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-   */
-  public long getReadPoint(IsolationLevel isolationLevel) {
-    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-      // This scan can read even uncommitted transactions
-      return Long.MAX_VALUE;
-    }
-    return mvcc.getReadPoint();
-  }
-
-  public boolean isLoadingCfsOnDemandDefault() {
-    return this.isLoadingCfsOnDemandDefault;
-  }
-
-  /**
-   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-   * service any more calls.
-   *
-   * <p>This method could take some time to execute, so don't call it from a
-   * time-sensitive thread.
-   *
-   * @return Vector of all the storage files that the HRegion's component
-   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-   * vector if already closed and null if judged that it should not close.
-   *
-   * @throws IOException e
-   * @throws DroppedSnapshotException Thrown when replay of wal is required
-   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-   * caller MUST abort after this.
-   */
-  public Map<byte[], List<HStoreFile>> close() throws IOException {
-    return close(false);
-  }
-
-  private final Object closeLock = new Object();
-
-  /** Conf key for the periodic flush interval */
-  public static final String MEMSTORE_PERIODIC_FLUSH_INTERVAL =
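The hunk above is essentially a deletion of temporary debug logging from isSplittable() plus a re-flow of the surrounding methods; the one behavioural rule worth restating is the read-point selection in getReadPoint(IsolationLevel). A minimal, self-contained sketch of that rule in plain Java follows (ReadPointSketch and its mvccReadPoint field are illustrative stand-ins, not the HBase HRegion or MVCC classes):

public class ReadPointSketch {
  enum IsolationLevel { READ_COMMITTED, READ_UNCOMMITTED }

  // Stand-in for what MultiVersionConcurrencyControl#getReadPoint() returns in the real code.
  private final long mvccReadPoint;

  ReadPointSketch(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
  }

  // READ_UNCOMMITTED scans may see everything; anything else (including null) gets the MVCC read point.
  long getReadPoint(IsolationLevel isolationLevel) {
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      return Long.MAX_VALUE;
    }
    return mvccReadPoint;
  }

  public static void main(String[] args) {
    ReadPointSketch region = new ReadPointSketch(42L);
    System.out.println(region.getReadPoint(null));                            // 42 (default)
    System.out.println(region.getReadPoint(IsolationLevel.READ_UNCOMMITTED)); // 9223372036854775807
  }
}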

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -1313,7093 +1313,7082 @@

[24/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index 40cd159..2da0903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -260,7 +260,7 @@
 
     // Update meta events (for testing)
     if (hasProcExecutor) {
-      getProcedureScheduler().suspendEvent(metaLoadEvent);
+      metaLoadEvent.suspend();
       setFailoverCleanupDone(false);
       for (RegionInfo hri: getMetaRegionSet()) {
         setMetaInitialized(hri, false);
@@ -421,1455 +421,1454 @@
   }
 
   public boolean waitMetaInitialized(final Procedure proc, final RegionInfo regionInfo) {
-    return getProcedureScheduler().waitEvent(
-      getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc);
-  }
-
-  private void setMetaInitialized(final RegionInfo metaRegionInfo, final boolean isInitialized) {
-    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-    final ProcedureEvent metaInitEvent = getMetaInitializedEvent(metaRegionInfo);
-    if (isInitialized) {
-      getProcedureScheduler().wakeEvent(metaInitEvent);
-    } else {
-      getProcedureScheduler().suspendEvent(metaInitEvent);
-    }
-  }
-
-  private ProcedureEvent getMetaInitializedEvent(final RegionInfo metaRegionInfo) {
-    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-    // TODO: handle multiple meta.
-    return metaInitializedEvent;
-  }
-
-  public boolean waitMetaLoaded(final Procedure proc) {
-    return getProcedureScheduler().waitEvent(metaLoadEvent, proc);
-  }
-
-  protected void wakeMetaLoadedEvent() {
-    getProcedureScheduler().wakeEvent(metaLoadEvent);
-    assert isMetaLoaded() : "expected meta to be loaded";
-  }
-
-  public boolean isMetaLoaded() {
-    return metaLoadEvent.isReady();
-  }
-
-  // ============================================================
-  //  TODO: Sync helpers
-  // ============================================================
-  public void assignMeta(final RegionInfo metaRegionInfo) throws IOException {
-    assignMeta(metaRegionInfo, null);
-  }
-
-  public void assignMeta(final RegionInfo metaRegionInfo, final ServerName serverName)
-      throws IOException {
-    assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo;
-    AssignProcedure proc;
-    if (serverName != null) {
-      LOG.debug("Try assigning Meta " + metaRegionInfo + " to " + serverName);
-      proc = createAssignProcedure(metaRegionInfo, serverName);
-    } else {
-      LOG.debug("Assigning " + metaRegionInfo.getRegionNameAsString());
-      proc = createAssignProcedure(metaRegionInfo, false);
-    }
-    ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-  }
-
-  /**
-   * Start a new thread to check if there are region servers whose versions are higher than others.
-   * If so, move all system table regions to RS with the highest version to keep compatibility.
-   * The reason is, RS in new version may not be able to access RS in old version when there are
-   * some incompatible changes.
-   */
-  public void checkIfShouldMoveSystemRegionAsync() {
-    new Thread(() -> {
-      try {
-        synchronized (checkIfShouldMoveSystemRegionLock) {
-          List<RegionPlan> plans = new ArrayList<>();
-          for (ServerName server : getExcludedServersForSystemTable()) {
-            if (master.getServerManager().isServerDead(server)) {
-              // TODO: See HBASE-18494 and HBASE-18495. Though getExcludedServersForSystemTable()
-              // considers only online servers, the server could be queued for dead server
-              // processing. As region assignments for crashed server is handled by
-              // ServerCrashProcedure, do NOT handle them here. The goal is to handle this through
-              // regular flow of LoadBalancer as a favored node and not to have this special
-              // handling.
-              continue;
-            }
-            List<RegionInfo> regionsShouldMove = getCarryingSystemTables(server);
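The first hunk above replaces a scheduler-mediated call, getProcedureScheduler().suspendEvent(metaLoadEvent), with a call on the event itself, metaLoadEvent.suspend(). A standalone sketch of that suspend/wake/isReady shape follows (EventSketch is an illustrative name; it omits the waiter queue and scheduler wiring of the real ProcedureEvent):

final class EventSketch {
  private boolean ready = true;

  // Mark the event not ready; in the real code, procedures that wait on it get queued.
  synchronized void suspend() { ready = false; }

  // Mark the event ready again; in the real code, queued procedures are rescheduled.
  synchronized void wake() { ready = true; }

  synchronized boolean isReady() { return ready; }

  public static void main(String[] args) {
    EventSketch metaLoadEvent = new EventSketch();
    metaLoadEvent.suspend();
    System.out.println(metaLoadEvent.isReady()); // false
    metaLoadEvent.wake();
    System.out.println(metaLoadEvent.isReady()); // true
  }
}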

[45/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 9e691e7..c8c5c39 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface
 Implements the master RPC services.
@@ -238,111 +238,119 @@ implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest request)
 
+(package private) boolean
+checkCoprocessorWithService(List<MasterCoprocessor> coprocessorsToCheck, Class<?> service)
+Determines if there is a coprocessor implementation in the provided argument which extends
+ or implements the provided service.
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse
 clearDeadServers(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest request)
 
 private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse
 compactMob(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request,
     TableName tableName)
 Compacts the mob files in the current table.
 
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse
 compactRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request)
 Compact a region on the master.
 
 private MasterSwitchType
 convert(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterSwitchType switchType)
 
 private org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder
 createConfigurationSubset()
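The new checkCoprocessorWithService entry above is only described, not shown; a standalone sketch of the kind of check that description implies follows (the Coprocessor and AccessControlService types below are illustrative stand-ins, not the HBase classes):

import java.util.Arrays;
import java.util.List;

public class CoprocessorServiceCheck {
  interface Coprocessor {}                        // stand-in for a loaded master coprocessor
  interface AccessControlService {}               // stand-in for a coprocessor-backed service
  static class AccessController implements Coprocessor, AccessControlService {}
  static class VisibilityController implements Coprocessor {}

  // true if any coprocessor in the list extends or implements the provided service type
  static boolean checkCoprocessorWithService(List<? extends Coprocessor> coprocessorsToCheck,
      Class<?> service) {
    for (Coprocessor cp : coprocessorsToCheck) {
      if (service.isAssignableFrom(cp.getClass())) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    List<Coprocessor> loaded =
        Arrays.<Coprocessor>asList(new AccessController(), new VisibilityController());
    System.out.println(checkCoprocessorWithService(loaded, AccessControlService.class)); // true
    System.out.println(checkCoprocessorWithService(loaded, Runnable.class));             // false
  }
}
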
[31/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
index 92ebab8..30936b3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
@@ -255,7 +255,7 @@ implements MasterObserver
-  [inherited org.apache.hadoop.hbase.coprocessor.MasterObserver method index: postAbortProcedure … preUpdateReplicationPeerConfig]
+  [inherited org.apache.hadoop.hbase.coprocessor.MasterObserver method index: postAbortProcedure …]

[13/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
@@ -1313,7093 +1313,7082 @@
[49/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index df03577..63c077c 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 3436,
- Errors: 20809,
+  File: 3437,
+ Errors: 20812,
  Warnings: 0,
  Infos: 0
   
@@ -3751,7 +3751,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -5193,7 +5193,7 @@ under the License.
   0
 
 
-  11
+  10
 
   
   
@@ -15035,7 +15035,7 @@ under the License.
   0
 
 
-  1
+  9
 
   
   
@@ -19123,7 +19123,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -20915,7 +20915,7 @@ under the License.
   0
 
 
-  8
+  7
 
   
   
@@ -33263,7 +33263,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -40487,7 +40487,7 @@ under the License.
   0
 
 
-  17
+  16
 
   
   
@@ -44888,6 +44888,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.TestMasterCoprocessorServices.java;>org/apache/hadoop/hbase/master/TestMasterCoprocessorServices.java
+
+
+  0
+
+
+  0
+
+
+  1
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.filter.TestFirstKeyValueMatchingQualifiersFilter.java;>org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/coc.html
--
diff --git a/coc.html b/coc.html
index bdc5412..10ae93f 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-25
+  Last Published: 
2017-11-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 91840a4..9c1b8f7 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-25
+  Last Published: 
2017-11-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index ac5ec4f..0af8122 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
 
 Apache HBase  Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-25
+  Last Published: 
2017-11-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/dependency-convergence.html

[33/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html 
b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
index 82999b6..8b753a0 100644
--- a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
+++ b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
@@ -306,7 +306,7 @@ implements MasterObserver
-  [inherited org.apache.hadoop.hbase.coprocessor.MasterObserver method index: postAbortProcedure … preUpdateReplicationPeerConfig]
+  [inherited org.apache.hadoop.hbase.coprocessor.MasterObserver method index: postAbortProcedure …]

[35/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 620d01b..595a76c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -699,19 +699,19 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.ScanType
 org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.ScanType
 org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.BloomType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 9d29552..313f199 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,9 +130,9 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
index c3ab9b6..f5e592d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class ReplicationSourceManager.AdoptAbandonedQueuesWorker
+class ReplicationSourceManager.AdoptAbandonedQueuesWorker
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
 title="class or interface in java.lang">Thread
 
 
@@ -228,7 +228,7 @@ extends 

[08/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -1313,7093 +1313,7082 @@
[37/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 803bf5d..7359555 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionScanner, Shipper, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple 
Stores (aka column families).
@@ -425,7 +425,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -434,7 +434,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -445,7 +445,7 @@ implements 
 
 joinedContinuationRow
-protectedCell joinedContinuationRow
+protectedCell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -456,7 +456,7 @@ implements 
 
 filterClosed
-privateboolean filterClosed
+privateboolean filterClosed
 
 
 
@@ -465,7 +465,7 @@ implements 
 
 stopRow
-protected finalbyte[] stopRow
+protected finalbyte[] stopRow
 
 
 
@@ -474,7 +474,7 @@ implements 
 
 includeStopRow
-protected finalboolean includeStopRow
+protected finalboolean includeStopRow
 
 
 
@@ -483,7 +483,7 @@ implements 
 
 region
-protected finalHRegion region
+protected finalHRegion region
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 comparator
-protected finalCellComparator comparator
+protected finalCellComparator comparator
 
 
 
@@ -501,7 +501,7 @@ implements 
 
 readPt
-private finallong readPt
+private finallong readPt
 
 
 
@@ -510,7 +510,7 @@ implements 
 
 maxResultSize
-private finallong maxResultSize
+private finallong maxResultSize
 
 
 
@@ -519,7 +519,7 @@ implements 
 
 defaultScannerContext
-private finalScannerContext defaultScannerContext
+private finalScannerContext defaultScannerContext
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 filter
-private finalFilterWrapper filter
+private finalFilterWrapper filter
 
 
 
@@ -545,7 +545,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -561,7 +561,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion,
   longnonceGroup,
@@ -587,7 +587,7 @@ implements 
 
 getRegionInfo
-publicRegionInfogetRegionInfo()
+publicRegionInfogetRegionInfo()
 
 Specified by:
 getRegionInfoin
 interfaceRegionScanner
@@ -602,7 +602,7 @@ implements 
 
 initializeScanners
-protectedvoidinitializeScanners(Scanscan,
+protectedvoidinitializeScanners(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -617,7 +617,7 @@ implements 
 
 initializeKVHeap
-protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
+protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerjoinedScanners,
 HRegionregion)
  throws 

[47/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
index 0e69d82..38ef882 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
@@ -287,7 +287,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTab
 leAction, postCreateNamespace,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers, postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer, postRemoveRSGroup,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota, postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postStartMaster,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign, preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot, preCreateNamespace,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTableAction, preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterStatus,
 preGetLocks, preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames, 
preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction, preModifyNamespace,
 preModifyTable,
 preModifyTableAction,
 preMove, 
preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 <
 a 
href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preStopMaster-org.apache.hadoop.hbase.coprocessor.ObserverContext-">preStopMaster,
 preTableFlush,
 preTruncateTable,
 preTruncateTableAction,
 preUnassign, preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, postCreateNamespace,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerCon
 fig, postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction, postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,

[41/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
index ce260ce..435c524 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":6,"i3":6,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":6,"i12":6,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":6,"i4":6,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":6,"i13":6,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class AbstractProcedureScheduler
+public abstract class AbstractProcedureScheduler
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ProcedureScheduler
 
@@ -203,145 +203,130 @@ implements 
 void
+addFront(http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorProcedureprocedureIterator)
+Inserts all elements in the iterator at the front of this 
queue.
+
+
+
+void
 addFront(Procedureprocedure)
 Inserts the specified element at the front of this 
queue.
 
 
-
+
 protected abstract Procedure
 dequeue()
 Fetch one Procedure from the queue
  NOTE: this method is called with the sched lock held.
 
 
-
+
 protected abstract void
 enqueue(Procedureprocedure,
booleanaddFront)
 Add the procedure to the queue.
 
 
-
+
 long
 getNullPollCalls()
 
-
+
 long
 getPollCalls()
 
-
+
 boolean
 hasRunnables()
 
-
+
 Procedure
 poll()
 Fetch one Procedure from the queue
 
 
-
+
 Procedure
 poll(longnanos)
 
-
+
 Procedure
 poll(longtimeout,
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnitunit)
 Fetch one Procedure from the queue
 
 
-
+
 protected void
 push(Procedureprocedure,
 booleanaddFront,
 booleannotify)
 
-
+
 protected abstract boolean
 queueHasRunnables()
 Returns true if there are procedures available to 
process.
 
 
-
+
 protected abstract int
 queueSize()
 Returns the number of elements in this queue.
 
 
-
+
 protected void
 schedLock()
 
-
+
 protected void
 schedUnlock()
 
-
+
 void
 signalAll()
 In case the class is blocking on poll() waiting for items 
to be added,
  this method should awake poll() and poll() should return.
 
 
-
+
 int
 size()
 Returns the number of elements in this queue.
 
 
-
+
 void
 start()
 Start the scheduler
 
 
-
+
 void
 stop()
 Stop the scheduler
 
 
-
-void
-suspendEvent(ProcedureEventevent)
-Mark the event as not ready.
-
-
 
-boolean
-waitEvent(ProcedureEventevent,
- Procedureprocedure)
-Suspend the procedure if the event is not ready yet.
-
-
-
 protected void
 waitProcedure(ProcedureDequewaitQueue,
  Procedureproc)
 
-
-void
-wakeEvent(ProcedureEventevent)
-Wake every procedure waiting for the specified event
- (By design each event has only one "wake" caller)
-
-
-
-void
-wakeEvents(intcount,
-  ProcedureEvent...events)
-Wake every procedure waiting for the specified events.
+
+(package private) void
+wakeEvents(ProcedureEvent[]events)
+Wake up all of the given events.
 
 
-
+
 protected void
 wakePollIfNeeded(intwaitingCount)
 
-
+
 protected void
 wakeProcedure(Procedureprocedure)
 
-
+
 protected int
 wakeWaitingProcedures(ProcedureDequewaitQueue)
 Wakes up given waiting procedures by pushing them back into 
scheduler queues.
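
The method summary above describes a blocking queue guarded by a single scheduler lock: enqueue/dequeue run with the sched lock held (schedLock/schedUnlock), push adds work, and wakePollIfNeeded signals blocked pollers. The following is a minimal, self-contained analogue of that contract written only from the signatures listed here; the class and field names are hypothetical and this is not the HBase implementation.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative stand-in only: one lock guards the queue, enqueue/dequeue run
// with that lock held, and poll() blocks until a producer signals it.
public final class ToyProcedureScheduler {
  private final ReentrantLock schedulerLock = new ReentrantLock();
  private final Condition schedWaitCond = schedulerLock.newCondition();
  private final Deque<Runnable> runnables = new ArrayDeque<>();
  private boolean running = true;

  // Mirrors addFront(procedure): insert at the head, then wake one poller
  // (the role wakePollIfNeeded plays in the summary above).
  public void addFront(Runnable procedure) {
    schedulerLock.lock();
    try {
      runnables.addFirst(procedure);
      schedWaitCond.signal();
    } finally {
      schedulerLock.unlock();
    }
  }

  // Mirrors poll(timeout, unit): dequeue under the sched lock, blocking until
  // a runnable is available, the timeout elapses, or stop() is called.
  public Runnable poll(long timeout, TimeUnit unit) throws InterruptedException {
    schedulerLock.lock();
    try {
      long nanos = unit.toNanos(timeout);
      while (running && runnables.isEmpty()) {
        if (nanos <= 0L) {
          return null;
        }
        nanos = schedWaitCond.awaitNanos(nanos);
      }
      return running ? runnables.pollFirst() : null;
    } finally {
      schedulerLock.unlock();
    }
  }

  // Mirrors stop() + signalAll(): wake every blocked poll() so it can return.
  public void stop() {
    schedulerLock.lock();
    try {
      running = false;
      schedWaitCond.signalAll();
    } finally {
      schedulerLock.unlock();
    }
  }
}
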
@@ -382,7 +367,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -391,7 +376,7 @@ implements 
 
 schedulerLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock schedulerLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock schedulerLock
 
 
 
@@ -400,7 +385,7 @@ 

[36/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 21ee3f5..96ed6af 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -2991,7 +2991,7 @@ implements 
 
 closeLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object closeLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object closeLock
 
 
 
@@ -3000,7 +3000,7 @@ implements 
 
 MEMSTORE_PERIODIC_FLUSH_INTERVAL
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_PERIODIC_FLUSH_INTERVAL
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_PERIODIC_FLUSH_INTERVAL
 Conf key for the periodic flush interval
 
 See Also:
@@ -3014,7 +3014,7 @@ implements 
 
 DEFAULT_CACHE_FLUSH_INTERVAL
-public static finalint DEFAULT_CACHE_FLUSH_INTERVAL
+public static finalint DEFAULT_CACHE_FLUSH_INTERVAL
 Default interval for the memstore flush
 
 See Also:
@@ -3028,7 +3028,7 @@ implements 
 
 SYSTEM_CACHE_FLUSH_INTERVAL
-public static finalint SYSTEM_CACHE_FLUSH_INTERVAL
+public static finalint SYSTEM_CACHE_FLUSH_INTERVAL
 Default interval for System tables memstore flush
 
 See Also:
@@ -3042,7 +3042,7 @@ implements 
 
 MEMSTORE_FLUSH_PER_CHANGES
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_FLUSH_PER_CHANGES
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_FLUSH_PER_CHANGES
 Conf key to force a flush if there are already enough 
changes for one region in memstore
 
 See Also:
@@ -3056,7 +3056,7 @@ implements 
 
 DEFAULT_FLUSH_PER_CHANGES
-public static finallong DEFAULT_FLUSH_PER_CHANGES
+public static finallong DEFAULT_FLUSH_PER_CHANGES
 
 See Also:
 Constant
 Field Values
@@ -3069,7 +3069,7 @@ implements 
 
 MAX_FLUSH_PER_CHANGES
-public static finallong MAX_FLUSH_PER_CHANGES
+public static finallong MAX_FLUSH_PER_CHANGES
 The following MAX_FLUSH_PER_CHANGES is large enough because 
each KeyValue has 20+ bytes
  overhead. Therefore, even 1G empty KVs occupy at least 20GB memstore size for 
a single region
 
@@ -3084,7 +3084,7 @@ implements 
 
 FOR_UNIT_TESTS_ONLY
-private static finalbyte[] FOR_UNIT_TESTS_ONLY
+private static finalbyte[] FOR_UNIT_TESTS_ONLY
 Row needed by below method.
 
 
@@ -3094,7 +3094,7 @@ implements 
 
 FIXED_OVERHEAD
-public static finallong FIXED_OVERHEAD
+public static finallong FIXED_OVERHEAD
 
 
 
@@ -3103,7 +3103,7 @@ implements 
 
 DEEP_OVERHEAD
-public static finallong DEEP_OVERHEAD
+public static finallong DEEP_OVERHEAD
 
 
 
@@ -3112,7 +3112,7 @@ implements 
 
 MOCKED_LIST
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell MOCKED_LIST
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell MOCKED_LIST
 A mocked list implementation - discards all updates.
 
 
@@ -3745,7 +3745,7 @@ publiclong
 
 isMergeable
-publicbooleanisMergeable()
+publicbooleanisMergeable()
 
 Specified by:
 isMergeablein
 interfaceRegion
@@ -3760,7 +3760,7 @@ publiclong
 
 areWritesEnabled
-publicbooleanareWritesEnabled()
+publicbooleanareWritesEnabled()
 
 
 
@@ -3769,7 +3769,7 @@ publiclong
 
 getMVCC
-publicMultiVersionConcurrencyControlgetMVCC()
+publicMultiVersionConcurrencyControlgetMVCC()
 
 
 
@@ -3778,7 +3778,7 @@ publiclong
 
 getMaxFlushedSeqId
-publiclonggetMaxFlushedSeqId()
+publiclonggetMaxFlushedSeqId()
 
 Specified by:
 getMaxFlushedSeqIdin
 interfaceRegion
@@ -3794,7 +3794,7 @@ publiclong
 
 getReadPoint
-publiclonggetReadPoint(IsolationLevelisolationLevel)
+publiclonggetReadPoint(IsolationLevelisolationLevel)
 
 Returns:
 readpoint considering given IsolationLevel. Pass null for 
default
@@ -3807,7 +3807,7 @@ publiclong
 
 isLoadingCfsOnDemandDefault
-publicbooleanisLoadingCfsOnDemandDefault()
+publicbooleanisLoadingCfsOnDemandDefault()
 
 
 
@@ -3816,7 +3816,7 @@ publiclong
 
 close
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 

[05/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */
-1392  public static final String 
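
The getReadPoint(IsolationLevel) logic above returns Long.MAX_VALUE for READ_UNCOMMITTED, so such a scan can observe cells that have not yet reached the MVCC read point. A hedged client-side sketch of requesting that isolation level on a Scan; the row boundaries are placeholders.

import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;

public class ReadUncommittedScanSketch {
  // Builds a scan that asks the region for the READ_UNCOMMITTED read point,
  // i.e. the Long.MAX_VALUE branch in getReadPoint(IsolationLevel) above.
  public static Scan newReadUncommittedScan(byte[] startRow, byte[] stopRow) {
    Scan scan = new Scan().withStartRow(startRow).withStopRow(stopRow);
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    return scan;
  }
}
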

[46/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index b5ae4ad..327c2f7 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -277,7 +277,8 @@ extends 
 void
-postClearDeadServers()
+postClearDeadServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNamenotClearedServers)
 
 
 void
@@ -2999,13 +3000,14 @@ extends 
+
 
 
 
 
 postClearDeadServers
-publicvoidpostClearDeadServers()
+publicvoidpostClearDeadServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
+ http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNamenotClearedServers)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -3019,7 +3021,7 @@ extends 
 
 preDecommissionRegionServers
-publicvoidpreDecommissionRegionServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
+publicvoidpreDecommissionRegionServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
  booleanoffload)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -3034,7 +3036,7 @@ extends 
 
 postDecommissionRegionServers
-publicvoidpostDecommissionRegionServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
+publicvoidpostDecommissionRegionServers(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
   booleanoffload)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -3049,7 +3051,7 @@ extends 
 
 preListDecommissionedRegionServers
-publicvoidpreListDecommissionedRegionServers()
+publicvoidpreListDecommissionedRegionServers()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -3063,7 +3065,7 @@ extends 
 
 postListDecommissionedRegionServers
-publicvoidpostListDecommissionedRegionServers()
+publicvoidpostListDecommissionedRegionServers()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -3077,7 +3079,7 @@ extends 
 
 preRecommissionRegionServer
-publicvoidpreRecommissionRegionServer(ServerNameserver,
+publicvoidpreRecommissionRegionServer(ServerNameserver,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listbyte[]encodedRegionNames)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -3092,7 +3094,7 @@ extends 
 
 postRecommissionRegionServer
-publicvoidpostRecommissionRegionServer(ServerNameserver,
+publicvoidpostRecommissionRegionServer(ServerNameserver,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listbyte[]encodedRegionNames)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */
-1392  

[22/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 40cd159..2da0903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -260,7 +260,7 @@
 252
 253// Update meta events (for testing)
 254if (hasProcExecutor) {
-255  
getProcedureScheduler().suspendEvent(metaLoadEvent);
+255  metaLoadEvent.suspend();
 256  setFailoverCleanupDone(false);
 257  for (RegionInfo hri: 
getMetaRegionSet()) {
 258setMetaInitialized(hri, false);
@@ -421,1455 +421,1454 @@
 413  }
 414
 415  public boolean 
waitMetaInitialized(final Procedure proc, final RegionInfo regionInfo) {
-416return 
getProcedureScheduler().waitEvent(
-417  
getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc);
-418  }
-419
-420  private void setMetaInitialized(final 
RegionInfo metaRegionInfo, final boolean isInitialized) {
-421assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-422final ProcedureEvent metaInitEvent = 
getMetaInitializedEvent(metaRegionInfo);
-423if (isInitialized) {
-424  
getProcedureScheduler().wakeEvent(metaInitEvent);
-425} else {
-426  
getProcedureScheduler().suspendEvent(metaInitEvent);
-427}
-428  }
-429
-430  private ProcedureEvent 
getMetaInitializedEvent(final RegionInfo metaRegionInfo) {
-431assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-432// TODO: handle multiple meta.
-433return metaInitializedEvent;
-434  }
-435
-436  public boolean waitMetaLoaded(final 
Procedure proc) {
-437return 
getProcedureScheduler().waitEvent(metaLoadEvent, proc);
-438  }
-439
-440  protected void wakeMetaLoadedEvent() 
{
-441
getProcedureScheduler().wakeEvent(metaLoadEvent);
-442assert isMetaLoaded() : "expected 
meta to be loaded";
-443  }
-444
-445  public boolean isMetaLoaded() {
-446return metaLoadEvent.isReady();
-447  }
-448
-449  // 

-450  //  TODO: Sync helpers
-451  // 

-452  public void assignMeta(final RegionInfo 
metaRegionInfo) throws IOException {
-453assignMeta(metaRegionInfo, null);
-454  }
-455
-456  public void assignMeta(final RegionInfo 
metaRegionInfo, final ServerName serverName)
-457  throws IOException {
-458assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-459AssignProcedure proc;
-460if (serverName != null) {
-461  LOG.debug("Try assigning Meta " + 
metaRegionInfo + " to " + serverName);
-462  proc = 
createAssignProcedure(metaRegionInfo, serverName);
-463} else {
-464  LOG.debug("Assigning " + 
metaRegionInfo.getRegionNameAsString());
-465  proc = 
createAssignProcedure(metaRegionInfo, false);
-466}
-467
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
-468  }
-469
-470  /**
-471   * Start a new thread to check if there 
are region servers whose versions are higher than others.
-472   * If so, move all system table regions 
to RS with the highest version to keep compatibility.
-473   * The reason is, RS in new version may 
not be able to access RS in old version when there are
-474   * some incompatible changes.
-475   */
-476  public void 
checkIfShouldMoveSystemRegionAsync() {
-477new Thread(() - {
-478  try {
-479synchronized 
(checkIfShouldMoveSystemRegionLock) {
-480  ListRegionPlan plans = 
new ArrayList();
-481  for (ServerName server : 
getExcludedServersForSystemTable()) {
-482if 
(master.getServerManager().isServerDead(server)) {
-483  // TODO: See HBASE-18494 
and HBASE-18495. Though getExcludedServersForSystemTable()
-484  // considers only online 
servers, the server could be queued for dead server
-485  // processing. As region 
assignments for crashed server is handled by
-486  // ServerCrashProcedure, do 
NOT handle them here. The goal is to handle this through
-487  // regular flow of 
LoadBalancer as a favored node and not to have this special
-488  // handling.
-489  continue;
-490}
-491ListRegionInfo 
regionsShouldMove = getCarryingSystemTables(server);
-492if 
(!regionsShouldMove.isEmpty()) {
-493  for (RegionInfo regionInfo 
: regionsShouldMove) {
-494   
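
The hunk above replaces getProcedureScheduler().suspendEvent(metaLoadEvent) with metaLoadEvent.suspend(): suspend/wake bookkeeping moves from the scheduler onto the ProcedureEvent itself, matching the suspendEvent/waitEvent/wakeEvent removals in the AbstractProcedureScheduler diff earlier in this thread. The following is a toy, self-contained analogue of that design; the class name and the wake() signature are illustrative, not the HBase API.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.Consumer;

final class ToyProcedureEvent {
  private final Queue<Runnable> suspendedProcedures = new ArrayDeque<>();
  private boolean ready;

  // Counterpart of metaLoadEvent.suspend() in the hunk: mark the event not
  // ready so procedures that depend on it park here instead of running.
  public synchronized void suspend() {
    ready = false;
  }

  // If the event is not ready, park the procedure and report that it suspended.
  public synchronized boolean suspendIfNotReady(Runnable procedure) {
    if (!ready) {
      suspendedProcedures.add(procedure);
      return true;
    }
    return false;
  }

  // Mark ready and hand every parked procedure back to the caller's scheduler.
  public synchronized void wake(Consumer<Runnable> scheduler) {
    ready = true;
    Runnable procedure;
    while ((procedure = suspendedProcedures.poll()) != null) {
      scheduler.accept(procedure);
    }
  }

  // Counterpart of metaLoadEvent.isReady() used elsewhere in this source.
  public synchronized boolean isReady() {
    return ready;
  }
}
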

[20/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index 84e9e52..252bcc2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -56,290 +56,293 @@
 048import 
org.apache.yetus.audience.InterfaceAudience;
 049import 
org.apache.zookeeper.KeeperException;
 050
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-052
-053/**
-054 * Store Region State to hbase:meta 
table.
-055 */
-056@InterfaceAudience.Private
-057public class RegionStateStore {
-058  private static final Log LOG = 
LogFactory.getLog(RegionStateStore.class);
-059
-060  /** The delimiter for meta columns for replicaIds > 0 */
-061  protected static final char 
META_REPLICA_ID_DELIMITER = '_';
-062
-063  private final MasterServices master;
-064
-065  private MultiHConnection 
multiHConnection;
-066
-067  public RegionStateStore(final 
MasterServices master) {
-068this.master = master;
-069  }
-070
-071  public void start() throws IOException 
{
-072  }
-073
-074  public void stop() {
-075if (multiHConnection != null) {
-076  multiHConnection.close();
-077  multiHConnection = null;
-078}
-079  }
-080
-081  public interface RegionStateVisitor {
-082void visitRegionState(RegionInfo 
regionInfo, State state,
-083  ServerName regionLocation, 
ServerName lastHost, long openSeqNum);
-084  }
-085
-086  public void visitMeta(final 
RegionStateVisitor visitor) throws IOException {
-087
MetaTableAccessor.fullScanRegions(master.getConnection(), new 
MetaTableAccessor.Visitor() {
-088  final boolean isDebugEnabled = 
LOG.isDebugEnabled();
-089
-090  @Override
-091  public boolean visit(final Result 
r) throws IOException {
-092if (r != null && !r.isEmpty()) {
-093  long st = 0;
-094  if (LOG.isTraceEnabled()) {
-095st = 
System.currentTimeMillis();
-096  }
-097  visitMetaEntry(visitor, r);
-098  if (LOG.isTraceEnabled()) {
-099long et = 
System.currentTimeMillis();
-100LOG.trace("[T] LOAD META PERF 
" + StringUtils.humanTimeDiff(et - st));
-101  }
-102} else if (isDebugEnabled) {
-103  LOG.debug("NULL result from 
meta - ignoring but this is strange.");
-104}
-105return true;
-106  }
-107});
-108  }
-109
-110  private void visitMetaEntry(final 
RegionStateVisitor visitor, final Result result)
-111  throws IOException {
-112final RegionLocations rl = 
MetaTableAccessor.getRegionLocations(result);
-113if (rl == null) return;
-114
-115final HRegionLocation[] locations = 
rl.getRegionLocations();
-116if (locations == null) return;
-117
-118for (int i = 0; i  
locations.length; ++i) {
-119  final HRegionLocation hrl = 
locations[i];
-120  if (hrl == null) continue;
-121
-122  final RegionInfo regionInfo = 
hrl.getRegionInfo();
-123  if (regionInfo == null) continue;
-124
-125  final int replicaId = 
regionInfo.getReplicaId();
-126  final State state = 
getRegionState(result, replicaId);
-127
-128  final ServerName lastHost = 
hrl.getServerName();
-129  final ServerName regionLocation = 
getRegionServer(result, replicaId);
-130  final long openSeqNum = -1;
-131
-132  // TODO: move under trace, now is 
visible for debugging
-133  LOG.info(String.format("Load 
hbase:meta entry region=%s regionState=%s lastHost=%s regionLocation=%s",
-134regionInfo, state, lastHost, 
regionLocation));
-135
-136  
visitor.visitRegionState(regionInfo, state, regionLocation, lastHost, 
openSeqNum);
-137}
-138  }
-139
-140  public void updateRegionLocation(final 
RegionInfo regionInfo, final State state,
-141  final ServerName regionLocation, 
final ServerName lastHost, final long openSeqNum,
-142  final long pid)
-143  throws IOException {
-144if (regionInfo.isMetaRegion()) {
-145  updateMetaLocation(regionInfo, 
regionLocation);
-146} else {
-147  
updateUserRegionLocation(regionInfo, state, regionLocation, lastHost, 
openSeqNum, pid);
-148}
-149  }
-150
-151  public void updateRegionState(final 
long openSeqNum, final long pid,
-152  final RegionState newState, final 
RegionState oldState) throws IOException {
-153
updateRegionLocation(newState.getRegion(), newState.getState(), 
newState.getServerName(),
-154oldState != null ? 
oldState.getServerName() : null, openSeqNum, pid);
-155  }
-156
-157  protected void updateMetaLocation(final 
RegionInfo regionInfo, final 
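
The source above defines RegionStateStore.RegionStateVisitor and visitMeta(), which scans hbase:meta and calls visitRegionState once per region replica with its state, location, last host and open sequence number. A hedged sketch of a visitor implementation follows; obtaining the RegionStateStore instance (normally via master internals) is assumed and not shown.

import java.io.IOException;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;

public class LoggingRegionStateVisitor implements RegionStateStore.RegionStateVisitor {

  @Override
  public void visitRegionState(RegionInfo regionInfo, RegionState.State state,
      ServerName regionLocation, ServerName lastHost, long openSeqNum) {
    // Called once per region replica found while scanning hbase:meta.
    System.out.println("region=" + regionInfo.getEncodedName()
        + " state=" + state
        + " location=" + regionLocation
        + " lastHost=" + lastHost
        + " openSeqNum=" + openSeqNum);
  }

  // Illustrative usage, assuming a RegionStateStore instance is available.
  static void dump(RegionStateStore stateStore) throws IOException {
    stateStore.visitMeta(new LoggingRegionStateVisitor());
  }
}
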

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index 166fc15..07e4dd5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -831,235 +831,236 @@
 823  
org.apache.commons.lang3.ArrayUtils.class,
 824  
com.fasterxml.jackson.databind.ObjectMapper.class,
 825  
com.fasterxml.jackson.core.Versioned.class,
-826  
com.fasterxml.jackson.annotation.JsonView.class);
-827  }
-828
-829  /**
-830   * Returns a classpath string built 
from the content of the "tmpjars" value in {@code conf}.
-831   * Also exposed to shell scripts via 
`bin/hbase mapredcp`.
-832   */
-833  public static String 
buildDependencyClasspath(Configuration conf) {
-834if (conf == null) {
-835  throw new 
IllegalArgumentException("Must provide a configuration object.");
-836}
-837SetString paths = new 
HashSet(conf.getStringCollection("tmpjars"));
-838if (paths.isEmpty()) {
-839  throw new 
IllegalArgumentException("Configuration contains no tmpjars.");
-840}
-841StringBuilder sb = new 
StringBuilder();
-842for (String s : paths) {
-843  // entries can take the form 
'file:/path/to/file.jar'.
-844  int idx = s.indexOf(":");
-845  if (idx != -1) s = s.substring(idx 
+ 1);
-846  if (sb.length() > 0) sb.append(File.pathSeparator);
-847  sb.append(s);
-848}
-849return sb.toString();
-850  }
-851
-852  /**
-853   * Add the HBase dependency jars as 
well as jars for any of the configured
-854   * job classes to the job 
configuration, so that JobClient will ship them
-855   * to the cluster and add them to the 
DistributedCache.
-856   */
-857  public static void 
addDependencyJars(Job job) throws IOException {
-858
addHBaseDependencyJars(job.getConfiguration());
-859try {
-860  
addDependencyJarsForClasses(job.getConfiguration(),
-861  // when making changes here, 
consider also mapred.TableMapReduceUtil
-862  // pull job classes
-863  job.getMapOutputKeyClass(),
-864  job.getMapOutputValueClass(),
-865  job.getInputFormatClass(),
-866  job.getOutputKeyClass(),
-867  job.getOutputValueClass(),
-868  job.getOutputFormatClass(),
-869  job.getPartitionerClass(),
-870  job.getCombinerClass());
-871} catch (ClassNotFoundException e) 
{
-872  throw new IOException(e);
-873}
-874  }
-875
-876  /**
-877   * Add the jars containing the given 
classes to the job's configuration
-878   * such that JobClient will ship them 
to the cluster and add them to
-879   * the DistributedCache.
-880   * @deprecated rely on {@link 
#addDependencyJars(Job)} instead.
-881   */
-882  @Deprecated
-883  public static void 
addDependencyJars(Configuration conf,
-884  Class?... classes) throws 
IOException {
-885LOG.warn("The 
addDependencyJars(Configuration, Class?...) method has been deprecated 
since it"
-886 + " is easy to use 
incorrectly. Most users should rely on addDependencyJars(Job) " +
-887 "instead. See HBASE-8386 for 
more details.");
-888addDependencyJarsForClasses(conf, 
classes);
-889  }
-890
-891  /**
-892   * Add the jars containing the given 
classes to the job's configuration
-893   * such that JobClient will ship them 
to the cluster and add them to
-894   * the DistributedCache.
-895   *
-896   * N.B. that this method at most adds 
one jar per class given. If there is more than one
-897   * jar available containing a class 
with the same name as a given class, we don't define
-898   * which of those jars might be 
chosen.
-899   *
-900   * @param conf The Hadoop Configuration 
to modify
-901   * @param classes will add just those 
dependencies needed to find the given classes
-902   * @throws IOException if an underlying 
library call fails.
-903   */
-904  @InterfaceAudience.Private
-905  public static void 
addDependencyJarsForClasses(Configuration conf,
-906  Class?... classes) throws 
IOException {
-907
-908FileSystem localFs = 
FileSystem.getLocal(conf);
-909SetString jars = new 
HashSet();
-910// Add jars that are already in the 
tmpjars variable
-911
jars.addAll(conf.getStringCollection("tmpjars"));
-912
-913// add jars as we find them to a map 
of contents jar name so that we can avoid
-914// creating new jars for classes that 
have already been packaged.
-915MapString, String 
packagedClasses = new HashMap();
-916
-917// Add jars containing the specified 
classes
-918for (Class? clazz : classes) 
{
-919  if (clazz == null) continue;
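
The javadoc above explains addDependencyJars(Job), which populates tmpjars so the JobClient ships HBase jars plus the job's configured classes to the cluster via the DistributedCache, and buildDependencyClasspath(Configuration), which renders that same tmpjars content as a local classpath string (what bin/hbase mapredcp prints). A minimal sketch of the two calls; the job name is a placeholder and the mapper/input/output setup is omitted.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-hbase-job");

    // Populates tmpjars with HBase jars plus jars for the job's configured
    // classes, so the cluster side resolves them from the DistributedCache.
    TableMapReduceUtil.addDependencyJars(job);

    // The same tmpjars content rendered as a local classpath string; this is
    // what `bin/hbase mapredcp` exposes to shell scripts.
    String classpath = TableMapReduceUtil.buildDependencyClasspath(job.getConfiguration());
    System.out.println(classpath);
  }
}
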

[42/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
index 173b46a..02d5974 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static interface RegionStateStore.RegionStateVisitor
+public static interface RegionStateStore.RegionStateVisitor
 
 
 
@@ -153,7 +153,7 @@ var activeTableTab = "activeTableTab";
 
 
 visitRegionState
-voidvisitRegionState(RegionInforegionInfo,
+voidvisitRegionState(RegionInforegionInfo,
   RegionState.Statestate,
   ServerNameregionLocation,
   ServerNamelastHost,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index ce08d4c..306bcd6 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":9,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionStateStore
+public class RegionStateStore
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Store Region State to hbase:meta table.
 
@@ -221,7 +221,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-protected RegionState.State
+static RegionState.State
 getRegionState(Resultr,
   intreplicaId)
 Pull the region state from a catalog table Result.
@@ -342,7 +342,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -351,7 +351,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 META_REPLICA_ID_DELIMITER
-protected static finalchar META_REPLICA_ID_DELIMITER
+protected static finalchar META_REPLICA_ID_DELIMITER
 The delimiter for meta columns for replicaIds > 0
 
 See Also:
@@ -365,7 +365,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 master
-private finalMasterServices master
+private finalMasterServices master
 
 
 
@@ -374,7 +374,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 multiHConnection
-privateMultiHConnection multiHConnection
+privateMultiHConnection multiHConnection
 
 
 
@@ -391,7 +391,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionStateStore
-publicRegionStateStore(MasterServicesmaster)
+publicRegionStateStore(MasterServicesmaster)
 
 
 
@@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 start
-publicvoidstart()
+publicvoidstart()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -422,7 +422,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 stop
-publicvoidstop()
+publicvoidstop()
 
 
 
@@ -431,7 +431,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 visitMeta
-publicvoidvisitMeta(RegionStateStore.RegionStateVisitorvisitor)
+publicvoidvisitMeta(RegionStateStore.RegionStateVisitorvisitor)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -445,7 +445,7 @@ extends 

[32/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index c2b4126..aa7897b 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -1456,7 +1456,7 @@ implements MasterObserver
-postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction, postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable, postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListSnapshot, postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRequestLock, postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota, postSnapshot,
 postTableFlush,
 postUnassign,
 postUpdateReplicationPeerConfig, preCreateTableAction,
 preDeleteTableAction,
 preDisableTableAction,
 preEnableTableAction,
 preGetTableNames,
 preListNamespaceDescriptors,
 preMasterInitialization,
 preMergeRegionsAction,
 preMergeRegionsCommitAction, preModifyTableAction,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preTruncateTableAction
+postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 
 postCompletedModifyTableAction, postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable, postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterStatus,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListSnapshot, postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRequestLock, postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 
 postSetNamespaceQuota, postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota, postSnapshot,
 postTableFlush,
 postUnassign,
 postUpdateReplicationPeerConfig, preCreateTableAction,
 preDeleteTableAction,
 preDisableTableAction,
 preEnableTableAction,
 preGetTableNames,
 preListNamespaceDescriptors,
 preMasterInitialization,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyTableAction,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction, preTruncateTableAction
 
 
 
@@ -1834,7 +1834,7 @@ implements 
 
 requirePermission
-privatevoidrequirePermission(Useruser,
+privatevoidrequirePermission(Useruser,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrequest,
TableNametableName,
byte[]family,
@@ -1860,7 +1860,7 @@ implements 
 
 requireTablePermission
-privatevoidrequireTablePermission(Useruser,
+privatevoidrequireTablePermission(Useruser,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringrequest,
 TableNametableName,
 byte[]family,
@@ -1886,7 +1886,7 @@ implements 
 
 requireAccess
-privatevoidrequireAccess(Useruser,
+privatevoidrequireAccess(Useruser,

[23/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 40cd159..2da0903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -260,7 +260,7 @@
 252
 253// Update meta events (for testing)
 254if (hasProcExecutor) {
-255  
getProcedureScheduler().suspendEvent(metaLoadEvent);
+255  metaLoadEvent.suspend();
 256  setFailoverCleanupDone(false);
 257  for (RegionInfo hri: 
getMetaRegionSet()) {
 258setMetaInitialized(hri, false);
@@ -421,1455 +421,1454 @@
 413  }
 414
 415  public boolean 
waitMetaInitialized(final Procedure proc, final RegionInfo regionInfo) {
-416return 
getProcedureScheduler().waitEvent(
-417  
getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc);
-418  }
-419
-420  private void setMetaInitialized(final 
RegionInfo metaRegionInfo, final boolean isInitialized) {
-421assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-422final ProcedureEvent metaInitEvent = 
getMetaInitializedEvent(metaRegionInfo);
-423if (isInitialized) {
-424  
getProcedureScheduler().wakeEvent(metaInitEvent);
-425} else {
-426  
getProcedureScheduler().suspendEvent(metaInitEvent);
-427}
-428  }
-429
-430  private ProcedureEvent 
getMetaInitializedEvent(final RegionInfo metaRegionInfo) {
-431assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-432// TODO: handle multiple meta.
-433return metaInitializedEvent;
-434  }
-435
-436  public boolean waitMetaLoaded(final 
Procedure proc) {
-437return 
getProcedureScheduler().waitEvent(metaLoadEvent, proc);
-438  }
-439
-440  protected void wakeMetaLoadedEvent() 
{
-441
getProcedureScheduler().wakeEvent(metaLoadEvent);
-442assert isMetaLoaded() : "expected 
meta to be loaded";
-443  }
-444
-445  public boolean isMetaLoaded() {
-446return metaLoadEvent.isReady();
-447  }
-448
-449  // 

-450  //  TODO: Sync helpers
-451  // 

-452  public void assignMeta(final RegionInfo 
metaRegionInfo) throws IOException {
-453assignMeta(metaRegionInfo, null);
-454  }
-455
-456  public void assignMeta(final RegionInfo 
metaRegionInfo, final ServerName serverName)
-457  throws IOException {
-458assert isMetaRegion(metaRegionInfo) : 
"unexpected non-meta region " + metaRegionInfo;
-459AssignProcedure proc;
-460if (serverName != null) {
-461  LOG.debug("Try assigning Meta " + 
metaRegionInfo + " to " + serverName);
-462  proc = 
createAssignProcedure(metaRegionInfo, serverName);
-463} else {
-464  LOG.debug("Assigning " + 
metaRegionInfo.getRegionNameAsString());
-465  proc = 
createAssignProcedure(metaRegionInfo, false);
-466}
-467
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
-468  }
-469
-470  /**
-471   * Start a new thread to check if there 
are region servers whose versions are higher than others.
-472   * If so, move all system table regions 
to RS with the highest version to keep compatibility.
-473   * The reason is, RS in new version may 
not be able to access RS in old version when there are
-474   * some incompatible changes.
-475   */
-476  public void 
checkIfShouldMoveSystemRegionAsync() {
-477new Thread(() - {
-478  try {
-479synchronized 
(checkIfShouldMoveSystemRegionLock) {
-480  ListRegionPlan plans = 
new ArrayList();
-481  for (ServerName server : 
getExcludedServersForSystemTable()) {
-482if 
(master.getServerManager().isServerDead(server)) {
-483  // TODO: See HBASE-18494 
and HBASE-18495. Though getExcludedServersForSystemTable()
-484  // considers only online 
servers, the server could be queued for dead server
-485  // processing. As region 
assignments for crashed server is handled by
-486  // ServerCrashProcedure, do 
NOT handle them here. The goal is to handle this through
-487  // regular flow of 
LoadBalancer as a favored node and not to have this special
-488  // handling.
-489  continue;
-490}
-491ListRegionInfo 
regionsShouldMove = getCarryingSystemTables(server);
-492  

[44/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index d58ea43..3f7ab79 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class AssignmentManager.RegionInTransitionChore
+private static class AssignmentManager.RegionInTransitionChore
 extends ProcedureInMemoryChoreMasterProcedureEnv
 
 
@@ -240,7 +240,7 @@ extends 
 
 RegionInTransitionChore
-publicRegionInTransitionChore(inttimeoutMsec)
+publicRegionInTransitionChore(inttimeoutMsec)
 
 
 
@@ -257,7 +257,7 @@ extends 
 
 periodicExecute
-protectedvoidperiodicExecute(MasterProcedureEnvenv)
+protectedvoidperiodicExecute(MasterProcedureEnvenv)
 
 Specified by:
 periodicExecutein
 classProcedureInMemoryChoreMasterProcedureEnv

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 03b053f..86939b7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class AssignmentManager.RegionInTransitionStat
+public static class AssignmentManager.RegionInTransitionStat
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ritThreshold
-private finalint ritThreshold
+private finalint ritThreshold
 
 
 
@@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ritsOverThreshold
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true;
 title="class or interface in java.util">HashMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,RegionState ritsOverThreshold
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true;
 title="class or interface in java.util">HashMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,RegionState ritsOverThreshold
 
 
 
@@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 statTimestamp
-privatelong statTimestamp
+privatelong statTimestamp
 
 
 
@@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 oldestRITTime
-privatelong oldestRITTime
+privatelong oldestRITTime
 
 
 
@@ -302,7 +302,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 totalRITsTwiceThreshold
-privateint totalRITsTwiceThreshold
+privateint totalRITsTwiceThreshold
 
 
 
@@ -311,7 +311,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 totalRITs
-privateint totalRITs
+privateint totalRITs
 
 
 
@@ -328,7 +328,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionInTransitionStat
-publicRegionInTransitionStat(org.apache.hadoop.conf.Configurationconf)
+publicRegionInTransitionStat(org.apache.hadoop.conf.Configurationconf)
 
 
 
@@ -345,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRITThreshold
-publicintgetRITThreshold()
+publicintgetRITThreshold()
 
 
 
@@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTimestamp
-publiclonggetTimestamp()
+publiclonggetTimestamp()
 
 
 
@@ -363,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTotalRITs
-public int getTotalRITs()
+public int getTotalRITs()
 
 
 
@@ -372,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getOldestRITTime
-public long getOldestRITTime()
+public long getOldestRITTime()
 
 
 
@@ -381,7 +381,7 @@ extends 
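
The fields and getters listed above describe a plain statistics holder for regions in transition. The sketch below mirrors those names in a self-contained class; the update() logic, and using Long ages instead of RegionState values, are assumptions made only to keep the example runnable:

import java.util.HashMap;
import java.util.Map;

// Minimal stand-in for the stat holder documented above; not the HBase class itself.
class RitStatSketch {
  private final int ritThreshold;
  private final HashMap<String, Long> ritsOverThreshold = new HashMap<>();
  private long statTimestamp;
  private long oldestRITTime;
  private int totalRITsTwiceThreshold;
  private int totalRITs;

  RitStatSketch(int ritThreshold) {
    this.ritThreshold = ritThreshold;
  }

  /** Recomputes the counters from a map of region name -> time in transition (ms). */
  void update(Map<String, Long> ritAgesMs) {
    statTimestamp = System.currentTimeMillis();
    ritsOverThreshold.clear();
    oldestRITTime = 0;
    totalRITsTwiceThreshold = 0;
    totalRITs = ritAgesMs.size();
    for (Map.Entry<String, Long> e : ritAgesMs.entrySet()) {
      long age = e.getValue();
      oldestRITTime = Math.max(oldestRITTime, age);
      if (age > ritThreshold) {
        ritsOverThreshold.put(e.getKey(), age);
        if (age > 2L * ritThreshold) {
          totalRITsTwiceThreshold++;
        }
      }
    }
  }

  int getRITThreshold() { return ritThreshold; }
  long getTimestamp() { return statTimestamp; }
  int getTotalRITs() { return totalRITs; }
  long getOldestRITTime() { return oldestRITTime; }
}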

[29/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
index 6b6d668..275825f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class VisibilityUtils
+public class VisibilityUtils
extends java.lang.Object
 Utility method to support visibility
 
@@ -306,7 +306,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -315,7 +315,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 VISIBILITY_LABEL_GENERATOR_CLASS
-public static final String VISIBILITY_LABEL_GENERATOR_CLASS
+public static final String VISIBILITY_LABEL_GENERATOR_CLASS
 
 See Also:
 Constant
 Field Values
@@ -328,7 +328,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SYSTEM_LABEL
-public static final String SYSTEM_LABEL
+public static final String SYSTEM_LABEL
 
 See Also:
 Constant
 Field Values
@@ -341,7 +341,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
-public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
+public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
 
 
 
@@ -350,7 +350,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 COMMA
-private static final String COMMA
+private static final String COMMA
 
 See Also:
 Constant
 Field Values
@@ -363,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EXP_PARSER
-private static final ExpressionParser EXP_PARSER
+private static final ExpressionParser EXP_PARSER
 
 
 
@@ -372,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EXP_EXPANDER
-private static final ExpressionExpander EXP_EXPANDER
+private static final ExpressionExpander EXP_EXPANDER
 
 
 
@@ -389,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 VisibilityUtils
-public VisibilityUtils()
+public VisibilityUtils()
 
 
 
@@ -406,7 +406,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getDataToWriteToZooKeeper
-public static byte[] getDataToWriteToZooKeeper(Map<String, Integer> existingLabels)
+public static byte[] getDataToWriteToZooKeeper(Map<String, Integer> existingLabels)
 Creates the labels data to be written to zookeeper.
 
 Parameters:
@@ -422,7 +422,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getUserAuthsDataToWriteToZooKeeper
-public static byte[] getUserAuthsDataToWriteToZooKeeper(Map<String,

[10/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key 
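
The hunk above keeps getReadPoint(IsolationLevel) unchanged while dropping the temporary isSplittable() logging. Restated as a standalone sketch, with simplified stand-ins for the enum and MVCC rather than the real HBase classes:

// Standalone restatement of the read-point rule shown in the diff above.
class ReadPointSketch {
  enum IsolationLevel { READ_COMMITTED, READ_UNCOMMITTED }

  /** Trivial stand-in for MultiVersionConcurrencyControl. */
  static class Mvcc {
    private final long readPoint;
    Mvcc(long readPoint) { this.readPoint = readPoint; }
    long getReadPoint() { return readPoint; }
  }

  private final Mvcc mvcc = new Mvcc(42L);

  /** Pass {@code null} to get the default (committed) read point. */
  long getReadPoint(IsolationLevel isolationLevel) {
    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      // Such a scan may read data that is not yet committed.
      return Long.MAX_VALUE;
    }
    return mvcc.getReadPoint();
  }

  public static void main(String[] args) {
    ReadPointSketch r = new ReadPointSketch();
    System.out.println(r.getReadPoint(null));                            // 42
    System.out.println(r.getReadPoint(IsolationLevel.READ_UNCOMMITTED)); // Long.MAX_VALUE
  }
}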

[27/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 860d344..9fa61e7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -1604,71 +1604,72 @@
 1596});
 1597  }
 1598
-1599  public void postClearDeadServers() 
throws IOException {
-1600
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1601  @Override
-1602  public void call(MasterObserver 
observer) throws IOException {
-1603
observer.postClearDeadServers(this);
-1604  }
-1605});
-1606  }
-1607
-1608  public void 
preDecommissionRegionServers(List<ServerName> servers, boolean offload) 
throws IOException {
-1609
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1610  @Override
-1611  public void call(MasterObserver 
observer) throws IOException {
-1612
observer.preDecommissionRegionServers(this, servers, offload);
-1613  }
-1614});
-1615  }
-1616
-1617  public void 
postDecommissionRegionServers(List<ServerName> servers, boolean offload) 
throws IOException {
-1618
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1619  @Override
-1620  public void call(MasterObserver 
observer) throws IOException {
-1621
observer.postDecommissionRegionServers(this, servers, offload);
-1622  }
-1623});
-1624  }
-1625
-1626  public void 
preListDecommissionedRegionServers() throws IOException {
-1627
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1628  @Override
-1629  public void call(MasterObserver 
observer) throws IOException {
-1630
observer.preListDecommissionedRegionServers(this);
-1631  }
-1632});
-1633  }
-1634
-1635  public void 
postListDecommissionedRegionServers() throws IOException {
-1636
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1637  @Override
-1638  public void call(MasterObserver 
observer) throws IOException {
-1639
observer.postListDecommissionedRegionServers(this);
-1640  }
-1641});
-1642  }
-1643
-1644  public void 
preRecommissionRegionServer(ServerName server, List<byte[]> 
encodedRegionNames)
-1645  throws IOException {
-1646
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1647  @Override
-1648  public void call(MasterObserver 
observer) throws IOException {
-1649
observer.preRecommissionRegionServer(this, server, encodedRegionNames);
-1650  }
-1651});
-1652  }
-1653
-1654  public void 
postRecommissionRegionServer(ServerName server, List<byte[]> 
encodedRegionNames)
-1655  throws IOException {
-1656
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-1657  @Override
-1658  public void call(MasterObserver 
observer) throws IOException {
-1659
observer.postRecommissionRegionServer(this, server, encodedRegionNames);
-1660  }
-1661});
-1662  }
-1663}
+1599  public void 
postClearDeadServers(List<ServerName> servers,
+1600      List<ServerName> notClearedServers) throws IOException {
+1601
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+1602  @Override
+1603  public void call(MasterObserver 
observer) throws IOException {
+1604
observer.postClearDeadServers(this, servers, notClearedServers);
+1605  }
+1606});
+1607  }
+1608
+1609  public void 
preDecommissionRegionServers(List<ServerName> servers, boolean offload) 
throws IOException {
+1610
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+1611  @Override
+1612  public void call(MasterObserver 
observer) throws IOException {
+1613
observer.preDecommissionRegionServers(this, servers, offload);
+1614  }
+1615});
+1616  }
+1617
+1618  public void 
postDecommissionRegionServers(List<ServerName> servers, boolean offload) 
throws IOException {
+1619
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+1620  @Override
+1621  public void call(MasterObserver 
observer) throws IOException {
+1622
observer.postDecommissionRegionServers(this, servers, offload);
+1623  }
+1624});
+1625  }
+1626
+1627  public void 
preListDecommissionedRegionServers() throws IOException {
+1628
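
Every hunk above follows the same execOperation(...) idiom: build an anonymous observer operation only when at least one coprocessor is loaded, then replay it against every observer. A simplified, non-HBase model of that idiom; all interfaces and names here are invented stand-ins, not the coprocessor API:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class ObserverHostSketch {
  interface Observer {
    void postClearDeadServers(List<String> servers, List<String> notClearedServers);
  }

  interface ObserverOperation {
    void call(Observer observer);
  }

  private final List<Observer> observers = new ArrayList<>();

  void register(Observer o) { observers.add(o); }

  /** Mirrors execOperation(): a null operation means "no observers, nothing to do". */
  private void execOperation(ObserverOperation op) {
    if (op == null) {
      return;
    }
    for (Observer o : observers) {
      op.call(o);
    }
  }

  void postClearDeadServers(List<String> servers, List<String> notCleared) {
    execOperation(observers.isEmpty() ? null : observer ->
        observer.postClearDeadServers(servers, notCleared));
  }

  public static void main(String[] args) {
    ObserverHostSketch host = new ObserverHostSketch();
    host.register((servers, notCleared) ->
        System.out.println("cleared=" + servers + " notCleared=" + notCleared));
    host.postClearDeadServers(Arrays.asList("rs1,16020,1511912345678"), Collections.emptyList());
  }
}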

[21/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
index 84e9e52..252bcc2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
@@ -56,290 +56,293 @@
 048import 
org.apache.yetus.audience.InterfaceAudience;
 049import 
org.apache.zookeeper.KeeperException;
 050
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-052
-053/**
-054 * Store Region State to hbase:meta 
table.
-055 */
-056@InterfaceAudience.Private
-057public class RegionStateStore {
-058  private static final Log LOG = 
LogFactory.getLog(RegionStateStore.class);
-059
-060  /** The delimiter for meta columns for 
replicaIds gt; 0 */
-061  protected static final char 
META_REPLICA_ID_DELIMITER = '_';
-062
-063  private final MasterServices master;
-064
-065  private MultiHConnection 
multiHConnection;
-066
-067  public RegionStateStore(final 
MasterServices master) {
-068this.master = master;
-069  }
-070
-071  public void start() throws IOException 
{
-072  }
-073
-074  public void stop() {
-075if (multiHConnection != null) {
-076  multiHConnection.close();
-077  multiHConnection = null;
-078}
-079  }
-080
-081  public interface RegionStateVisitor {
-082void visitRegionState(RegionInfo 
regionInfo, State state,
-083  ServerName regionLocation, 
ServerName lastHost, long openSeqNum);
-084  }
-085
-086  public void visitMeta(final 
RegionStateVisitor visitor) throws IOException {
-087
MetaTableAccessor.fullScanRegions(master.getConnection(), new 
MetaTableAccessor.Visitor() {
-088  final boolean isDebugEnabled = 
LOG.isDebugEnabled();
-089
-090  @Override
-091  public boolean visit(final Result 
r) throws IOException {
-092if (r != null && !r.isEmpty()) {
-093  long st = 0;
-094  if (LOG.isTraceEnabled()) {
-095st = 
System.currentTimeMillis();
-096  }
-097  visitMetaEntry(visitor, r);
-098  if (LOG.isTraceEnabled()) {
-099long et = 
System.currentTimeMillis();
-100LOG.trace("[T] LOAD META PERF 
" + StringUtils.humanTimeDiff(et - st));
-101  }
-102} else if (isDebugEnabled) {
-103  LOG.debug("NULL result from 
meta - ignoring but this is strange.");
-104}
-105return true;
-106  }
-107});
-108  }
-109
-110  private void visitMetaEntry(final 
RegionStateVisitor visitor, final Result result)
-111  throws IOException {
-112final RegionLocations rl = 
MetaTableAccessor.getRegionLocations(result);
-113if (rl == null) return;
-114
-115final HRegionLocation[] locations = 
rl.getRegionLocations();
-116if (locations == null) return;
-117
-118for (int i = 0; i < locations.length; ++i) {
-119  final HRegionLocation hrl = 
locations[i];
-120  if (hrl == null) continue;
-121
-122  final RegionInfo regionInfo = 
hrl.getRegionInfo();
-123  if (regionInfo == null) continue;
-124
-125  final int replicaId = 
regionInfo.getReplicaId();
-126  final State state = 
getRegionState(result, replicaId);
-127
-128  final ServerName lastHost = 
hrl.getServerName();
-129  final ServerName regionLocation = 
getRegionServer(result, replicaId);
-130  final long openSeqNum = -1;
-131
-132  // TODO: move under trace, now is 
visible for debugging
-133  LOG.info(String.format("Load 
hbase:meta entry region=%s regionState=%s lastHost=%s regionLocation=%s",
-134regionInfo, state, lastHost, 
regionLocation));
-135
-136  
visitor.visitRegionState(regionInfo, state, regionLocation, lastHost, 
openSeqNum);
-137}
-138  }
-139
-140  public void updateRegionLocation(final 
RegionInfo regionInfo, final State state,
-141  final ServerName regionLocation, 
final ServerName lastHost, final long openSeqNum,
-142  final long pid)
-143  throws IOException {
-144if (regionInfo.isMetaRegion()) {
-145  updateMetaLocation(regionInfo, 
regionLocation);
-146} else {
-147  
updateUserRegionLocation(regionInfo, state, regionLocation, lastHost, 
openSeqNum, pid);
-148}
-149  }
-150
-151  public void updateRegionState(final 
long openSeqNum, final long pid,
-152  final RegionState newState, final 
RegionState oldState) throws IOException {
-153
updateRegionLocation(newState.getRegion(), newState.getState(), 
newState.getServerName(),
-154oldState != null ? 
oldState.getServerName() : null, openSeqNum, 
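
RegionStateStore (shown above) hands each hbase:meta row to a RegionStateVisitor callback. A stripped-down sketch of that visitor contract, using String and an invented RegionRow type as stand-ins for RegionInfo, ServerName and Result:

import java.util.Arrays;
import java.util.List;

class RegionStateVisitorSketch {
  interface RegionStateVisitor {
    void visitRegionState(String regionName, String state,
        String regionLocation, String lastHost, long openSeqNum);
  }

  /** Simplified stand-in for a row read from hbase:meta. */
  static class RegionRow {
    final String name, state, location, lastHost;
    RegionRow(String name, String state, String location, String lastHost) {
      this.name = name; this.state = state; this.location = location; this.lastHost = lastHost;
    }
  }

  /** Plays the role of visitMeta(): walk every row and hand it to the visitor. */
  static void visitAll(List<RegionRow> rows, RegionStateVisitor visitor) {
    for (RegionRow r : rows) {
      visitor.visitRegionState(r.name, r.state, r.location, r.lastHost, -1L);
    }
  }

  public static void main(String[] args) {
    List<RegionRow> rows = Arrays.asList(
        new RegionRow("t1,,1.abc", "OPEN", "rs1,16020,1", "rs1,16020,1"),
        new RegionRow("t1,row500,2.def", "OPENING", "rs2,16020,1", "rs1,16020,1"));
    visitAll(rows, (name, state, loc, last, seq) ->
        System.out.println(name + " state=" + state + " at " + loc));
  }
}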

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9f95f7932 -> b9722a17b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
index c552d8a..a792ab2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
@@ -48,922 +48,928 @@
 040import 
java.util.concurrent.ThreadPoolExecutor;
 041import java.util.concurrent.TimeUnit;
 042import 
java.util.concurrent.atomic.AtomicLong;
-043
-044import org.apache.commons.logging.Log;
-045import 
org.apache.commons.logging.LogFactory;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor;
-051import org.apache.hadoop.hbase.Server;
-052import 
org.apache.hadoop.hbase.TableDescriptors;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import 
org.apache.hadoop.hbase.client.Connection;
-056import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-057import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-058import 
org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
-059import 
org.apache.hadoop.hbase.replication.ReplicationEndpoint;
-060import 
org.apache.hadoop.hbase.replication.ReplicationException;
-061import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-062import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-063import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-064import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-065import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-066import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
-067import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.util.Pair;
-070import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-071
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-073import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-074
-075/**
-076 * This class is responsible to manage 
all the replication
-077 * sources. There are two classes of 
sources:
-078 * <ul>
-079 * <li>Normal sources are persistent and one per peer cluster</li>
-080 * <li>Old sources are recovered from a failed region server and our
-081 * only goal is to finish replicating the WAL queue it had up in ZK</li>
-082 * </ul>
-083 *
-084 * When a region server dies, this class 
uses a watcher to get notified and it
-085 * tries to grab a lock in order to 
transfer all the queues in a local
-086 * old source.
-087 *
-088 * This class implements the 
ReplicationListener interface so that it can track changes in
-089 * replication state.
-090 */
-091@InterfaceAudience.Private
-092public class ReplicationSourceManager 
implements ReplicationListener {
-093  private static final Log LOG =
-094  
LogFactory.getLog(ReplicationSourceManager.class);
-095  // List of all the sources that read 
this RS's logs
-096  private final 
List<ReplicationSourceInterface> sources;
-097  // List of all the sources we got from 
died RSs
-098  private final 
List<ReplicationSourceInterface> oldsources;
-099  private final ReplicationQueues 
replicationQueues;
-100  private final ReplicationTracker 
replicationTracker;
-101  private final ReplicationPeers 
replicationPeers;
-102  // UUID for this cluster
-103  private final UUID clusterId;
-104  // All about stopping
-105  private final Server server;
-106  // All logs we are currently tracking
-107  // Index structure of the map is: peer_id->logPrefix/logGroup->logs
-108  private final Map<String, Map<String, SortedSet<String>>> walsById;
-109  // Logs for recovered sources we are 
currently tracking
-110  private final Map<String, Map<String, SortedSet<String>>> walsByIdRecoveredQueues;
-111  private final Configuration conf;
-112  private final FileSystem fs;
-113  // The paths to the latest log of each 
wal group, for new coming peers
-114  private SetPath latestPaths;
-115  // Path to the wals directories
-116  private final Path logDir;
-117  // Path to the wal archive
-118  private final Path oldLogDir;
-119  private final WALFileLengthProvider 
walFileLengthProvider;
-120  // The number of ms 
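
The walsById comment above describes a two-level index: peer id, then wal group (log prefix), then a sorted set of wal names. A self-contained sketch of that structure with invented sample values:

import java.util.Map;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

class WalsByIdSketch {
  // peer_id -> wal group -> sorted set of wal names, as described in the comment above.
  private final Map<String, Map<String, SortedSet<String>>> walsById = new TreeMap<>();

  void addWal(String peerId, String walGroup, String walName) {
    walsById.computeIfAbsent(peerId, p -> new TreeMap<>())
            .computeIfAbsent(walGroup, g -> new TreeSet<>())
            .add(walName);
  }

  SortedSet<String> walsFor(String peerId, String walGroup) {
    Map<String, SortedSet<String>> byGroup = walsById.get(peerId);
    if (byGroup == null) {
      return new TreeSet<>();
    }
    SortedSet<String> wals = byGroup.get(walGroup);
    return wals == null ? new TreeSet<>() : wals;
  }

  public static void main(String[] args) {
    WalsByIdSketch idx = new WalsByIdSketch();
    idx.addWal("peer1", "rs1%2C16020%2C1", "rs1%2C16020%2C1.1511900000001");
    idx.addWal("peer1", "rs1%2C16020%2C1", "rs1%2C16020%2C1.1511900000002");
    System.out.println(idx.walsFor("peer1", "rs1%2C16020%2C1"));
  }
}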

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
index 10da76f..432de3f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
@@ -26,135 +26,109 @@
 018
 019package 
org.apache.hadoop.hbase.procedure2;
 020
-021import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-022
+021import 
com.google.common.annotations.VisibleForTesting;
+022import java.util.Iterator;
 023import java.util.List;
 024import java.util.concurrent.TimeUnit;
-025
-026import 
org.apache.yetus.audience.InterfaceAudience;
-027
-028/**
-029 * Keep track of the runnable 
procedures
-030 */
-031@InterfaceAudience.Private
-032public interface ProcedureScheduler {
-033  /**
-034   * Start the scheduler
-035   */
-036  void start();
-037
-038  /**
-039   * Stop the scheduler
-040   */
-041  void stop();
-042
-043  /**
-044   * In case the class is blocking on 
poll() waiting for items to be added,
-045   * this method should awake poll() and 
poll() should return.
-046   */
-047  void signalAll();
-048
-049  /**
-050   * Inserts the specified element at the 
front of this queue.
-051   * @param proc the Procedure to add
-052   */
-053  void addFront(Procedure proc);
-054
-055  /**
-056   * Inserts the specified element at the 
end of this queue.
-057   * @param proc the Procedure to add
-058   */
-059  void addBack(Procedure proc);
-060
-061  /**
-062   * The procedure can't run at the 
moment.
-063   * add it back to the queue, giving 
priority to someone else.
-064   * @param proc the Procedure to add 
back to the list
-065   */
-066  void yield(Procedure proc);
-067
-068  /**
-069   * The procedure in execution 
completed.
-070   * This can be implemented to perform 
cleanups.
-071   * @param proc the Procedure that 
completed the execution.
-072   */
-073  void completionCleanup(Procedure 
proc);
-074
-075  /**
-076   * @return true if there are procedures 
available to process, otherwise false.
-077   */
-078  boolean hasRunnables();
-079
-080  /**
-081   * Fetch one Procedure from the queue
-082   * @return the Procedure to execute, or 
null if nothing present.
-083   */
-084  Procedure poll();
-085
-086  /**
-087   * Fetch one Procedure from the queue
-088   * @param timeout how long to wait 
before giving up, in units of unit
-089   * @param unit a TimeUnit determining 
how to interpret the timeout parameter
-090   * @return the Procedure to execute, or 
null if nothing present.
-091   */
-092  Procedure poll(long timeout, TimeUnit 
unit);
-093
-094  /**
-095   * Mark the event as not ready.
-096   * Procedures calling waitEvent() will 
be suspended.
-097   * @param event the event to mark as 
suspended/not ready
-098   */
-099  void suspendEvent(ProcedureEvent 
event);
-100
-101  /**
-102   * Wake every procedure waiting for the 
specified event
-103   * (By design each event has only one 
"wake" caller)
-104   * @param event the event to wait
-105   */
-106  void wakeEvent(ProcedureEvent event);
-107
-108  /**
-109   * Wake every procedure waiting for the 
specified events.
-110   * (By design each event has only one 
"wake" caller)
-111   * @param count the number of events in 
the array to wake
-112   * @param events the list of events to 
wake
-113   */
-114  void wakeEvents(int count, 
ProcedureEvent... events);
+025import 
org.apache.yetus.audience.InterfaceAudience;
+026
+027/**
+028 * Keep track of the runnable 
procedures
+029 */
+030@InterfaceAudience.Private
+031public interface ProcedureScheduler {
+032  /**
+033   * Start the scheduler
+034   */
+035  void start();
+036
+037  /**
+038   * Stop the scheduler
+039   */
+040  void stop();
+041
+042  /**
+043   * In case the class is blocking on 
poll() waiting for items to be added,
+044   * this method should awake poll() and 
poll() should return.
+045   */
+046  void signalAll();
+047
+048  /**
+049   * Inserts the specified element at the 
front of this queue.
+050   * @param proc the Procedure to add
+051   */
+052  void addFront(Procedure proc);
+053
+054  /**
+055   * Inserts all elements in the iterator 
at the front of this queue.
+056   */
+057  void addFront(Iterator<Procedure> procedureIterator);
+058
+059  /**
+060   * Inserts the specified element at the 
end of this queue.
+061   * @param proc the Procedure to add
+062   */
+063  void addBack(Procedure proc);
+064
+065  /**
+066   * The procedure can't run at the 
moment.
+067   * add it back to the queue, giving 
priority to someone else.
+068   * @param proc the Procedure to add 
back to the list
+069   */
+070  void 
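
The interface above boils down to a double-ended queue of runnable procedures. A minimal, single-threaded sketch of that contract; the real implementations add locking, priorities and event suspension, all omitted here:

import java.util.ArrayDeque;
import java.util.Deque;

class SimpleSchedulerSketch<P> {
  private final Deque<P> runnables = new ArrayDeque<>();

  /** Inserts the element at the front of the queue. */
  void addFront(P proc) { runnables.addFirst(proc); }

  /** Inserts the element at the end of the queue. */
  void addBack(P proc) { runnables.addLast(proc); }

  /** The procedure cannot run right now: put it back, giving priority to others. */
  void yield(P proc) { addBack(proc); }

  boolean hasRunnables() { return !runnables.isEmpty(); }

  /** Fetches the next procedure to execute, or null if nothing is queued. */
  P poll() { return runnables.pollFirst(); }

  public static void main(String[] args) {
    SimpleSchedulerSketch<String> sched = new SimpleSchedulerSketch<>();
    sched.addBack("assign-region-A");
    sched.addFront("server-crash-rs1");   // crash handling jumps the queue
    System.out.println(sched.poll());     // server-crash-rs1
    System.out.println(sched.poll());     // assign-region-A
  }
}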

[11/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the 
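
The relocated areWritesEnabled() block in the hunk above shows the writestate guard: the flag is only read (or flipped) while holding the write-state object's monitor. A standalone restatement with a simplified WriteState stand-in, not HRegion's inner class:

class WriteStateSketch {
  /** Simplified stand-in for HRegion's write-state holder. */
  static class WriteState {
    boolean writesEnabled = true;
  }

  private final WriteState writestate = new WriteState();

  boolean areWritesEnabled() {
    synchronized (this.writestate) {
      return this.writestate.writesEnabled;
    }
  }

  void setWritesEnabled(boolean enabled) {
    synchronized (this.writestate) {
      this.writestate.writesEnabled = enabled;
    }
  }

  public static void main(String[] args) {
    WriteStateSketch region = new WriteStateSketch();
    System.out.println(region.areWritesEnabled()); // true
    region.setWritesEnabled(false);
    System.out.println(region.areWritesEnabled()); // false
  }
}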

[07/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 

hbase git commit: HBASE-19342 fix TestTableBasedReplicationSourceManagerImpl#testRemovePeerMetricsCleanup

2017-11-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 0f33931b2 -> b75510284


HBASE-19342 fix 
TestTableBasedReplicationSourceManagerImpl#testRemovePeerMetricsCleanup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b7551028
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b7551028
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b7551028

Branch: refs/heads/master
Commit: b75510284f8af2a70986b7ddf9174fe985d160a9
Parents: 0f33931
Author: Chia-Ping Tsai 
Authored: Tue Nov 28 18:06:38 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Nov 28 18:06:38 2017 +0800

--
 .../regionserver/ReplicationSourceManager.java  |  10 +-
 .../replication/ReplicationSourceDummy.java |   8 +-
 .../TestReplicationSourceManager.java   | 117 +++
 ...tTableBasedReplicationSourceManagerImpl.java |   7 ++
 4 files changed, 115 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b7551028/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 45d7d94..3aa3843 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -40,7 +40,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -68,6 +66,7 @@ import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -305,6 +304,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 return src;
   }
 
+  @VisibleForTesting
+  int getSizeOfLatestPath() {
+synchronized (latestPaths) {
+  return latestPaths.size();
+}
+  }
+
   /**
* Delete a complete queue of wals associated with a peer cluster
* @param peerId Id of the peer cluster queue of wals to delete

http://git-wip-us.apache.org/repos/asf/hbase/blob/b7551028/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
index a12cebd..7ea79f9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.replication;
 import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
-
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -45,7 +45,7 @@ public class ReplicationSourceDummy implements 
ReplicationSourceInterface {
   Path currentPath;
   MetricsSource metrics;
   WALFileLengthProvider walFileLengthProvider;
-
+  AtomicBoolean startup = new AtomicBoolean(false);
   @Override
   public void init(Configuration conf, FileSystem fs, ReplicationSourceManager 
manager,
   ReplicationQueues rq, ReplicationPeers rp, Server server, String 
peerClusterId,
@@ -70,7 +70,11 @@ public class ReplicationSourceDummy implements 
ReplicationSourceInterface {
 
   @Override
  

hbase git commit: HBASE-19342 fix TestTableBasedReplicationSourceManagerImpl#testRemovePeerMetricsCleanup

2017-11-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 ed1666720 -> 8b6f305ac


HBASE-19342 fix 
TestTableBasedReplicationSourceManagerImpl#testRemovePeerMetricsCleanup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b6f305a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b6f305a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b6f305a

Branch: refs/heads/branch-2
Commit: 8b6f305ac7f975bfde2b48ac211d40340d04e9ce
Parents: ed16667
Author: Chia-Ping Tsai 
Authored: Tue Nov 28 18:06:38 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Nov 28 18:12:55 2017 +0800

--
 .../regionserver/ReplicationSourceManager.java  |  10 +-
 .../replication/ReplicationSourceDummy.java |   8 +-
 .../TestReplicationSourceManager.java   | 117 +++
 ...tTableBasedReplicationSourceManagerImpl.java |   7 ++
 4 files changed, 115 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b6f305a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 45d7d94..3aa3843 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -40,7 +40,6 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -68,6 +66,7 @@ import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -305,6 +304,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 return src;
   }
 
+  @VisibleForTesting
+  int getSizeOfLatestPath() {
+synchronized (latestPaths) {
+  return latestPaths.size();
+}
+  }
+
   /**
* Delete a complete queue of wals associated with a peer cluster
* @param peerId Id of the peer cluster queue of wals to delete

http://git-wip-us.apache.org/repos/asf/hbase/blob/8b6f305a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
index a12cebd..7ea79f9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/ReplicationSourceDummy.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.replication;
 import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
-
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -45,7 +45,7 @@ public class ReplicationSourceDummy implements 
ReplicationSourceInterface {
   Path currentPath;
   MetricsSource metrics;
   WALFileLengthProvider walFileLengthProvider;
-
+  AtomicBoolean startup = new AtomicBoolean(false);
   @Override
   public void init(Configuration conf, FileSystem fs, ReplicationSourceManager 
manager,
   ReplicationQueues rq, ReplicationPeers rp, Server server, String 
peerClusterId,
@@ -70,7 +70,11 @@ public class ReplicationSourceDummy implements 
ReplicationSourceInterface {