hbase git commit: HBASE-15580 Tag coprocessor limitedprivate scope to StoreFile.Reader

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 939e01642 -> b5ec4d381


HBASE-15580 Tag coprocessor limitedprivate scope to StoreFile.Reader

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5ec4d38
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5ec4d38
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5ec4d38

Branch: refs/heads/branch-1
Commit: b5ec4d381c04df7c5011d8ea475af45c52d5aba1
Parents: 939e016
Author: Rajeshbabu Chintaguntla 
Authored: Thu Jan 4 13:54:58 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 14:15:03 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5ec4d38/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index c27cf40..803bfb3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.KeyValue;
@@ -1141,6 +1142,7 @@ public class StoreFile {
   /**
* Reader for a StoreFile.
*/
+  @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
   public static class Reader {
 private static final Log LOG = LogFactory.getLog(Reader.class.getName());
 



hbase git commit: HBASE-15580 Tag coprocessor limitedprivate scope to StoreFile.Reader

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 cdd2ee2a3 -> 39d211ae0


HBASE-15580 Tag coprocessor limitedprivate scope to StoreFile.Reader

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39d211ae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39d211ae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39d211ae

Branch: refs/heads/branch-1.4
Commit: 39d211ae0d7264ec3341bcdbd55e8bace3162b7b
Parents: cdd2ee2
Author: Rajeshbabu Chintaguntla 
Authored: Thu Jan 4 13:54:58 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 13:58:09 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/39d211ae/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index c27cf40..803bfb3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.KeyValue;
@@ -1141,6 +1142,7 @@ public class StoreFile {
   /**
* Reader for a StoreFile.
*/
+  @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
   public static class Reader {
 private static final Log LOG = LogFactory.getLog(Reader.class.getName());
 



hbase git commit: HBASE-18452 VerifyReplication by Snapshot should cache HDFS token before submit job for kerberos env

2018-01-03 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 654edc5fa -> 0b62528db


HBASE-18452 VerifyReplication by Snapshot should cache HDFS token before submit 
job for kerberos env


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b62528d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b62528d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b62528d

Branch: refs/heads/branch-2
Commit: 0b62528dbf76450a3c9eef13647889ce2ec8da67
Parents: 654edc5
Author: huzheng 
Authored: Thu Jan 4 11:47:20 2018 +0800
Committer: huzheng 
Committed: Thu Jan 4 14:01:41 2018 +0800

--
 .../hadoop/hbase/mapreduce/replication/VerifyReplication.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b62528d/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 035086e..9065f4e 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -428,7 +429,7 @@ public class VerifyReplication extends Configured 
implements Tool {
   conf.set(NAME + ".peerHBaseRootAddress", peerHBaseRootAddress);
 
   // This is to create HDFS delegation token for peer cluster in case of 
secured
-  conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress);
+  conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress, conf.get(HConstants.HBASE_DIR));
 }
 
 Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + 
tableName));



hbase git commit: HBASE-18452 VerifyReplication by Snapshot should cache HDFS token before submit job for kerberos env

2018-01-03 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master 8119acfca -> 519543594


HBASE-18452 VerifyReplication by Snapshot should cache HDFS token before submit 
job for kerberos env


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/51954359
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/51954359
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/51954359

Branch: refs/heads/master
Commit: 51954359416b107ce5eda6cb710449edc98ab0e6
Parents: 8119acf
Author: huzheng 
Authored: Thu Jan 4 11:47:20 2018 +0800
Committer: huzheng 
Committed: Thu Jan 4 13:55:27 2018 +0800

--
 .../hadoop/hbase/mapreduce/replication/VerifyReplication.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/51954359/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 035086e..9065f4e 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -428,7 +429,7 @@ public class VerifyReplication extends Configured 
implements Tool {
   conf.set(NAME + ".peerHBaseRootAddress", peerHBaseRootAddress);
 
   // This is to create HDFS delegation token for peer cluster in case of 
secured
-  conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress);
+  conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress, conf.get(HConstants.HBASE_DIR));
 }
 
 Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + 
tableName));



[2/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
index 301cfef..1227595 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
@@ -17,31 +17,28 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.when;
-
 @Category({MiscTests.class, SmallTests.class})
 public class TestRegionSizeCalculator {
 
@@ -134,14 +131,15 @@ public class TestRegionSizeCalculator {
   /**
* Creates mock returning RegionLoad info about given servers.
   */
-  private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
+  private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
 Admin mockAdmin = Mockito.mock(Admin.class);
-Map regionLoads = new 
TreeMap<>(Bytes.BYTES_COMPARATOR);
-for (RegionLoad regionLoad : regionLoadArray) {
-  regionLoads.put(regionLoad.getName(), regionLoad);
+List regionLoads = new ArrayList<>();
+for (RegionMetrics regionLoad : regionLoadArray) {
+  regionLoads.add(regionLoad);
 }
 when(mockAdmin.getConfiguration()).thenReturn(configuration);
-when(mockAdmin.getRegionLoad(sn, 
TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
+when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
+.thenReturn(regionLoads);
 return mockAdmin;
   }
 
@@ -150,11 +148,11 @@ public class TestRegionSizeCalculator {
*
* @param  fileSizeMb number of megabytes occupied by region in file store 
in megabytes
* */
-  private RegionLoad mockRegion(String regionName, int fileSizeMb) {
-RegionLoad region = Mockito.mock(RegionLoad.class);
-when(region.getName()).thenReturn(regionName.getBytes());
+  private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
+RegionMetrics region = Mockito.mock(RegionMetrics.class);
+when(region.getRegionName()).thenReturn(regionName.getBytes());
 when(region.getNameAsString()).thenReturn(regionName);
-when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
+when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, 
Size.Unit.MEGABYTE));
 return region;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 7ee1065..2323bf3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.Map;
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
@@ -28,11 +29,12 @@ import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
+import org.apache.had

[2/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/8119acfc/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
index 301cfef..1227595 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
@@ -17,31 +17,28 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import static org.apache.hadoop.hbase.HConstants.DEFAULT_REGIONSERVER_PORT;
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.when;
-
 @Category({MiscTests.class, SmallTests.class})
 public class TestRegionSizeCalculator {
 
@@ -134,14 +131,15 @@ public class TestRegionSizeCalculator {
   /**
* Creates mock returning RegionLoad info about given servers.
   */
-  private Admin mockAdmin(RegionLoad... regionLoadArray) throws Exception {
+  private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
 Admin mockAdmin = Mockito.mock(Admin.class);
-Map regionLoads = new 
TreeMap<>(Bytes.BYTES_COMPARATOR);
-for (RegionLoad regionLoad : regionLoadArray) {
-  regionLoads.put(regionLoad.getName(), regionLoad);
+List regionLoads = new ArrayList<>();
+for (RegionMetrics regionLoad : regionLoadArray) {
+  regionLoads.add(regionLoad);
 }
 when(mockAdmin.getConfiguration()).thenReturn(configuration);
-when(mockAdmin.getRegionLoad(sn, 
TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
+when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable")))
+.thenReturn(regionLoads);
 return mockAdmin;
   }
 
@@ -150,11 +148,11 @@ public class TestRegionSizeCalculator {
*
* @param  fileSizeMb number of megabytes occupied by region in file store 
in megabytes
* */
-  private RegionLoad mockRegion(String regionName, int fileSizeMb) {
-RegionLoad region = Mockito.mock(RegionLoad.class);
-when(region.getName()).thenReturn(regionName.getBytes());
+  private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
+RegionMetrics region = Mockito.mock(RegionMetrics.class);
+when(region.getRegionName()).thenReturn(regionName.getBytes());
 when(region.getNameAsString()).thenReturn(regionName);
-when(region.getStorefileSizeMB()).thenReturn(fileSizeMb);
+when(region.getStoreFileSize()).thenReturn(new Size(fileSizeMb, 
Size.Unit.MEGABYTE));
 return region;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8119acfc/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
--
diff --git 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index 7ee1065..2323bf3 100644
--- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.Map;
 import javax.ws.rs.GET;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.CacheControl;
@@ -28,11 +29,12 @@ import javax.ws.rs.core.Context;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
+import org.apache.had

[3/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all 
public classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8119acfc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8119acfc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8119acfc

Branch: refs/heads/master
Commit: 8119acfca7e35cd7c4c203a397b970a50d7d7574
Parents: 2bd259b
Author: Chia-Ping Tsai 
Authored: Thu Jan 4 12:40:09 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 13:13:34 2018 +0800

--
 .../hadoop/hbase/RegionMetricsBuilder.java  |   8 +
 .../org/apache/hadoop/hbase/client/Admin.java   | 119 ++---
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  44 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  20 +-
 .../hbase/client/ClusterStatusListener.java |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  48 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  49 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  27 --
 .../hadoop/hbase/client/TestInterfaceAlign.java |   3 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |  56 +--
 .../hadoop/hbase/IntegrationTestIngest.java |   6 +-
 .../hbase/IntegrationTestLazyCfLoading.java |  10 +-
 ...IntegrationTestRegionReplicaReplication.java |   7 +-
 .../hadoop/hbase/IntegrationTestingUtility.java |   7 +-
 .../StripeCompactionsPerformanceEvaluation.java |  10 +-
 .../hadoop/hbase/chaos/actions/Action.java  |  38 +-
 .../chaos/actions/BatchRestartRsAction.java |   5 +-
 .../chaos/actions/DumpClusterStatusAction.java  |   2 +-
 .../chaos/actions/MoveRegionsOfTableAction.java |   2 +-
 .../actions/RestartActiveMasterAction.java  |   2 +-
 .../actions/RestartRsHoldingMetaAction.java |   6 +-
 .../UnbalanceKillAndRebalanceAction.java|   7 +-
 .../chaos/actions/UnbalanceRegionsAction.java   |   7 +-
 .../mapreduce/IntegrationTestBulkLoad.java  |   5 +-
 .../hadoop/hbase/mttr/IntegrationTestMTTR.java  |   4 +-
 .../test/IntegrationTestBigLinkedList.java  |   7 +-
 ...stTimeBoundedRequestsWithRegionReplicas.java |   5 +-
 .../hbase/mapreduce/RegionSizeCalculator.java   |  15 +-
 .../mapreduce/TestRegionSizeCalculator.java |  42 +-
 .../rest/StorageClusterStatusResource.java  |  48 +-
 .../rest/StorageClusterVersionResource.java |   2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |  11 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  57 +--
 .../tmpl/master/BackupMasterStatusTmpl.jamon|   4 +-
 .../hbase/coprocessor/MasterObserver.java   |   9 +-
 .../hbase/master/ClusterStatusPublisher.java|  33 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/LoadBalancer.java   |   8 +-
 .../hbase/master/MasterCoprocessorHost.java |  14 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   3 +-
 .../hbase/master/RegionPlacementMaintainer.java | 451 ++-
 .../master/balancer/BalancerRegionLoad.java |  17 +-
 .../hbase/master/balancer/BaseLoadBalancer.java |   8 +-
 .../master/balancer/ClusterStatusChore.java |   2 +-
 .../master/balancer/RegionLocationFinder.java   |  10 +-
 .../master/balancer/StochasticLoadBalancer.java |  35 +-
 .../hbase/security/access/AccessController.java |   2 -
 .../org/apache/hadoop/hbase/tool/Canary.java|  17 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  20 +-
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   2 +-
 .../apache/hadoop/hbase/util/RegionMover.java   |   4 +-
 .../hadoop/hbase/util/RegionSplitter.java   |   6 +-
 .../org/apache/hadoop/hbase/HBaseCluster.java   |  24 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   5 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |  31 +-
 .../hadoop/hbase/TestClientClusterMetrics.java  |  29 +-
 .../hadoop/hbase/TestClientClusterStatus.java   |  51 +--
 .../org/apache/hadoop/hbase/TestRegionLoad.java |  21 +-
 .../apache/hadoop/hbase/TestRegionMetrics.java  |  14 +-
 .../hbase/TestStochasticBalancerJmxMetrics.java |  17 +-
 .../apache/hadoop/hbase/client/TestAdmin2.java  |   3 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  |  45 +-
 .../client/TestAsyncDecommissionAdminApi.java   |   3 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   7 +-
 .../hbase/client/TestMetaWithReplicas.java  |  10 +-
 .../hadoop/hbase/client/TestMultiParallel.java  |   2 +-
 .../hbase/coprocessor/TestMasterObserver.java   |   4 +-
 .../hadoop/hbase/master/TestMasterFailover.java |  29 +-
 .../TestMasterFailoverBalancerPersistence.java  |  25 +-
 .../TestMasterOperationsForRegionReplicas.java  |   6 +-
 .../hadoop/hbase/master/TestMasterShutdown.java |   8 +-
 .../TestFavoredStochasticBalancerPickers.java   |   8 +-
 .../TestFavoredStochasticLoadBalancer.java  |  27 +-
 .../balancer/TestRegionLocationFinder.java  |   3 +-
 .../balancer/TestStochasticLoadBalanc

[1/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 2bd259b44 -> 8119acfca


http://git-wip-us.apache.org/repos/asf/hbase/blob/8119acfc/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index 58ae059..59a0059 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -19,16 +19,16 @@ package org.apache.hadoop.hbase;
 
 import java.io.Closeable;
 import java.io.IOException;
-
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
+
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.util.Threads;
 
 /**
  * This class defines methods that can help with managing HBase clusters
@@ -61,7 +61,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   protected Configuration conf;
 
   /** the status of the cluster before we begin */
-  protected ClusterStatus initialClusterStatus;
+  protected ClusterMetrics initialClusterStatus;
 
   /**
* Construct an HBaseCluster
@@ -82,16 +82,16 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   }
 
   /**
-   * Returns a ClusterStatus for this HBase cluster.
-   * @see #getInitialClusterStatus()
+   * Returns a ClusterMetrics for this HBase cluster.
+   * @see #getInitialClusterMetrics()
*/
-  public abstract ClusterStatus getClusterStatus() throws IOException;
+  public abstract ClusterMetrics getClusterMetrics() throws IOException;
 
   /**
* Returns a ClusterStatus for this HBase cluster as observed at the
* starting of the HBaseCluster
*/
-  public ClusterStatus getInitialClusterStatus() throws IOException {
+  public ClusterMetrics getInitialClusterMetrics() throws IOException {
 return initialClusterStatus;
   }
 
@@ -153,7 +153,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   throws IOException {
 long start = System.currentTimeMillis();
 while ((System.currentTimeMillis() - start) < timeout) {
-  for (ServerName server : getClusterStatus().getServers()) {
+  for (ServerName server : 
getClusterMetrics().getLiveServerMetrics().keySet()) {
 if (server.getHostname().equals(hostname) && server.getPort() == port) 
{
   return;
 }
@@ -317,7 +317,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
* @return whether restoration is complete
*/
   public boolean restoreInitialStatus() throws IOException {
-return restoreClusterStatus(getInitialClusterStatus());
+return restoreClusterMetrics(getInitialClusterMetrics());
   }
 
   /**
@@ -327,7 +327,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
* permissions, etc. restoration might be partial.
* @return whether restoration is complete
*/
-  public boolean restoreClusterStatus(ClusterStatus desiredStatus) throws 
IOException {
+  public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws 
IOException {
 return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8119acfc/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 9e17a79..304b3cb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
-
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -52,7 +51,6 @@ import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.logging.impl.Jdk14Logger;
@@ -151,6 +149,7 @@ import org.apache.zookeeper.ZooKeeper.States;
 import org.slf4j.Logger;
 impo

[3/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all 
public classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/654edc5f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/654edc5f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/654edc5f

Branch: refs/heads/branch-2
Commit: 654edc5fa586a4b9c3e225a17836a6acc1d32928
Parents: 72631a0
Author: Chia-Ping Tsai 
Authored: Thu Jan 4 12:40:09 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 13:05:21 2018 +0800

--
 .../hadoop/hbase/RegionMetricsBuilder.java  |   8 +
 .../org/apache/hadoop/hbase/client/Admin.java   | 119 ++---
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  44 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  20 +-
 .../hbase/client/ClusterStatusListener.java |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  48 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  49 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  27 --
 .../hadoop/hbase/client/TestInterfaceAlign.java |   3 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |  56 +--
 .../hadoop/hbase/IntegrationTestIngest.java |   6 +-
 .../hbase/IntegrationTestLazyCfLoading.java |  10 +-
 ...IntegrationTestRegionReplicaReplication.java |   7 +-
 .../hadoop/hbase/IntegrationTestingUtility.java |   7 +-
 .../StripeCompactionsPerformanceEvaluation.java |  10 +-
 .../hadoop/hbase/chaos/actions/Action.java  |  38 +-
 .../chaos/actions/BatchRestartRsAction.java |   5 +-
 .../chaos/actions/DumpClusterStatusAction.java  |   2 +-
 .../chaos/actions/MoveRegionsOfTableAction.java |   2 +-
 .../actions/RestartActiveMasterAction.java  |   2 +-
 .../actions/RestartRsHoldingMetaAction.java |   6 +-
 .../UnbalanceKillAndRebalanceAction.java|   7 +-
 .../chaos/actions/UnbalanceRegionsAction.java   |   7 +-
 .../mapreduce/IntegrationTestBulkLoad.java  |   5 +-
 .../hadoop/hbase/mttr/IntegrationTestMTTR.java  |   4 +-
 .../test/IntegrationTestBigLinkedList.java  |   7 +-
 ...stTimeBoundedRequestsWithRegionReplicas.java |   5 +-
 .../hbase/mapreduce/RegionSizeCalculator.java   |  15 +-
 .../mapreduce/TestRegionSizeCalculator.java |  42 +-
 .../rest/StorageClusterStatusResource.java  |  48 +-
 .../rest/StorageClusterVersionResource.java |   2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |  11 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  57 +--
 .../tmpl/master/BackupMasterStatusTmpl.jamon|   4 +-
 .../hbase/coprocessor/MasterObserver.java   |   9 +-
 .../hbase/master/ClusterStatusPublisher.java|  33 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/LoadBalancer.java   |   8 +-
 .../hbase/master/MasterCoprocessorHost.java |  14 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   3 +-
 .../hbase/master/RegionPlacementMaintainer.java | 451 ++-
 .../master/balancer/BalancerRegionLoad.java |  17 +-
 .../hbase/master/balancer/BaseLoadBalancer.java |   8 +-
 .../master/balancer/ClusterStatusChore.java |   2 +-
 .../master/balancer/RegionLocationFinder.java   |  10 +-
 .../master/balancer/StochasticLoadBalancer.java |  35 +-
 .../hbase/security/access/AccessController.java |   2 -
 .../org/apache/hadoop/hbase/tool/Canary.java|  17 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  20 +-
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   2 +-
 .../apache/hadoop/hbase/util/RegionMover.java   |   4 +-
 .../hadoop/hbase/util/RegionSplitter.java   |   6 +-
 .../org/apache/hadoop/hbase/HBaseCluster.java   |  24 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   5 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |  31 +-
 .../hadoop/hbase/TestClientClusterMetrics.java  |  29 +-
 .../hadoop/hbase/TestClientClusterStatus.java   |  51 +--
 .../org/apache/hadoop/hbase/TestRegionLoad.java |  21 +-
 .../apache/hadoop/hbase/TestRegionMetrics.java  |  14 +-
 .../hbase/TestStochasticBalancerJmxMetrics.java |  17 +-
 .../apache/hadoop/hbase/client/TestAdmin2.java  |   3 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  |  45 +-
 .../client/TestAsyncDecommissionAdminApi.java   |   3 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   7 +-
 .../hbase/client/TestMetaWithReplicas.java  |  10 +-
 .../hadoop/hbase/client/TestMultiParallel.java  |   2 +-
 .../hbase/coprocessor/TestMasterObserver.java   |   4 +-
 .../hadoop/hbase/master/TestMasterFailover.java |  29 +-
 .../TestMasterFailoverBalancerPersistence.java  |  25 +-
 .../TestMasterOperationsForRegionReplicas.java  |   6 +-
 .../hadoop/hbase/master/TestMasterShutdown.java |   8 +-
 .../TestFavoredStochasticBalancerPickers.java   |   8 +-
 .../TestFavoredStochasticLoadBalancer.java  |  27 +-
 .../balancer/TestRegionLocationFinder.java  |   3 +-
 .../balancer/TestStochasticLoadBala

[1/3] hbase git commit: HBASE-19596 RegionMetrics/ServerMetrics/ClusterMetrics should apply to all public classes

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 72631a08c -> 654edc5fa


http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index 58ae059..59a0059 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -19,16 +19,16 @@ package org.apache.hadoop.hbase;
 
 import java.io.Closeable;
 import java.io.IOException;
-
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
+
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.util.Threads;
 
 /**
  * This class defines methods that can help with managing HBase clusters
@@ -61,7 +61,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   protected Configuration conf;
 
   /** the status of the cluster before we begin */
-  protected ClusterStatus initialClusterStatus;
+  protected ClusterMetrics initialClusterStatus;
 
   /**
* Construct an HBaseCluster
@@ -82,16 +82,16 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   }
 
   /**
-   * Returns a ClusterStatus for this HBase cluster.
-   * @see #getInitialClusterStatus()
+   * Returns a ClusterMetrics for this HBase cluster.
+   * @see #getInitialClusterMetrics()
*/
-  public abstract ClusterStatus getClusterStatus() throws IOException;
+  public abstract ClusterMetrics getClusterMetrics() throws IOException;
 
   /**
* Returns a ClusterStatus for this HBase cluster as observed at the
* starting of the HBaseCluster
*/
-  public ClusterStatus getInitialClusterStatus() throws IOException {
+  public ClusterMetrics getInitialClusterMetrics() throws IOException {
 return initialClusterStatus;
   }
 
@@ -153,7 +153,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
   throws IOException {
 long start = System.currentTimeMillis();
 while ((System.currentTimeMillis() - start) < timeout) {
-  for (ServerName server : getClusterStatus().getServers()) {
+  for (ServerName server : 
getClusterMetrics().getLiveServerMetrics().keySet()) {
 if (server.getHostname().equals(hostname) && server.getPort() == port) 
{
   return;
 }
@@ -317,7 +317,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
* @return whether restoration is complete
*/
   public boolean restoreInitialStatus() throws IOException {
-return restoreClusterStatus(getInitialClusterStatus());
+return restoreClusterMetrics(getInitialClusterMetrics());
   }
 
   /**
@@ -327,7 +327,7 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
* permissions, etc. restoration might be partial.
* @return whether restoration is complete
*/
-  public boolean restoreClusterStatus(ClusterStatus desiredStatus) throws 
IOException {
+  public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws 
IOException {
 return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/654edc5f/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 9e17a79..304b3cb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
-
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -52,7 +51,6 @@ import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.logging.impl.Jdk14Logger;
@@ -151,6 +149,7 @@ import org.apache.zookeeper.ZooKeeper.States;
 import org.slf4j.Logger;
 im

hbase git commit: HBASE-19473 Miscellaneous changes to ClientScanner

2018-01-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4c0fba6a8 -> 72631a08c


HBASE-19473 Miscellaneous changes to ClientScanner

- Remove superfluous logging code guard
- Simplify some of the code
- Use ArrayDeque instead of LinkedList for queue implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72631a08
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72631a08
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72631a08

Branch: refs/heads/branch-2
Commit: 72631a08c1d8ccc116225bf413989adac830edf8
Parents: 4c0fba6
Author: BELUGA BEHR 
Authored: Wed Jan 3 19:59:45 2018 -0800
Committer: Apekshit Sharma 
Committed: Wed Jan 3 20:26:18 2018 -0800

--
 .../hadoop/hbase/client/ClientScanner.java  | 99 ++--
 1 file changed, 49 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/72631a08/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 42597ff..0c6dc16 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -25,29 +25,30 @@ import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.util.LinkedList;
+import java.util.ArrayDeque;
 import java.util.Queue;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.commons.lang3.mutable.MutableBoolean;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
+import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
-import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-import org.apache.hadoop.hbase.exceptions.ScannerResetException;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Implements the scanner interface for the HBase client. If there are 
multiple regions in a table,
@@ -294,25 +295,30 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
   }
 
   protected void initSyncCache() {
-cache = new LinkedList<>();
+cache = new ArrayDeque<>();
   }
 
   protected Result nextWithSyncCache() throws IOException {
-// If the scanner is closed and there's nothing left in the cache, next is 
a no-op.
-if (cache.isEmpty() && this.closed) {
-  return null;
+Result result = cache.poll();
+if (result != null) {
+  return result;
 }
-if (cache.isEmpty()) {
-  loadCache();
+// If there is nothing left in the cache and the scanner is closed,
+// return a no-op
+if (this.closed) {
+  return null;
 }
 
-if (cache.size() > 0) {
-  return cache.poll();
-}
+loadCache();
+
+// try again to load from cache
+result = cache.poll();
 
 // if we exhausted this scanner before calling close, write out the scan 
metrics
-writeScanMetrics();
-return null;
+if (result == null) {
+  writeScanMetrics();
+}
+return result;
   }
 
   @VisibleForTesting
@@ -410,11 +416,9 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
 long remainingResultSize = maxScannerResultSize;
 int countdown = this.caching;
 // This is possible if we just stopped at the boundary of a region in the 
previous call.
-if (callable == null) {
-  if (!moveToNextRegion()) {
-closed = true;
-return;
-  }
+if (callable == null && !moveToNextRegion()) {
+  closed = true;
+  return;
   

hbase git commit: HBASE-19473 Miscellaneous changes to ClientScanner

2018-01-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 4ba741674 -> 2bd259b44


HBASE-19473 Miscellaneous changes to ClientScanner

- Remove superfluous logging code guard
- Simplify some of the code
- Use ArrayDeque instead of LinkedList for queue implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2bd259b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2bd259b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2bd259b4

Branch: refs/heads/master
Commit: 2bd259b445971131526a5e6580363c92dc597b10
Parents: 4ba7416
Author: BELUGA BEHR 
Authored: Wed Jan 3 19:59:45 2018 -0800
Committer: Apekshit Sharma 
Committed: Wed Jan 3 20:25:21 2018 -0800

--
 .../hadoop/hbase/client/ClientScanner.java  | 99 ++--
 1 file changed, 49 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2bd259b4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 42597ff..0c6dc16 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -25,29 +25,30 @@ import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.util.LinkedList;
+import java.util.ArrayDeque;
 import java.util.Queue;
 import java.util.concurrent.ExecutorService;
 
 import org.apache.commons.lang3.mutable.MutableBoolean;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
+import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
-import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-import org.apache.hadoop.hbase.exceptions.ScannerResetException;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Implements the scanner interface for the HBase client. If there are 
multiple regions in a table,
@@ -294,25 +295,30 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
   }
 
   protected void initSyncCache() {
-cache = new LinkedList<>();
+cache = new ArrayDeque<>();
   }
 
   protected Result nextWithSyncCache() throws IOException {
-// If the scanner is closed and there's nothing left in the cache, next is 
a no-op.
-if (cache.isEmpty() && this.closed) {
-  return null;
+Result result = cache.poll();
+if (result != null) {
+  return result;
 }
-if (cache.isEmpty()) {
-  loadCache();
+// If there is nothing left in the cache and the scanner is closed,
+// return a no-op
+if (this.closed) {
+  return null;
 }
 
-if (cache.size() > 0) {
-  return cache.poll();
-}
+loadCache();
+
+// try again to load from cache
+result = cache.poll();
 
 // if we exhausted this scanner before calling close, write out the scan 
metrics
-writeScanMetrics();
-return null;
+if (result == null) {
+  writeScanMetrics();
+}
+return result;
   }
 
   @VisibleForTesting
@@ -410,11 +416,9 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
 long remainingResultSize = maxScannerResultSize;
 int countdown = this.caching;
 // This is possible if we just stopped at the boundary of a region in the 
previous call.
-if (callable == null) {
-  if (!moveToNextRegion()) {
-closed = true;
-return;
-  }
+if (callable == null && !moveToNextRegion()) {
+  closed = true;
+  return;
 }

hbase git commit: HBASE-19588 Additional jar dependencies needed for mapreduce PerformanceEvaluation

2018-01-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 338a74e73 -> 4ba741674


HBASE-19588 Additional jar dependencies needed for mapreduce
PerformanceEvaluation

Signed-off-by: Albert Chu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ba74167
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ba74167
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ba74167

Branch: refs/heads/master
Commit: 4ba741674d623309e0ff6cd37d2b53ab7c6d7398
Parents: 338a74e
Author: Michael Stack 
Authored: Wed Jan 3 21:32:16 2018 -0600
Committer: Michael Stack 
Committed: Wed Jan 3 21:39:37 2018 -0600

--
 .../java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4ba74167/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 83895fd..df8ea76 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -808,7 +808,7 @@ public class TableMapReduceUtil {
   org.apache.hadoop.hbase.ipc.RpcServer.class,   // 
hbase-server
   org.apache.hadoop.hbase.CompatibilityFactory.class,// 
hbase-hadoop-compat
   org.apache.hadoop.hbase.mapreduce.JobUtil.class,   // 
hbase-hadoop2-compat
-  org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-mapreduce
+  org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-server
   org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class,  // 
hbase-metrics
   org.apache.hadoop.hbase.metrics.Snapshot.class,// 
hbase-metrics-api
   org.apache.zookeeper.ZooKeeper.class,



hbase git commit: HBASE-19588 Additional jar dependencies needed for mapreduce PerformanceEvaluation

2018-01-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f993425f0 -> 4c0fba6a8


HBASE-19588 Additional jar dependencies needed for mapreduce
PerformanceEvaluation

Signed-off-by: Albert Chu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c0fba6a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c0fba6a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c0fba6a

Branch: refs/heads/branch-2
Commit: 4c0fba6a858545f7a109fbbc0d96d46b9bd37457
Parents: f993425
Author: Michael Stack 
Authored: Wed Jan 3 21:32:16 2018 -0600
Committer: Michael Stack 
Committed: Wed Jan 3 21:36:05 2018 -0600

--
 .../java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c0fba6a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 83895fd..df8ea76 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -808,7 +808,7 @@ public class TableMapReduceUtil {
   org.apache.hadoop.hbase.ipc.RpcServer.class,   // 
hbase-server
   org.apache.hadoop.hbase.CompatibilityFactory.class,// 
hbase-hadoop-compat
   org.apache.hadoop.hbase.mapreduce.JobUtil.class,   // 
hbase-hadoop2-compat
-  org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-mapreduce
+  org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-server
   org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class,  // 
hbase-metrics
   org.apache.hadoop.hbase.metrics.Snapshot.class,// 
hbase-metrics-api
   org.apache.zookeeper.ZooKeeper.class,



hbase git commit: HBASE-19588 Additional jar dependencies needed for mapreduce PerformanceEvaluation

2018-01-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 a9723d476 -> cdd2ee2a3


HBASE-19588 Additional jar dependencies needed for mapreduce
PerformanceEvaluation

Signed-off-by: Albert Chu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cdd2ee2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cdd2ee2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cdd2ee2a

Branch: refs/heads/branch-1.4
Commit: cdd2ee2a357e77c6aba1bd72d9226844ae1b927b
Parents: a9723d4
Author: Michael Stack 
Authored: Wed Jan 3 21:32:16 2018 -0600
Committer: Michael Stack 
Committed: Wed Jan 3 21:34:10 2018 -0600

--
 .../java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cdd2ee2a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index f0fe508..b421036 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -826,6 +826,8 @@ public class TableMapReduceUtil {
   org.apache.hadoop.hbase.CompatibilityFactory.class,// 
hbase-hadoop-compat
   org.apache.hadoop.hbase.mapreduce.JobUtil.class,   // 
hbase-hadoop2-compat
   org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-server
+  org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class,  // 
hbase-metrics
+  org.apache.hadoop.hbase.metrics.Snapshot.class,// 
hbase-metrics-api
   prefixTreeCodecClass, //  hbase-prefix-tree (if null will be skipped)
   // pull necessary dependencies
   org.apache.zookeeper.ZooKeeper.class,



hbase git commit: HBASE-19588 Additional jar dependencies needed for mapreduce PerformanceEvaluation

2018-01-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 2c30c9bbb -> 939e01642


HBASE-19588 Additional jar dependencies needed for mapreduce
PerformanceEvaluation

Signed-off-by: Albert Chu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/939e0164
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/939e0164
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/939e0164

Branch: refs/heads/branch-1
Commit: 939e016425e62dc49a425d7b950491d45c206c75
Parents: 2c30c9b
Author: Michael Stack 
Authored: Wed Jan 3 21:32:16 2018 -0600
Committer: Michael Stack 
Committed: Wed Jan 3 21:33:25 2018 -0600

--
 .../java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/939e0164/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index f0fe508..b421036 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -826,6 +826,8 @@ public class TableMapReduceUtil {
   org.apache.hadoop.hbase.CompatibilityFactory.class,// 
hbase-hadoop-compat
   org.apache.hadoop.hbase.mapreduce.JobUtil.class,   // 
hbase-hadoop2-compat
   org.apache.hadoop.hbase.mapreduce.TableMapper.class,   // 
hbase-server
+  org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class,  // 
hbase-metrics
+  org.apache.hadoop.hbase.metrics.Snapshot.class,// 
hbase-metrics-api
   prefixTreeCodecClass, //  hbase-prefix-tree (if null will be skipped)
   // pull necessary dependencies
   org.apache.zookeeper.ZooKeeper.class,



hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 301062566 -> 338a74e73


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/338a74e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/338a74e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/338a74e7

Branch: refs/heads/master
Commit: 338a74e73705fd7c80111ade47345b2a6efe11e7
Parents: 3010625
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:19:07 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:46:43 2018 +0800

--
 .../apache/hadoop/hbase/quotas/TestRateLimiter.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/338a74e7/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index e205f9b..567577b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -122,6 +122,16 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -130,6 +140,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 59558f020 -> f993425f0


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f993425f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f993425f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f993425f

Branch: refs/heads/branch-2
Commit: f993425f018c3fa1352775cf9cc309883d4f7354
Parents: 59558f0
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:19:07 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:43:26 2018 +0800

--
 .../apache/hadoop/hbase/quotas/TestRateLimiter.java  | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f993425f/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index e205f9b..567577b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -122,6 +122,16 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -130,6 +140,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 bc5186f4d -> a9723d476


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9723d47
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9723d47
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9723d47

Branch: refs/heads/branch-1.4
Commit: a9723d476c4a9d58b4e810b35c071a609c7f3c57
Parents: bc5186f
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:02:45 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:35:34 2018 +0800

--
 .../org/apache/hadoop/hbase/quotas/TestRateLimiter.java | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9723d47/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index 1ca6643..56a215a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -16,8 +16,8 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
@@ -114,6 +114,15 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -122,6 +131,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 e9c332c7f -> aded33e20


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aded33e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aded33e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aded33e2

Branch: refs/heads/branch-1.3
Commit: aded33e20609926e51ad8a1e3059e1364bae6ff3
Parents: e9c332c
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:02:45 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:37:39 2018 +0800

--
 .../org/apache/hadoop/hbase/quotas/TestRateLimiter.java | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aded33e2/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index 1ca6643..56a215a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -16,8 +16,8 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
@@ -114,6 +114,15 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -122,6 +131,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 490728ae7 -> 2c30c9bbb


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c30c9bb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c30c9bb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c30c9bb

Branch: refs/heads/branch-1
Commit: 2c30c9bbb6615ffc17caaed979d46ef1f8725ded
Parents: 490728a
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:02:45 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:33:46 2018 +0800

--
 .../org/apache/hadoop/hbase/quotas/TestRateLimiter.java | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c30c9bb/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index 1ca6643..56a215a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -16,8 +16,8 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
@@ -114,6 +114,15 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -122,6 +131,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19613 Miscellaneous changes to WALSplitter.

2018-01-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 16e842285 -> 59558f020


HBASE-19613 Miscellaneous changes to WALSplitter.

* Use ArrayList instead of LinkedList
* Use Apache Commons where appropriate
* Parameterize and improve logging


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59558f02
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59558f02
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59558f02

Branch: refs/heads/branch-2
Commit: 59558f020f69af03dbae895c3367d04187555be8
Parents: 16e8422
Author: BELUGA BEHR 
Authored: Wed Jan 3 18:29:09 2018 -0800
Committer: Apekshit Sharma 
Committed: Wed Jan 3 18:39:14 2018 -0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 136 +--
 1 file changed, 63 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59558f02/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 817e925..4af782d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -29,8 +29,8 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.text.ParseException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -52,6 +52,9 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -98,7 +101,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 /**
  * This class is responsible for splitting up a bunch of regionserver commit 
log
  * files that are no longer being written to, into new files, one per region, 
for
@@ -188,7 +190,7 @@ public class WALSplitter {
 final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
 Collections.singletonList(logDir), null);
 List splits = new ArrayList<>();
-if (logfiles != null && logfiles.length > 0) {
+if (ArrayUtils.isNotEmpty(logfiles)) {
   for (FileStatus logfile: logfiles) {
 WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, 
null);
 if (s.splitLogFile(logfile, null)) {
@@ -230,7 +232,7 @@ public class WALSplitter {
 this.fileBeingSplit = logfile;
 try {
   long logLength = logfile.getLen();
-  LOG.info("Splitting WAL=" + logPath + ", length=" + logLength);
+  LOG.info("Splitting WAL={}, length={}", logPath, logLength);
   status.setStatus("Opening log file");
   if (reporter != null && !reporter.progress()) {
 progress_failed = true;
@@ -238,7 +240,7 @@ public class WALSplitter {
   }
   logFileReader = getReader(logfile, skipErrors, reporter);
   if (logFileReader == null) {
-LOG.warn("Nothing to split in WAL=" + logPath);
+LOG.warn("Nothing to split in WAL={}", logPath);
 return true;
   }
   int numOpenedFilesBeforeReporting = 
conf.getInt("hbase.splitlog.report.openedfiles", 3);
@@ -302,7 +304,7 @@ public class WALSplitter {
   iie.initCause(ie);
   throw iie;
 } catch (CorruptedLogFileException e) {
-  LOG.warn("Could not parse, corrupted WAL=" + logPath, e);
+  LOG.warn("Could not parse, corrupted WAL={}", logPath, e);
   if (splitLogWorkerCoordination != null) {
 // Some tests pass in a csm of null.
 splitLogWorkerCoordination.markCorrupted(rootDir, 
logfile.getPath().getName(), fs);
@@ -315,14 +317,13 @@ public class WALSplitter {
   e = e instanceof RemoteException ? ((RemoteException) 
e).unwrapRemoteException() : e;
   throw e;
 } finally {
-  LOG.debug("Finishing writing output logs and closing down.");
+  LOG.debug("Finishing writing output logs and closing down");
   try {
 if (null != logFileReader) {
   logFileReader.close();
 }
   } catch (IOException exception) {
-LOG.warn("Could not close WAL reader: " + exception.getMessage());
-LOG.debug("exception details", exception);
+LOG.warn("Could not close WA

hbase git commit: HBASE-19490 Rare failure in TestRateLimiter

2018-01-03 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 a0a17765c -> 197772525


HBASE-19490 Rare failure in TestRateLimiter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/19777252
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/19777252
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/19777252

Branch: refs/heads/branch-1.2
Commit: 19777252581c085e78177e1c147d0d79a77ab748
Parents: a0a1776
Author: Chia-Ping Tsai 
Authored: Wed Jan 3 03:02:45 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jan 4 10:40:25 2018 +0800

--
 .../org/apache/hadoop/hbase/quotas/TestRateLimiter.java | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/19777252/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
index 1ca6643..56a215a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRateLimiter.java
@@ -16,8 +16,8 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.util.concurrent.TimeUnit;
-
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.junit.Test;
@@ -114,6 +114,15 @@ public class TestRateLimiter {
 RateLimiter limiter = new FixedIntervalRateLimiter();
 limiter.set(10, TimeUnit.SECONDS);
 
+// fix the current time in order to get the precise value of interval
+EnvironmentEdge edge = new EnvironmentEdge() {
+  private final long ts = System.currentTimeMillis();
+  @Override
+  public long currentTime() {
+return ts;
+  }
+};
+EnvironmentEdgeManager.injectEdge(edge);
 // 10 resources are available, but we need to consume 20 resources
 // Verify that we have to wait at least 1.1sec to have 1 resource available
 assertTrue(limiter.canExecute());
@@ -122,6 +131,7 @@ public class TestRateLimiter {
 assertEquals(1000, limiter.waitInterval(1));
 // To consume 10 resource wait for 100ms
 assertEquals(1000, limiter.waitInterval(10));
+EnvironmentEdgeManager.reset();
 
 limiter.setNextRefillTime(limiter.getNextRefillTime() - 900);
 // Verify that after 1sec also no resource should be available



hbase git commit: HBASE-19613 Miscellaneous changes to WALSplitter.

2018-01-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 6e136f26b -> 301062566


HBASE-19613 Miscellaneous changes to WALSplitter.

* Use ArrayList instead of LinkedList
* Use Apache Commons where appropriate
* Parameterize and improve logging


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30106256
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30106256
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30106256

Branch: refs/heads/master
Commit: 301062566ac6e32d5bc3c6dbfd819b5e62742e8c
Parents: 6e136f2
Author: BELUGA BEHR 
Authored: Wed Jan 3 18:29:09 2018 -0800
Committer: Apekshit Sharma 
Committed: Wed Jan 3 18:30:10 2018 -0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 163 +--
 1 file changed, 75 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/30106256/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 328390e..2aad203 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -24,9 +24,9 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.text.ParseException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -48,6 +48,9 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
@@ -86,14 +89,14 @@ import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
@@ -203,7 +206,7 @@ public class WALSplitter {
 final FileStatus[] logfiles = SplitLogManager.getFileList(conf,
 Collections.singletonList(logDir), null);
 List splits = new ArrayList<>();
-if (logfiles != null && logfiles.length > 0) {
+if (ArrayUtils.isNotEmpty(logfiles)) {
   for (FileStatus logfile: logfiles) {
 WALSplitter s = new WALSplitter(factory, conf, rootDir, fs, null, 
null);
 if (s.splitLogFile(logfile, null)) {
@@ -245,7 +248,7 @@ public class WALSplitter {
 this.fileBeingSplit = logfile;
 try {
   long logLength = logfile.getLen();
-  LOG.info("Splitting WAL=" + logPath + ", length=" + logLength);
+  LOG.info("Splitting WAL={}, length={}", logPath, logLength);
   status.setStatus("Opening log file");
   if (reporter != null && !reporter.progress()) {
 progress_failed = true;
@@ -253,7 +256,7 @@ public class WALSplitter {
   }
   logFileReader = getReader(logfile, skipErrors, reporter);
   if (logFileReader == null) {
-LOG.warn("Nothing to split in WAL=" + logPath);
+LOG.warn("Nothing to split in WAL={}", logPath);
 return true;
   }
   int numOpenedFilesBeforeReporting = 
conf.getInt("hbase.splitlog.report.openedfiles", 3);
@@ -317,7 +320,7 @@ public class WALSplitter {
   iie.initCause(ie);
   throw iie;
 } catch (CorruptedLogFileException e) {
-  LOG.warn("Could not parse, corrupted WAL=" + logPath, e);
+  LOG.warn("Could not parse, corrupted WAL={}", logPath, e);
   if (splitLogWorkerCoordination != null) {
 // Some tests pass in a csm of null.
   

hbase git commit: HBASE-18806 VerifyRep by snapshot need not to restore snapshot for each mapper

2018-01-03 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f0011ebfe -> 16e842285


HBASE-18806 VerifyRep by snapshot need not to restore snapshot for each mapper


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16e84228
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16e84228
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16e84228

Branch: refs/heads/branch-2
Commit: 16e84228554b1561c7085cf8a439061478d2d232
Parents: f0011eb
Author: huzheng 
Authored: Thu Sep 14 17:08:16 2017 +0800
Committer: huzheng 
Committed: Thu Jan 4 10:12:53 2018 +0800

--
 .../hbase/mapreduce/TableMapReduceUtil.java |  32 +++---
 .../mapreduce/TableSnapshotInputFormatImpl.java |   6 +-
 .../replication/VerifyReplication.java  |  24 +++-
 .../replication/TestReplicationSmallTests.java  |  19 
 .../hbase/client/TableSnapshotScanner.java  | 112 +--
 .../hbase/client/TestTableSnapshotScanner.java  |  47 
 6 files changed, 185 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/16e84228/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index d1101c5..83895fd 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -345,22 +345,20 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets up the job for reading from a table snapshot. It bypasses hbase 
servers
-   * and read directly from snapshot files.
-   *
+   * Sets up the job for reading from a table snapshot. It bypasses hbase 
servers and read directly
+   * from snapshot files.
* @param snapshotName The name of the snapshot (of a table) to read from.
-   * @param scan  The scan instance with the columns, time range etc.
-   * @param mapper  The mapper class to use.
-   * @param outputKeyClass  The class of the output key.
-   * @param outputValueClass  The class of the output value.
-   * @param job  The current job to adjust.  Make sure the passed job is
-   * carrying all necessary HBase configuration.
-   * @param addDependencyJars upload HBase jars and jars for any of the 
configured
-   *   job classes via the distributed cache (tmpjars).
-   *
+   * @param scan The scan instance with the columns, time range etc.
+   * @param mapper The mapper class to use.
+   * @param outputKeyClass The class of the output key.
+   * @param outputValueClass The class of the output value.
+   * @param job The current job to adjust. Make sure the passed job is 
carrying all necessary HBase
+   *  configuration.
+   * @param addDependencyJars upload HBase jars and jars for any of the 
configured job classes via
+   *  the distributed cache (tmpjars).
* @param tmpRestoreDir a temporary directory to copy the snapshot files 
into. Current user should
-   * have write permissions to this directory, and this should not be a 
subdirectory of rootdir.
-   * After the job is finished, restore directory can be deleted.
+   *  have write permissions to this directory, and this should not be 
a subdirectory of
+   *  rootdir. After the job is finished, restore directory can be 
deleted.
* @throws IOException When setting up the details fails.
* @see TableSnapshotInputFormat
*/
@@ -369,10 +367,10 @@ public class TableMapReduceUtil {
   Class outputKeyClass,
   Class outputValueClass, Job job,
   boolean addDependencyJars, Path tmpRestoreDir)
-  throws IOException {
+  throws IOException {
 TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
-initTableMapperJob(snapshotName, scan, mapper, outputKeyClass,
-outputValueClass, job, addDependencyJars, false, 
TableSnapshotInputFormat.class);
+initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, 
outputValueClass, job,
+  addDependencyJars, false, TableSnapshotInputFormat.class);
 resetCacheConfig(job.getConfiguration());
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/16e84228/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 904ede0..add0985f 100644
--

hbase git commit: HBASE-18806 VerifyRep by snapshot need not to restore snapshot for each mapper

2018-01-03 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master 9a98bb4ce -> 6e136f26b


HBASE-18806 VerifyRep by snapshot need not to restore snapshot for each mapper


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e136f26
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e136f26
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e136f26

Branch: refs/heads/master
Commit: 6e136f26bf0761797716b532b1a8c4984bf80c58
Parents: 9a98bb4
Author: huzheng 
Authored: Thu Sep 14 17:08:16 2017 +0800
Committer: huzheng 
Committed: Thu Jan 4 10:10:03 2018 +0800

--
 .../hbase/mapreduce/TableMapReduceUtil.java |  32 +++---
 .../mapreduce/TableSnapshotInputFormatImpl.java |   6 +-
 .../replication/VerifyReplication.java  |  24 +++-
 .../replication/TestReplicationSmallTests.java  |  19 
 .../hbase/client/TableSnapshotScanner.java  | 112 +--
 .../hbase/client/TestTableSnapshotScanner.java  |  47 
 6 files changed, 185 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e136f26/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index d1101c5..83895fd 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -345,22 +345,20 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets up the job for reading from a table snapshot. It bypasses hbase 
servers
-   * and read directly from snapshot files.
-   *
+   * Sets up the job for reading from a table snapshot. It bypasses hbase 
servers and read directly
+   * from snapshot files.
* @param snapshotName The name of the snapshot (of a table) to read from.
-   * @param scan  The scan instance with the columns, time range etc.
-   * @param mapper  The mapper class to use.
-   * @param outputKeyClass  The class of the output key.
-   * @param outputValueClass  The class of the output value.
-   * @param job  The current job to adjust.  Make sure the passed job is
-   * carrying all necessary HBase configuration.
-   * @param addDependencyJars upload HBase jars and jars for any of the 
configured
-   *   job classes via the distributed cache (tmpjars).
-   *
+   * @param scan The scan instance with the columns, time range etc.
+   * @param mapper The mapper class to use.
+   * @param outputKeyClass The class of the output key.
+   * @param outputValueClass The class of the output value.
+   * @param job The current job to adjust. Make sure the passed job is 
carrying all necessary HBase
+   *  configuration.
+   * @param addDependencyJars upload HBase jars and jars for any of the 
configured job classes via
+   *  the distributed cache (tmpjars).
* @param tmpRestoreDir a temporary directory to copy the snapshot files 
into. Current user should
-   * have write permissions to this directory, and this should not be a 
subdirectory of rootdir.
-   * After the job is finished, restore directory can be deleted.
+   *  have write permissions to this directory, and this should not be 
a subdirectory of
+   *  rootdir. After the job is finished, restore directory can be 
deleted.
* @throws IOException When setting up the details fails.
* @see TableSnapshotInputFormat
*/
@@ -369,10 +367,10 @@ public class TableMapReduceUtil {
   Class outputKeyClass,
   Class outputValueClass, Job job,
   boolean addDependencyJars, Path tmpRestoreDir)
-  throws IOException {
+  throws IOException {
 TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
-initTableMapperJob(snapshotName, scan, mapper, outputKeyClass,
-outputValueClass, job, addDependencyJars, false, 
TableSnapshotInputFormat.class);
+initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, 
outputValueClass, job,
+  addDependencyJars, false, TableSnapshotInputFormat.class);
 resetCacheConfig(job.getConfiguration());
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e136f26/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 904ede0..add0985f 100644
--- 
a

[17/35] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2018-01-03 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7cdda898
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7cdda898
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7cdda898

Branch: refs/heads/HBASE-19397
Commit: 7cdda8987768345dc68e60573c17fba3fdb8b2b1
Parents: 8253f50
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7cdda898/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7cdda898/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7cdda898/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org

[24/35] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2018-01-03 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/382b3096
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/382b3096
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/382b3096

Branch: refs/heads/HBASE-19397
Commit: 382b30966080ef99ee9771021eccb3871428c4ef
Parents: bbbda61
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/382b3096/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need clean exclude-namespaces or exclude-table-cfs config 
firstly" +
-  

[27/35] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-01-03 Thread zhangduo
HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/20a612fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/20a612fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/20a612fb

Branch: refs/heads/HBASE-19397
Commit: 20a612fbdef4c80f8f48ae22b8faf7e8bba5f0cb
Parents: dbe1f74
Author: zhangduo 
Authored: Wed Dec 27 22:03:51 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   9 +-
 .../hbase/replication/ReplicationQueues.java| 160 ---
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesZKImpl.java| 407 -
 .../hbase/replication/ReplicationTableBase.java | 442 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +-
 .../replication/ZKReplicationQueueStorage.java  |  22 +
 .../replication/TestReplicationStateBasic.java  | 131 +++---
 .../replication/TestReplicationStateZKImpl.java |  41 +-
 .../regionserver/DumpReplicationQueues.java |  15 +-
 .../RecoveredReplicationSource.java |  17 +-
 .../RecoveredReplicationSourceShipper.java  |  22 +-
 .../replication/regionserver/Replication.java   |  41 +-
 .../regionserver/ReplicationSource.java |  23 +-
 .../ReplicationSourceInterface.java |  11 +-
 .../regionserver/ReplicationSourceManager.java  | 261 ++-
 .../regionserver/ReplicationSyncUp.java |  29 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |  12 +-
 .../cleaner/TestReplicationHFileCleaner.java|  26 +-
 .../cleaner/TestReplicationZKNodeCleaner.java   |  22 +-
 .../replication/ReplicationSourceDummy.java |   6 +-
 .../replication/TestReplicationSyncUpTool.java  |   6 +-
 .../TestReplicationSourceManager.java   | 104 ++---
 .../TestReplicationSourceManagerZkImpl.java |  58 +--
 24 files changed, 385 insertions(+), 1571 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/20a612fb/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c1c213..5e70e57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import org.apache.commons.lang3.reflect.ConstructorUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A factory class for instantiating replication objects that deal with 
replication state.
@@ -30,12 +29,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
-  throws Exception {
-return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
-  args);
-  }
-
   public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
   Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/20a612fb/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
deleted file mode 100644
index 7f440b1..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/l

[10/35] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure

2018-01-03 Thread zhangduo
HBASE-19592 Add UTs to test retry on update zk failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/165b0ded
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/165b0ded
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/165b0ded

Branch: refs/heads/HBASE-19397
Commit: 165b0dedaea69ff01c194a232bf0953a2d704971
Parents: 382b309
Author: zhangduo 
Authored: Tue Dec 26 20:39:00 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 .../TestReplicationProcedureRetry.java  | 200 +++
 2 files changed, 202 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/165b0ded/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index b78cbce..f4ccce8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
-public final class ReplicationPeerManager {
+public class ReplicationPeerManager {
 
   private final ReplicationPeerStorage peerStorage;
 
@@ -61,8 +61,7 @@ public final class ReplicationPeerManager {
 
   private final ConcurrentMap peers;
 
-  private ReplicationPeerManager(ReplicationPeerStorage peerStorage,
-  ReplicationQueueStorage queueStorage,
+  ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
   ConcurrentMap peers) {
 this.peerStorage = peerStorage;
 this.queueStorage = queueStorage;

http://git-wip-us.apache.org/repos/asf/hbase/blob/165b0ded/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
new file mode 100644
index 000..ab35b46
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+
+/**
+ * All the modification method will fail once in the test and should finally 
succeed.
+ */
+@Category({ ReplicationTests

[08/35] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer

2018-01-03 Thread zhangduo
HBASE-19633 Clean up the replication queues in the postPeerModification stage 
when removing a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb3f8894
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb3f8894
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb3f8894

Branch: refs/heads/HBASE-19397
Commit: cb3f88944018d209d4f286c7d77dd15bb0c70ef7
Parents: 1104d16
Author: zhangduo 
Authored: Tue Jan 2 09:57:23 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  2 +-
 .../replication/VerifyReplication.java  | 34 ++---
 .../hbase/replication/ReplicationPeers.java | 35 +++---
 .../replication/ZKReplicationQueueStorage.java  |  3 +-
 .../replication/ZKReplicationStorageBase.java   |  4 +-
 .../replication/TestReplicationStateBasic.java  | 10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 +-
 .../master/replication/AddPeerProcedure.java|  5 +--
 .../replication/DisablePeerProcedure.java   |  3 +-
 .../master/replication/EnablePeerProcedure.java |  3 +-
 .../master/replication/ModifyPeerProcedure.java | 34 +
 .../replication/RefreshPeerProcedure.java   | 17 -
 .../master/replication/RemovePeerProcedure.java |  7 ++--
 .../replication/ReplicationPeerManager.java | 31 +++-
 .../replication/UpdatePeerConfigProcedure.java  |  3 +-
 .../hbase/regionserver/HRegionServer.java   | 18 -
 .../RemoteProcedureResultReporter.java  |  3 +-
 .../regionserver/RefreshPeerCallable.java   |  5 ++-
 .../regionserver/ReplicationSourceManager.java  | 39 +++-
 .../TestReplicationAdminUsingProcedure.java |  7 ++--
 20 files changed, 135 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3f8894/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index ab75dff..c6d0fae 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -27,8 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A configuration for the replication peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3f8894/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 15ac2ab..f005f80 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.mapreduce.replication;
 
 import java.io.IOException;
 import java.util.Arrays;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -43,13 +42,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -63,6 +63,7 @@ import org.apache.

[26/35] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-01-03 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/20a612fb/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
index b6cf54d..4b9ed74 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -32,9 +31,10 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Interface that defines a replication source
@@ -47,15 +47,10 @@ public interface ReplicationSourceInterface {
* @param conf the configuration to use
* @param fs the file system to use
* @param manager the manager to use
-   * @param replicationQueues
-   * @param replicationPeers
* @param server the server for this region server
-   * @param peerClusterZnode
-   * @param clusterId
-   * @throws IOException
*/
   void init(Configuration conf, FileSystem fs, ReplicationSourceManager 
manager,
-  ReplicationQueues replicationQueues, ReplicationPeers replicationPeers, 
Server server,
+  ReplicationQueueStorage queueStorage, ReplicationPeers replicationPeers, 
Server server,
   String peerClusterZnode, UUID clusterId, ReplicationEndpoint 
replicationEndpoint,
   WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) 
throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/20a612fb/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index b1d82c8..853bafb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -34,19 +34,21 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
@@ -60,7 +62,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -68,6 +70,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import 
org.apache.hbase

[23/35] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source

2018-01-03 Thread zhangduo
HBASE-19623 Create replication endpoint asynchronously when adding a 
replication source


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eee095c9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eee095c9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eee095c9

Branch: refs/heads/HBASE-19397
Commit: eee095c99fe59d223b61b07ac31d757f3da9bded
Parents: cb3f889
Author: zhangduo 
Authored: Tue Jan 2 13:25:58 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   8 ++
 .../hbase/replication/ReplicationPeers.java |  19 +---
 .../replication/ZKReplicationPeerStorage.java   |   7 +-
 .../replication/TestReplicationStateBasic.java  |  20 +---
 .../TestZKReplicationPeerStorage.java   |  14 +--
 .../HBaseInterClusterReplicationEndpoint.java   |  17 ++-
 .../RecoveredReplicationSource.java |  13 +--
 .../regionserver/ReplicationSource.java | 110 +++
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  47 +---
 .../client/TestAsyncReplicationAdminApi.java|   2 -
 .../replication/TestReplicationAdmin.java   |   2 -
 .../replication/ReplicationSourceDummy.java |   7 +-
 .../replication/TestReplicationSource.java  |  27 +++--
 .../TestReplicationSourceManager.java   |   8 +-
 15 files changed, 127 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eee095c9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 4846018..2da3cce 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,14 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Test whether the peer is enabled.
+   * @return {@code true} if enabled, otherwise {@code false}.
+   */
+  default boolean isPeerEnabled() {
+return getPeerState() == PeerState.ENABLED;
+  }
+
+  /**
* Get the peer config object
* @return the ReplicationPeerConfig for this peer
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/eee095c9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 3d18091..ad3fee7 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -86,21 +87,6 @@ public class ReplicationPeers {
   }
 
   /**
-   * Get the peer state for the specified connected remote slave cluster. The 
value might be read
-   * from cache, so it is recommended to use {@link #peerStorage } to read 
storage directly if
-   * reading the state after enabling or disabling it.
-   * @param peerId a short that identifies the cluster
-   * @return true if replication is enabled, false otherwise.
-   */
-  public boolean isPeerEnabled(String peerId) {
-ReplicationPeer replicationPeer = this.peerCache.get(peerId);
-if (replicationPeer == null) {
-  throw new IllegalArgumentException("Peer with id= " + peerId + " is not 
cached");
-}
-return replicationPeer.getPeerState() == PeerState.ENABLED;
-  }
-
-  /**
* Returns the ReplicationPeerImpl for the specified cached peer. This 
ReplicationPeer will
* continue to track changes to the Peer's state and config. This method 
returns null if no peer
* has been cached with the given peerId.
@@ -117,7 +103,7 @@ public class ReplicationPeers {
* @return a Set of Strings for peerIds
*/
   public Set getAllPeerIds() {
-return peerCache.keySet();
+return Collections.unmodifiableSet(peerCache.keySet());
   }
 
   public static Configuration 
getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,
@@ -161,7 +147,6 @@ public class ReplicationPeers {
* Helper method

[05/35] hbase git commit: HBASE-19654 Remove misleading and chatty debug message in ReplicationLogCleaner

2018-01-03 Thread zhangduo
HBASE-19654 Remove misleading and chatty debug message in ReplicationLogCleaner

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8ef30c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8ef30c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8ef30c5

Branch: refs/heads/HBASE-19397
Commit: d8ef30c5e1f3f19d48ca91e80beea20cacd96a14
Parents: ab0e459
Author: Reid Chan 
Authored: Tue Jan 2 21:45:31 2018 +0800
Committer: tedyu 
Committed: Wed Jan 3 06:58:32 2018 -0800

--
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8ef30c5/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index ad14a98..86f98da 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -86,8 +86,6 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate {
 if (LOG.isDebugEnabled()) {
   if (logInReplicationQueue) {
 LOG.debug("Found log in ZK, keeping: " + wal);
-  } else {
-LOG.debug("Didn't find this log in ZK, deleting: " + wal);
   }
 }
 return !logInReplicationQueue && (file.getModificationTime() < 
readZKTimestamp);



[25/35] hbase git commit: HBASE-19642 Fix locking for peer modification procedure

2018-01-03 Thread zhangduo
HBASE-19642 Fix locking for peer modification procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dbe1f749
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dbe1f749
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dbe1f749

Branch: refs/heads/HBASE-19397
Commit: dbe1f749e2a897c9bea6c91c3712525edda34a56
Parents: 165b0de
Author: zhangduo 
Authored: Wed Dec 27 18:27:13 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 14 +
 .../master/replication/ModifyPeerProcedure.java | 21 +---
 2 files changed, 32 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dbe1f749/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index a25217c..4ecb3b1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -610,6 +610,20 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 public boolean requireExclusiveLock(Procedure proc) {
   return requirePeerExclusiveLock((PeerProcedureInterface) proc);
 }
+
+@Override
+public boolean isAvailable() {
+  if (isEmpty()) {
+return false;
+  }
+  if (getLockStatus().hasExclusiveLock()) {
+// if we have an exclusive lock already taken
+// only child of the lock owner can be executed
+Procedure nextProc = peek();
+return nextProc != null && getLockStatus().hasLockAccess(nextProc);
+  }
+  return true;
+}
   }
 
   // 


http://git-wip-us.apache.org/repos/asf/hbase/blob/dbe1f749/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 279fbc7..a682606 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure
 
   protected String peerId;
 
+  private volatile boolean locked;
+
   // used to keep compatible with old client where we can only returns after 
updateStorage.
   protected ProcedurePrepareLatch latch;
 
@@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure
 
   @Override
   protected LockState acquireLock(MasterProcedureEnv env) {
-return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)
-  ? LockState.LOCK_EVENT_WAIT
-  : LockState.LOCK_ACQUIRED;
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return  LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
 env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
   }
 
   @Override
+  protected boolean holdLock(MasterProcedureEnv env) {
+return true;
+  }
+
+  @Override
+  protected boolean hasLock(MasterProcedureEnv env) {
+return locked;
+  }
+
+  @Override
   protected void rollbackState(MasterProcedureEnv env, PeerModificationState 
state)
   throws IOException, InterruptedException {
 if (state == PeerModificationState.PRE_PEER_MODIFICATION) {



[32/35] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-01-03 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/1104d16a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 853bafb..24a4f30 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -166,7 +166,6 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 this.clusterId = clusterId;
 this.walFileLengthProvider = walFileLengthProvider;
 this.replicationTracker.registerListener(this);
-this.replicationPeers.getAllPeerIds();
 // It's preferable to failover 1 RS at a time, but with good zk servers
 // more could be processed at the same time.
 int nbWorkers = conf.getInt("replication.executor.workers", 1);
@@ -270,8 +269,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 }
 List otherRegionServers = 
replicationTracker.getListOfRegionServers().stream()
 .map(ServerName::valueOf).collect(Collectors.toList());
-LOG.info(
-  "Current list of replicators: " + currentReplicators + " other RSs: " + 
otherRegionServers);
+LOG.info("Current list of replicators: " + currentReplicators + " other 
RSs: "
++ otherRegionServers);
 
 // Look if there's anything to process after a restart
 for (ServerName rs : currentReplicators) {
@@ -288,7 +287,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* The returned future is for adoptAbandonedQueues task.
*/
   Future init() throws IOException, ReplicationException {
-for (String id : this.replicationPeers.getConnectedPeerIds()) {
+for (String id : this.replicationPeers.getAllPeerIds()) {
   addSource(id);
   if (replicationForBulkLoadDataEnabled) {
 // Check if peer exists in hfile-refs queue, if not add it. This can 
happen in the case
@@ -307,8 +306,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
*/
   @VisibleForTesting
   ReplicationSourceInterface addSource(String id) throws IOException, 
ReplicationException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(id);
-ReplicationPeer peer = replicationPeers.getConnectedPeer(id);
+ReplicationPeerConfig peerConfig = replicationPeers.getPeerConfig(id);
+ReplicationPeer peer = replicationPeers.getPeer(id);
 ReplicationSourceInterface src = getReplicationSource(id, peerConfig, 
peer);
 synchronized (this.walsById) {
   this.sources.add(src);
@@ -354,7 +353,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   public void deleteSource(String peerId, boolean closeConnection) {
 abortWhenFail(() -> this.queueStorage.removeQueue(server.getServerName(), 
peerId));
 if (closeConnection) {
-  this.replicationPeers.peerDisconnected(peerId);
+  this.replicationPeers.removePeer(peerId);
 }
   }
 
@@ -445,12 +444,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 // update replication queues on ZK
 // synchronize on replicationPeers to avoid adding source for the 
to-be-removed peer
 synchronized (replicationPeers) {
-  for (String id : replicationPeers.getConnectedPeerIds()) {
+  for (String id : replicationPeers.getAllPeerIds()) {
 try {
   this.queueStorage.addWAL(server.getServerName(), id, logName);
 } catch (ReplicationException e) {
-  throw new IOException("Cannot add log to replication queue" +
-" when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
+  throw new IOException("Cannot add log to replication queue"
+  + " when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
 }
   }
 }
@@ -593,7 +592,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   public void addPeer(String id) throws ReplicationException, IOException {
 LOG.info("Trying to add peer, peerId: " + id);
-boolean added = this.replicationPeers.peerConnected(id);
+boolean added = this.replicationPeers.addPeer(id);
 if (added) {
   LOG.info("Peer " + id + " connected success, trying to start the 
replication source thread.");
   addSource(id);
@@ -729,19 +728,25 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   // there is not an actual peer defined corresponding to peerId for 
the failover.
   ReplicationQueueInfo replicationQueueInfo = new 
ReplicationQueueInfo(peerId);

[30/35] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-01-03 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48b9ae4e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48b9ae4e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48b9ae4e

Branch: refs/heads/HBASE-19397
Commit: 48b9ae4ee35051ad2e919b103483224bf9de1d48
Parents: 28f2315
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  24 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 176 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 148 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 109 ++
 .../master/ReplicationLogCleaner.java   |  35 +-
 .../regionserver/DumpReplicationQueues.java |  78 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  30 +-
 .../cleaner/TestReplicationHFileCleaner.java|  59 +--
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 227 ---
 .../TestReplicationSourceManagerZkImpl.java |  84 ++---
 25 files changed, 907 insertions(+), 1327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/48b9ae4e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, 
Configuration conf,
- final 
ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/48b9ae4e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 8e2c5f4..f2e5647 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/repli

[11/35] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2018-01-03 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/25139a39/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 83a6dfd..361581c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -30,6 +30,7 @@ import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
+
 import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
@@ -120,8 +121,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import 
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -137,6 +136,9 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+
 /**
  * Performs authorization checks for common operations, according to different
  * levels of authorized users.



[16/35] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2018-01-03 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8253f500
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8253f500
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8253f500

Branch: refs/heads/HBASE-19397
Commit: 8253f500df8fa3b1a6d4bc22e5dda97bea2337e4
Parents: 3bb465a
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8253f500/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 8ff2d12..a25217c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -389,6 +389,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -1041,7 +1048,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8253f500/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 0291165..fd77e1f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -905,6 +905,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -927,22 +948,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static void assertSharedLock(LockedResource resource, int lockCount)
-  {
+  pr

[01/35] hbase git commit: HBASE-19641 AsyncHBaseAdmin should use exponential backoff when polling the procedure result [Forced Update!]

2018-01-03 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397 2172026ca -> e424657c9 (forced update)


HBASE-19641 AsyncHBaseAdmin should use exponential backoff when polling the 
procedure result


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1fa3637b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1fa3637b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1fa3637b

Branch: refs/heads/HBASE-19397
Commit: 1fa3637b4d0020b1c4387610e8aa6b970c0138b8
Parents: a47afc8
Author: zhangduo 
Authored: Wed Jan 3 16:41:21 2018 +0800
Committer: zhangduo 
Committed: Wed Jan 3 18:32:54 2018 +0800

--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 57 ++--
 1 file changed, 27 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa3637b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 7a8d081..ceda280 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -89,6 +89,7 @@ import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 import org.apache.hbase.thirdparty.io.netty.util.Timeout;
 import org.apache.hbase.thirdparty.io.netty.util.TimerTask;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -2553,40 +2554,36 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
 future.completeExceptionally(error);
 return;
   }
-  getProcedureResult(procId, future);
+  getProcedureResult(procId, future, 0);
 });
 return future;
   }
 
-  private void getProcedureResult(final long procId, CompletableFuture 
future) {
-this. newMasterCaller()
-.action(
-  (controller, stub) -> this
-  . call(
-controller, stub, 
GetProcedureResultRequest.newBuilder().setProcId(procId).build(),
-(s, c, req, done) -> s.getProcedureResult(c, req, done), 
(resp) -> resp))
-.call()
-.whenComplete(
-  (response, error) -> {
-if (error != null) {
-  LOG.warn("failed to get the procedure result procId=" + procId,
-ConnectionUtils.translateException(error));
-  retryTimer.newTimeout(t -> getProcedureResult(procId, future), 
pauseNs,
-TimeUnit.NANOSECONDS);
-  return;
-}
-if (response.getState() == 
GetProcedureResultResponse.State.RUNNING) {
-  retryTimer.newTimeout(t -> getProcedureResult(procId, future), 
pauseNs,
-TimeUnit.NANOSECONDS);
-  return;
-}
-if (response.hasException()) {
-  IOException ioe = 
ForeignExceptionUtil.toIOException(response.getException());
-  future.completeExceptionally(ioe);
-} else {
-  future.complete(null);
-}
-  });
+  private void getProcedureResult(long procId, CompletableFuture future, 
int retries) {
+this. newMasterCaller().action((controller, 
stub) -> this
+. call(
+  controller, stub, 
GetProcedureResultRequest.newBuilder().setProcId(procId).build(),
+  (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> 
resp))
+.call().whenComplete((response, error) -> {
+  if (error != null) {
+LOG.warn("failed to get the procedure result procId={}", procId,
+  ConnectionUtils.translateException(error));
+retryTimer.newTimeout(t -> getProcedureResult(procId, future, 
retries + 1),
+  ConnectionUtils.getPauseTime(pauseNs, retries), 
TimeUnit.NANOSECONDS);
+return;
+  }
+  if (response.getState() == GetProcedureResultResponse.State.RUNNING) 
{
+retryTimer.newTimeout(t -> getProcedureResult(procId, future, 
retries + 1),
+  ConnectionUtils.getPauseTime(pauseNs, retries), 
TimeUnit.NANOSECONDS);
+return;
+  }
+  if (response.hasException()) {
+IOException ioe = 
ForeignExceptionUtil.toIOException(response.getException());
+future.completeExceptionally(ioe);
+  } else {
+future.complete(null);
+  }
+});
   }
 
   private

[28/35] hbase git commit: HBASE-19635 Introduce a thread at RS side to call reportProcedureDone

2018-01-03 Thread zhangduo
HBASE-19635 Introduce a thread at RS side to call reportProcedureDone


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d72f6f13
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d72f6f13
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d72f6f13

Branch: refs/heads/HBASE-19397
Commit: d72f6f1374f2ebf7ec7985138cf9b8b3a5fe98ca
Parents: 20a612f
Author: zhangduo 
Authored: Wed Dec 27 20:13:42 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../src/main/protobuf/RegionServerStatus.proto  |   5 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  15 ++-
 .../hbase/regionserver/HRegionServer.java   |  72 
 .../RemoteProcedureResultReporter.java  | 111 +++
 .../handler/RSProcedureHandler.java |   2 +-
 5 files changed, 149 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d72f6f13/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 4f75941..3f836cd 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -146,7 +146,7 @@ message RegionSpaceUseReportRequest {
 message RegionSpaceUseReportResponse {
 }
 
-message ReportProcedureDoneRequest {
+message RemoteProcedureResult {
   required uint64 proc_id = 1;
   enum Status {
 SUCCESS = 1;
@@ -155,6 +155,9 @@ message ReportProcedureDoneRequest {
   required Status status = 2;
   optional ForeignExceptionMessage error = 3;
 }
+message ReportProcedureDoneRequest {
+  repeated RemoteProcedureResult result = 1;
+}
 
 message ReportProcedureDoneResponse {
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d72f6f13/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 889128a..9c28a07 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -265,6 +265,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -2254,12 +2255,14 @@ public class MasterRpcServices extends RSRpcServices
   @Override
   public ReportProcedureDoneResponse reportProcedureDone(RpcController 
controller,
   ReportProcedureDoneRequest request) throws ServiceException {
-if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
-  master.remoteProcedureCompleted(request.getProcId());
-} else {
-  master.remoteProcedureFailed(request.getProcId(),
-RemoteProcedureException.fromProto(request.getError()));
-}
+request.getResultList().forEach(result -> {
+  if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
+master.remoteProcedureCompleted(result.getProcId());
+  } else {
+master.remoteProcedureFailed(result.getProcId(),
+  RemoteProcedureException.fromProto(result.getError()));
+  }
+});
 return ReportProcedureDoneResponse.getDefaultInstance();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d72f6f13/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index d413d9e..246eecc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRe

[18/35] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2018-01-03 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4907a731
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4907a731
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4907a731

Branch: refs/heads/HBASE-19397
Commit: 4907a731c1798bdf131a6e0aca78312e245dc2ae
Parents: 7cdda89
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 180 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   9 +-
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |  10 +
 .../regionserver/ReplicationSource.java |   9 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  51 -
 .../TestReplicationSourceManager.java   |  32 ++-
 21 files changed, 532 insertions(+), 471 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4907a731/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index d549607..2f2dc86 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -199,7 +201,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -224,10 +226,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4907a731/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 7a5efb1..246f1bf 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -282,7 +284,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAG

[06/35] hbase git commit: HBASE-19691 Removes Global(A) requirement for getClusterStatus

2018-01-03 Thread zhangduo
HBASE-19691 Removes Global(A) requirement for getClusterStatus

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a98bb4c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a98bb4c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a98bb4c

Branch: refs/heads/HBASE-19397
Commit: 9a98bb4ce9d3e600a2b982995914222c305ebe8a
Parents: d8ef30c
Author: Josh Elser 
Authored: Wed Jan 3 16:57:12 2018 -0500
Committer: Josh Elser 
Committed: Wed Jan 3 18:47:53 2018 -0500

--
 .../apache/hadoop/hbase/security/access/AccessController.java  | 6 --
 .../hadoop/hbase/security/access/TestAccessController.java | 4 ++--
 src/main/asciidoc/_chapters/appendix_acl_matrix.adoc   | 2 +-
 3 files changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 4e1924f..4110dfd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2751,12 +2751,6 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
 checkLockPermissions(getActiveUser(ctx), null, tableName, null, 
description);
   }
 
-  @Override
-  public void preGetClusterStatus(final 
ObserverContext ctx)
-  throws IOException {
-requirePermission(getActiveUser(ctx), "getClusterStatus", Action.ADMIN);
-  }
-
   private void checkLockPermissions(User user, String namespace,
   TableName tableName, RegionInfo[] regionInfos, String reason)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f181747..83a6dfd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3134,7 +3134,7 @@ public class TestAccessController extends SecureTestUtil {
   }
 };
 
-verifyAllowed(action, SUPERUSER, USER_ADMIN);
-verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+verifyAllowed(
+action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc 
b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index 0c99b1f..83043f7 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -123,7 +123,7 @@ In case the table goes out of date, the unit tests which 
check for accuracy of p
 || getReplicationPeerConfig | superuser\|global(A)
 || updateReplicationPeerConfig | superuser\|global(A)
 || listReplicationPeers | superuser\|global(A)
-|| getClusterStatus | superuser\|global(A)
+|| getClusterStatus | any user
 | Region | openRegion | superuser\|global(A)
 || closeRegion | superuser\|global(A)
 || flush | 
superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)



[04/35] hbase git commit: HBASE-19620 Add UT to confirm the race in MasterRpcServices.getProcedureResult

2018-01-03 Thread zhangduo
HBASE-19620 Add UT to confirm the race in MasterRpcServices.getProcedureResult


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab0e4596
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab0e4596
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab0e4596

Branch: refs/heads/HBASE-19397
Commit: ab0e459693cc7ad3e605c25e4d1267c754dca6cd
Parents: d84c4ec
Author: zhangduo 
Authored: Wed Jan 3 17:24:20 2018 +0800
Committer: zhangduo 
Committed: Wed Jan 3 20:29:07 2018 +0800

--
 .../hbase/client/TestGetProcedureResult.java| 142 +++
 1 file changed, 142 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab0e4596/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
new file mode 100644
index 000..60e18f7
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestGetProcedureResult.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
+
+/**
+ * Testcase for HBASE-19608.
+ */
+@Category({ MasterTests.class, MediumTests.class })
+public class TestGetProcedureResult {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  public static final class DummyProcedure extends 
Procedure
+  implements TableProcedureInterface {
+
+private final CountDownLatch failureSet = new CountDownLatch(1);
+
+private final CountDownLatch canRollback = new CountDownLatch(1);
+
+@Override
+public TableName getTableName() {
+  return TableName.valueOf("dummy");
+}
+
+@Override
+public TableOperationType getTableOperationType() {
+  return TableOperationType.READ;
+}
+
+@Override
+protected Procedure[] execute(MasterProcedureEnv env)
+throws ProcedureYieldException, ProcedureSuspendedException, 
InterruptedException {
+  setFailure("dummy", new IOException("inject error"));
+  failureSet.countDown();
+  return null;
+}
+
+@Override
+protected void rollback(MasterProcedureEnv env) throws IOException, 
InterruptedException {
+  canRollback.await();
+}
+
+@Override
+protected boolean abort(MasterProcedureEnv env) {
+  return false;
+}
+
+@Override
+protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {

[14/35] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2018-01-03 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3edefde8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3edefde8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3edefde8

Branch: refs/heads/HBASE-19397
Commit: 3edefde8a03d971967a2ff1bf72d61a84edd3754
Parents: 4e4711a
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3edefde8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index a94593c..38feb2a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2463,7 +2463,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2474,7 +2474,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2482,6 +2482,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  default Future addReplicationPeerAsync(String peerId, 
ReplicationPeerConfig peerConfig)
+  throws IOException {
+return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig 
peerConfig,
+  boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2489,6 +2520,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name tha

[33/35] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-01-03 Thread zhangduo
HBASE-19622 Reimplement ReplicationPeers with the new replication storage 
interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1104d16a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1104d16a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1104d16a

Branch: refs/heads/HBASE-19397
Commit: 1104d16a49132a0e1703efb8728fae9874e84879
Parents: d72f6f1
Author: huzheng 
Authored: Tue Dec 26 16:46:10 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  10 +-
 .../replication/VerifyReplication.java  |   9 +-
 .../hbase/replication/ReplicationFactory.java   |  10 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  60 +-
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../hbase/replication/ReplicationPeers.java | 235 
 .../replication/ReplicationPeersZKImpl.java | 543 ---
 .../replication/ZKReplicationPeerStorage.java   |  12 +-
 .../replication/ZKReplicationStorageBase.java   |   3 +-
 .../replication/TestReplicationStateBasic.java  | 125 ++---
 .../replication/TestReplicationStateZKImpl.java |   2 +-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../cleaner/ReplicationZKNodeCleaner.java   |  57 +-
 .../replication/ReplicationPeerManager.java |   6 +-
 .../regionserver/DumpReplicationQueues.java |   2 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  49 +-
 .../replication/regionserver/Replication.java   |   2 +-
 .../regionserver/ReplicationSource.java |   6 +-
 .../regionserver/ReplicationSourceManager.java  |  45 +-
 .../cleaner/TestReplicationHFileCleaner.java|   7 +-
 .../replication/TestMultiSlaveReplication.java  |   2 -
 .../TestReplicationTrackerZKImpl.java   |  36 +-
 .../TestReplicationSourceManager.java   |  17 +-
 .../hadoop/hbase/HBaseZKTestingUtility.java |   3 +-
 24 files changed, 304 insertions(+), 952 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1104d16a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 022bf64..a234a9b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -247,22 +247,22 @@ public final class ReplicationPeerConfigUtil {
   public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
   throws DeserializationException {
 if (ProtobufUtil.isPBMagicPrefix(bytes)) {
-  int pblen = ProtobufUtil.lengthOfPBMagic();
+  int pbLen = ProtobufUtil.lengthOfPBMagic();
   ReplicationProtos.ReplicationPeer.Builder builder =
   ReplicationProtos.ReplicationPeer.newBuilder();
   ReplicationProtos.ReplicationPeer peer;
   try {
-ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
 peer = builder.build();
   } catch (IOException e) {
 throw new DeserializationException(e);
   }
   return convert(peer);
 } else {
-  if (bytes.length > 0) {
-return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
+  if (bytes == null || bytes.length <= 0) {
+throw new DeserializationException("Bytes to deserialize should not be 
empty.");
   }
-  return ReplicationPeerConfig.newBuilder().setClusterKey("").build();
+  return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1104d16a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 64ef279..15ac2ab 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -336,15 +336,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 @Override public boolean isAborte

[34/35] hbase git commit: HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl

2018-01-03 Thread zhangduo
HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6922f80b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6922f80b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6922f80b

Branch: refs/heads/HBASE-19397
Commit: 6922f80b1ef15414a832c0f94e99d1cea6f5d9df
Parents: eee095c
Author: zhangduo 
Authored: Tue Jan 2 16:13:55 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../regionserver/PeerProcedureHandlerImpl.java  | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6922f80b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index 1efe180..c09c6a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-
+import java.util.concurrent.locks.Lock;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -32,7 +32,7 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   private static final Logger LOG = 
LoggerFactory.getLogger(PeerProcedureHandlerImpl.class);
 
   private final ReplicationSourceManager replicationSourceManager;
-  private final ReentrantLock peersLock = new ReentrantLock();
+  private final KeyLocker peersLock = new KeyLocker<>();
 
   public PeerProcedureHandlerImpl(ReplicationSourceManager 
replicationSourceManager) {
 this.replicationSourceManager = replicationSourceManager;
@@ -40,40 +40,57 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
 
   @Override
   public void addPeer(String peerId) throws ReplicationException, IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   replicationSourceManager.addPeer(peerId);
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void removePeer(String peerId) throws ReplicationException, 
IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   if (replicationSourceManager.getReplicationPeers().getPeer(peerId) != 
null) {
 replicationSourceManager.removePeer(peerId);
   }
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void disablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("disable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("disable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void enablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("enable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("enable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void updatePeerConfig(String peerId) throws ReplicationException, 
IOException {
-replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+} finally {
+  peerLock.unlock();
+}

[09/35] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore

2018-01-03 Thread zhangduo
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker 
and remove ReplicationZKNodeCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5df509b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5df509b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5df509b7

Branch: refs/heads/HBASE-19397
Commit: 5df509b7406ae71aa49ae1e9b85e5aa62ffd0485
Parents: c888203
Author: zhangduo 
Authored: Wed Jan 3 09:39:44 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/VerifyReplication.java  |   6 +-
 .../hbase/replication/ReplicationPeers.java |  26 +--
 .../hbase/replication/ReplicationUtils.java |  69 +++
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  13 --
 .../cleaner/ReplicationZKNodeCleaner.java   | 192 ---
 .../cleaner/ReplicationZKNodeCleanerChore.java  |  54 --
 .../replication/ReplicationPeerManager.java |  18 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  13 +-
 .../hbase/util/hbck/ReplicationChecker.java | 109 +++
 .../cleaner/TestReplicationZKNodeCleaner.java   | 109 ---
 .../hbase/util/TestHBaseFsckReplication.java| 101 ++
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |   6 +-
 13 files changed, 259 insertions(+), 459 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5df509b7/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index f005f80..0f3b9fd 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -342,10 +342,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 }
   });
   ReplicationPeerStorage storage =
-  ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
+ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
   ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
   return Pair.newPair(peerConfig,
-ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf));
+ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf));
 } catch (ReplicationException e) {
   throw new IOException("An error occurred while trying to connect to the 
remove peer cluster",
   e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5df509b7/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index ad3fee7..95192b8 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -17,14 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -106,25 +103,6 @@ public class ReplicationPeers {
 return Collections.unmodifiableSet(peerCache.keySet());
   }
 
-  public static Configuration 
getPee

[22/35] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase

2018-01-03 Thread zhangduo
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8b84a9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8b84a9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8b84a9f

Branch: refs/heads/HBASE-19397
Commit: b8b84a9f5719928195d4534968fb4bfdd2521141
Parents: 5df509b
Author: huzheng 
Authored: Fri Dec 29 15:55:28 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   5 +-
 .../replication/ReplicationStateZKBase.java | 153 ---
 .../replication/ReplicationTrackerZKImpl.java   |  18 ++-
 .../replication/ZKReplicationPeerStorage.java   |  24 ++-
 .../replication/ZKReplicationStorageBase.java   |  13 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../master/ReplicationPeerConfigUpgrader.java   | 128 
 .../regionserver/DumpReplicationQueues.java |  18 +--
 .../replication/regionserver/Replication.java   |   3 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   3 +-
 .../TestReplicationTrackerZKImpl.java   |   3 +-
 .../replication/master/TestTableCFsUpdater.java |  41 ++---
 .../TestReplicationSourceManager.java   |   6 +-
 13 files changed, 135 insertions(+), 284 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8b84a9f/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c66aff..2a970ba 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -33,9 +33,8 @@ public class ReplicationFactory {
 return new ReplicationPeers(zk, conf);
   }
 
-  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,
-  final ReplicationPeers replicationPeers, Configuration conf, Abortable 
abortable,
+  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
Abortable abortable,
   Stoppable stopper) {
-return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, 
abortable, stopper);
+return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b8b84a9f/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
deleted file mode 100644
index f49537c..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.Inte

[20/35] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-01-03 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/28f23155
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/28f23155
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/28f23155

Branch: refs/heads/HBASE-19397
Commit: 28f2315531f1b31a24d5596ebfac966f5e4ece44
Parents: 4907a73
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  36 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  62 ++-
 .../hbase/master/MockNoopMasterServices.java|  12 +-
 .../hbase/master/TestMasterNoCluster.java   |   4 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1750 insertions(+), 312 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/28f23155/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static  List nullToEmpty(List list) {
+return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/28f23155/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index ab22199..4e3cea0 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -121,6 +121,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/28f23155/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http:/

[21/35] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2018-01-03 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bbbda610
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bbbda610
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bbbda610

Branch: refs/heads/HBASE-19397
Commit: bbbda610f946a8b9d9811ef567a1729dc29b5279
Parents: 0d05d79
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 170 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 217 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bbbda610/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 04db45d..64ef279 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -330,7 +329,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -351,9 +349,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bbbda610/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its local peer state. otherwise, just return the 
local cached peer state.

[12/35] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2018-01-03 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25139a39
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25139a39
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25139a39

Branch: refs/heads/HBASE-19397
Commit: 25139a39a591ea3550938e1e28989c5263f5a622
Parents: 9a98bb4
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  33 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 224 +--
 .../procedure/PeerProcedureInterface.java   |  34 +++
 .../master/procedure/RSProcedureDispatcher.java | 101 +
 .../master/replication/ModifyPeerProcedure.java | 127 +++
 .../master/replication/RefreshPeerCallable.java |  67 ++
 .../replication/RefreshPeerProcedure.java   | 197 
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  90 ++--
 .../hbase/regionserver/RSRpcServices.java   |  56 +++--
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 +++
 .../security/access/TestAccessController.java   |   6 +-
 24 files changed, 1125 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25139a39/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25139a39/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 8bbfcec..02676a8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher
-   * @param 
*/
   public interface RemoteProcedure {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the {@code 
reportProcedureDone}
+ * method.
+ * @param error the error message
+ */
+void remoteOperationFailed(TEnv env, String error);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/25139a39/hbase-protocol-shaded/src/main/protobuf/Admin.proto
---

[03/35] hbase git commit: HBASE-19666 TestDefaultCompactSelection test failed

2018-01-03 Thread zhangduo
HBASE-19666 TestDefaultCompactSelection test failed

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d84c4ec2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d84c4ec2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d84c4ec2

Branch: refs/heads/HBASE-19397
Commit: d84c4ec29f1552581fdafbd08defd5b56e983c23
Parents: 777ad8c
Author: Balazs Meszaros 
Authored: Wed Jan 3 18:49:11 2018 +0800
Committer: Chia-Ping Tsai 
Committed: Wed Jan 3 19:08:50 2018 +0800

--
 .../hbase/regionserver/TestDefaultCompactSelection.java   | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d84c4ec2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 6038bb2..4dea315 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -34,6 +34,13 @@ import org.junit.experimental.categories.Category;
 @Category(SmallTests.class)
 public class TestDefaultCompactSelection extends TestCompactionPolicy {
 
+  @Override
+  protected void config() {
+super.config();
+// DON'T change this config since all test cases assume 
HStore.BLOCKING_STOREFILES_KEY is 10.
+this.conf.setLong(HStore.BLOCKING_STOREFILES_KEY, 10);
+  }
+
   @Test
   public void testCompactionRatio() throws IOException {
 TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();



[19/35] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-01-03 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/28f23155/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List peers = new ArrayList<>();
-List peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : peer

[13/35] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2018-01-03 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e4711a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e4711a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e4711a4

Branch: refs/heads/HBASE-19397
Commit: 4e4711a493c92e2bcbfe151ae3c61e4589908932
Parents: 25139a3
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |   3 +-
 .../src/main/protobuf/MasterProcedure.proto |  21 +++-
 .../src/main/protobuf/RegionServerStatus.proto  |   3 +-
 .../src/main/protobuf/Replication.proto |   5 +
 .../replication/ReplicationPeersZKImpl.java |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 100 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/MasterServices.java |  26 +++--
 .../assignment/RegionTransitionProcedure.java   |  13 +--
 .../master/procedure/MasterProcedureEnv.java|   5 +
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/replication/AddPeerProcedure.java|  97 ++
 .../replication/DisablePeerProcedure.java   |  70 +
 .../master/replication/EnablePeerProcedure.java |  69 +
 .../master/replication/ModifyPeerProcedure.java |  97 +++---
 .../master/replication/RefreshPeerCallable.java |  67 -
 .../replication/RefreshPeerProcedure.java   |  28 --
 .../master/replication/RemovePeerProcedure.java |  69 +
 .../master/replication/ReplicationManager.java  |  76 +++---
 .../replication/UpdatePeerConfigProcedure.java  |  92 +
 .../hbase/regionserver/HRegionServer.java   |   6 +-
 .../regionserver/RefreshPeerCallable.java   |  70 +
 .../hbase/master/MockNoopMasterServices.java|  23 +++--
 .../replication/DummyModifyPeerProcedure.java   |  13 ++-
 24 files changed, 737 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e4711a4/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 02676a8..bdff1ca 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcherhttp://git-wip-us.apache.org/repos/asf/hbase/blob/4e4711a4/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 0e2bdba..ae676ea 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -27,6 +27,7 @@ option optimize_for = SPEED;
 import "HBase.proto";
 import "RPC.proto";
 import "Snapshot.proto";
+import "Replication.proto";
 
 // 
 //  WARNING - Compatibility rules
@@ -367,9 +368,10 @@ message GCMergedRegionsStateData {
 }
 
 enum PeerModificationState {
-  UPDATE_PEER_STORAGE = 1;
-  REFRESH_PEER_ON_RS = 2;
-  POST_PEER_MODIFICATION = 3;
+  PRE_PEER_MODIFICATION = 1;
+  UPDATE_PEER_STORAGE = 2;
+  REFRESH_PEER_ON_RS = 3;
+  POST_PEER_MODIFICATION = 4;
 }
 
 message PeerModificationStateData {
@@ -394,4 +396,17 @@ message RefreshPeerParameter {
   required string peer_id = 1;
   required PeerModificationType type = 2;
   required ServerName target_server = 3;
+}
+
+message ModifyPeerStateData {
+  required string peer_id = 1;
+}
+
+message AddPeerStateData {
+  required ReplicationPeer peer_config = 1;
+  required bool enabled = 2;
+}
+
+message UpdatePeerConfigStateData {
+  required ReplicationPeer peer_config = 1;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e4711a4/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index eb396ac..4f75941 100644
--- a/hbase-protocol-sh

[02/35] hbase git commit: HBASE-19604 Fixed Checkstyle errors in hbase-protocol-shaded and enabled Checkstyle to fail on violations

2018-01-03 Thread zhangduo
HBASE-19604 Fixed Checkstyle errors in hbase-protocol-shaded and enabled 
Checkstyle to fail on violations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/777ad8ca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/777ad8ca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/777ad8ca

Branch: refs/heads/HBASE-19397
Commit: 777ad8caa2cabf6c4276bf96a3016f60da15f8dc
Parents: 1fa3637
Author: Jan Hentschel 
Authored: Sat Dec 23 17:38:49 2017 +0100
Committer: Jan Hentschel 
Committed: Wed Jan 3 13:41:47 2018 +0300

--
 hbase-protocol-shaded/pom.xml   | 16 
 .../hadoop/hbase/util/ForeignExceptionUtil.java | 10 --
 2 files changed, 24 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/777ad8ca/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index ab04e98..03030b8 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -179,6 +179,22 @@
   
 
   
+  
+org.apache.maven.plugins
+maven-checkstyle-plugin
+
+  
+checkstyle
+validate
+
+  check
+
+
+  true
+
+  
+
+  
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/777ad8ca/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
index 0e4bb94..f8cef89 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java
@@ -93,7 +93,10 @@ public final class ForeignExceptionUtil {
 GenericExceptionMessage payload = gemBuilder.build();
 ForeignExceptionMessage.Builder exception = 
ForeignExceptionMessage.newBuilder();
 exception.setGenericException(payload);
-if (source != null) exception.setSource(source);
+if (source != null) {
+  exception.setSource(source);
+}
+
 return exception.build();
   }
 
@@ -104,7 +107,10 @@ public final class ForeignExceptionUtil {
*/
   public static List 
toProtoStackTraceElement(StackTraceElement[] trace) {
 // if there is no stack trace, ignore it and just return the message
-if (trace == null) return null;
+if (trace == null) {
+  return null;
+}
+
 // build the stack trace for the message
 List pbTrace = new ArrayList<>(trace.length);
 for (StackTraceElement elem : trace) {



[29/35] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-01-03 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/48b9ae4e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 6e27a21..d8f9625 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,13 +21,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,17 +48,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
   public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, 
Set peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List liveRegionServers = 
replicationTracker.getListOfRegionServers();
+Set liveRegionServers = new 
HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String 

[15/35] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2018-01-03 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3bb465a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3bb465a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3bb465a5

Branch: refs/heads/HBASE-19397
Commit: 3bb465a54de57cb22e9e81e5abff5a1bc8fc6999
Parents: 3edefde
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3bb465a5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c795ce1..889128a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);

[07/35] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2018-01-03 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0d05d799
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0d05d799
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0d05d799

Branch: refs/heads/HBASE-19397
Commit: 0d05d7992416434574ccd8247e7910cd98ecce08
Parents: 48b9ae4
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0d05d799/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0d05d799/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[35/35] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure

2018-01-03 Thread zhangduo
HBASE-19697 Remove TestReplicationAdminUsingProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e424657c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e424657c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e424657c

Branch: refs/heads/HBASE-19397
Commit: e424657c92aeed35264086cc7b7389fa6a33669f
Parents: b8b84a9
Author: zhangduo 
Authored: Wed Jan 3 21:13:57 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:44 2018 +0800

--
 .../TestReplicationAdminUsingProcedure.java | 225 ---
 1 file changed, 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e424657c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
deleted file mode 100644
index 1300376..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client.replication;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.TestReplicationBase;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-
-@Category({ MediumTests.class, ClientTests.class })
-public class TestReplicationAdminUsingProcedure extends TestReplicationBase {
-
-  private static final String PEER_ID = "2";
-  private static final Logger LOG = 
Logger.getLogger(TestReplicationAdminUsingProcedure.class);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-conf1.setInt("hbase.multihconnection.threads.max", 10);
-
-// Start the master & slave mini cluster.
-TestReplicationBase.setUpBeforeClass();
-
-// Remove the replication peer
-hbaseAdmin.removeReplicationPeer(PEER_ID);
-  }
-
-  private void loadData(int startRowKey, int endRowKey) throws IOException {
-for (int i = startRowKey; i < endRowKey; i++) {
-  byte[] rowKey = Bytes.add(row, Bytes.toBytes(i));
-  Put put = new Put(rowKey);
-  put.addColumn(famName, null, Bytes.toBytes(i));
-  htable1.put(put);
-}
-  }
-
-  private void waitForReplication(int expectedRows, int retries)
-  throws IOException, InterruptedException {
-Scan scan;
-for (int i = 0; i < retries; i++) {
-  scan = new Scan();
-  if (i == retries - 1) {
-throw new IOException("Waited too much time for normal batch 
replication");
-  }
-  try (ResultScanner scanner = htable2.getScanner(scan)) {
-int count = 0;
-for (Result res : scanner) {
-  count++;
-}
-if (count != expectedRows) {
-  LOG.info("Only got " + count + " rows,  expected rows: " + 
expectedRows);
-  Thread.sleep(SLEEP_TIME);
-} else {
-  return;
-}
-  }
-}
-  }
-
-  @Before
-  public void setUp() throws IOException {
-ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-rpc.se

[31/35] hbase git commit: HBASE-19544 Add UTs for testing concurrent modifications on replication peer

2018-01-03 Thread zhangduo
HBASE-19544 Add UTs for testing concurrent modifications on replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8882030
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8882030
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8882030

Branch: refs/heads/HBASE-19397
Commit: c888203044fd1cee689cf6adf5de245967037f1d
Parents: 6922f80
Author: Guanghao Zhang 
Authored: Tue Jan 2 17:07:41 2018 +0800
Committer: zhangduo 
Committed: Thu Jan 4 09:22:35 2018 +0800

--
 .../replication/TestReplicationAdmin.java   | 69 
 1 file changed, 69 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8882030/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 9b71595..89cf393 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -55,6 +56,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Unit testing of ReplicationAdmin
@@ -62,6 +65,8 @@ import org.junit.rules.TestName;
 @Category({MediumTests.class, ClientTests.class})
 public class TestReplicationAdmin {
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationAdmin.class);
+
   private final static HBaseTestingUtility TEST_UTIL =
   new HBaseTestingUtility();
 
@@ -111,6 +116,70 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testConcurrentPeerOperations() throws Exception {
+int threadNum = 5;
+AtomicLong successCount = new AtomicLong(0);
+
+// Test concurrent add peer operation
+Thread[] addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent remove peer operation
+successCount.set(0);
+Thread[] removePeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  removePeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.removeReplicationPeer(ID_ONE);
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when remove replication peer", e);
+}
+  });
+  removePeers[i].start();
+}
+for (Thread removePeer : removePeers) {
+  removePeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent add peer operation again
+successCount.set(0);
+addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+  }
+
+  @Test
   public void testAddInvalidPeer() {
 ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
 builder.setClusterKey(KEY_ONE);



[4/4] hbase git commit: HBASE-19691 Removes Global(A) requirement for getClusterStatus

2018-01-03 Thread elserj
HBASE-19691 Removes Global(A) requirement for getClusterStatus

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc5186f4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc5186f4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc5186f4

Branch: refs/heads/branch-1.4
Commit: bc5186f4dff10cb5824f84f4939547d77b63d909
Parents: 80d183a
Author: Josh Elser 
Authored: Wed Jan 3 16:57:12 2018 -0500
Committer: Josh Elser 
Committed: Wed Jan 3 19:15:57 2018 -0500

--
 .../apache/hadoop/hbase/security/access/AccessController.java  | 6 --
 .../hadoop/hbase/security/access/TestAccessController.java | 4 ++--
 2 files changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc5186f4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index e90fe05..b06b2bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2741,10 +2741,4 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   String groupName) throws IOException {
 requirePermission("balanceRSGroup", Action.ADMIN);
   }
-
-  @Override
-  public void preGetClusterStatus(final 
ObserverContext ctx)
-  throws IOException {
-requirePermission("getClusterStatus", Action.ADMIN);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc5186f4/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 5770a41..5071ca0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3051,7 +3051,7 @@ public class TestAccessController extends SecureTestUtil {
   }
 };
 
-verifyAllowed(action, SUPERUSER, USER_ADMIN);
-verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+verifyAllowed(
+action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER);
   }
 }



[1/4] hbase git commit: HBASE-19691 Removes Global(A) requirement for getClusterStatus

2018-01-03 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fd33b3589 -> 490728ae7
  refs/heads/branch-1.4 80d183a70 -> bc5186f4d
  refs/heads/branch-2 97dc7d87c -> f0011ebfe
  refs/heads/master d8ef30c5e -> 9a98bb4ce


HBASE-19691 Removes Global(A) requirement for getClusterStatus

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a98bb4c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a98bb4c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a98bb4c

Branch: refs/heads/master
Commit: 9a98bb4ce9d3e600a2b982995914222c305ebe8a
Parents: d8ef30c
Author: Josh Elser 
Authored: Wed Jan 3 16:57:12 2018 -0500
Committer: Josh Elser 
Committed: Wed Jan 3 18:47:53 2018 -0500

--
 .../apache/hadoop/hbase/security/access/AccessController.java  | 6 --
 .../hadoop/hbase/security/access/TestAccessController.java | 4 ++--
 src/main/asciidoc/_chapters/appendix_acl_matrix.adoc   | 2 +-
 3 files changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 4e1924f..4110dfd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2751,12 +2751,6 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
 checkLockPermissions(getActiveUser(ctx), null, tableName, null, 
description);
   }
 
-  @Override
-  public void preGetClusterStatus(final 
ObserverContext ctx)
-  throws IOException {
-requirePermission(getActiveUser(ctx), "getClusterStatus", Action.ADMIN);
-  }
-
   private void checkLockPermissions(User user, String namespace,
   TableName tableName, RegionInfo[] regionInfos, String reason)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f181747..83a6dfd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3134,7 +3134,7 @@ public class TestAccessController extends SecureTestUtil {
   }
 };
 
-verifyAllowed(action, SUPERUSER, USER_ADMIN);
-verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+verifyAllowed(
+action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a98bb4c/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc 
b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index 0c99b1f..83043f7 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -123,7 +123,7 @@ In case the table goes out of date, the unit tests which 
check for accuracy of p
 || getReplicationPeerConfig | superuser\|global(A)
 || updateReplicationPeerConfig | superuser\|global(A)
 || listReplicationPeers | superuser\|global(A)
-|| getClusterStatus | superuser\|global(A)
+|| getClusterStatus | any user
 | Region | openRegion | superuser\|global(A)
 || closeRegion | superuser\|global(A)
 || flush | 
superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)



[3/4] hbase git commit: HBASE-19691 Removes Global(A) requirement for getClusterStatus

2018-01-03 Thread elserj
HBASE-19691 Removes Global(A) requirement for getClusterStatus

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/490728ae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/490728ae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/490728ae

Branch: refs/heads/branch-1
Commit: 490728ae7f9e288bcc5570e1e5db2c1422bbbec6
Parents: fd33b35
Author: Josh Elser 
Authored: Wed Jan 3 16:57:12 2018 -0500
Committer: Josh Elser 
Committed: Wed Jan 3 19:15:49 2018 -0500

--
 .../apache/hadoop/hbase/security/access/AccessController.java  | 6 --
 .../hadoop/hbase/security/access/TestAccessController.java | 4 ++--
 2 files changed, 2 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/490728ae/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index e90fe05..b06b2bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2741,10 +2741,4 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   String groupName) throws IOException {
 requirePermission("balanceRSGroup", Action.ADMIN);
   }
-
-  @Override
-  public void preGetClusterStatus(final 
ObserverContext ctx)
-  throws IOException {
-requirePermission("getClusterStatus", Action.ADMIN);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/490728ae/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 5770a41..5071ca0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3051,7 +3051,7 @@ public class TestAccessController extends SecureTestUtil {
   }
 };
 
-verifyAllowed(action, SUPERUSER, USER_ADMIN);
-verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+verifyAllowed(
+action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER);
   }
 }



[2/4] hbase git commit: HBASE-19691 Removes Global(A) requirement for getClusterStatus

2018-01-03 Thread elserj
HBASE-19691 Removes Global(A) requirement for getClusterStatus

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0011ebf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0011ebf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0011ebf

Branch: refs/heads/branch-2
Commit: f0011ebfe0415d31e345be2f12c1256124a5b27f
Parents: 97dc7d8
Author: Josh Elser 
Authored: Wed Jan 3 16:57:12 2018 -0500
Committer: Josh Elser 
Committed: Wed Jan 3 19:02:26 2018 -0500

--
 .../apache/hadoop/hbase/security/access/AccessController.java  | 6 --
 .../hadoop/hbase/security/access/TestAccessController.java | 4 ++--
 src/main/asciidoc/_chapters/appendix_acl_matrix.adoc   | 2 +-
 3 files changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f0011ebf/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 4e1924f..4110dfd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2751,12 +2751,6 @@ public class AccessController implements 
MasterCoprocessor, RegionCoprocessor,
 checkLockPermissions(getActiveUser(ctx), null, tableName, null, 
description);
   }
 
-  @Override
-  public void preGetClusterStatus(final 
ObserverContext ctx)
-  throws IOException {
-requirePermission(getActiveUser(ctx), "getClusterStatus", Action.ADMIN);
-  }
-
   private void checkLockPermissions(User user, String namespace,
   TableName tableName, RegionInfo[] regionInfos, String reason)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0011ebf/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f181747..83a6dfd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3134,7 +3134,7 @@ public class TestAccessController extends SecureTestUtil {
   }
 };
 
-verifyAllowed(action, SUPERUSER, USER_ADMIN);
-verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+verifyAllowed(
+action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0011ebf/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc 
b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index 0c99b1f..83043f7 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -123,7 +123,7 @@ In case the table goes out of date, the unit tests which 
check for accuracy of p
 || getReplicationPeerConfig | superuser\|global(A)
 || updateReplicationPeerConfig | superuser\|global(A)
 || listReplicationPeers | superuser\|global(A)
-|| getClusterStatus | superuser\|global(A)
+|| getClusterStatus | any user
 | Region | openRegion | superuser\|global(A)
 || closeRegion | superuser\|global(A)
 || flush | 
superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)



[1/2] hbase git commit: HBASE-19358 Improve the stability of splitting log when do fail over

2018-01-03 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 a7383851b -> 80d183a70


HBASE-19358 Improve the stability of splitting log when do fail over

Signed-off-by: Yu Li 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83680887
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83680887
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83680887

Branch: refs/heads/branch-1.4
Commit: 83680887b6fd92a6c17419be0cf1dc25446a414f
Parents: a738385
Author: Jingyun Tian 
Authored: Tue Jan 2 17:21:32 2018 +0800
Committer: Andrew Purtell 
Committed: Wed Jan 3 13:25:37 2018 -0800

--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 358 +--
 .../TestWALReplayBoundedLogWriterCreation.java  |  33 ++
 .../TestWALSplitBoundedLogWriterCreation.java   |  44 +++
 3 files changed, 330 insertions(+), 105 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/83680887/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 7c74649..0781562 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -25,6 +25,7 @@ import java.io.InterruptedIOException;
 import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -163,6 +164,9 @@ public class WALSplitter {
 
   protected boolean distributedLogReplay;
 
+  private final boolean splitWriterCreationBounded;
+
+
   // Map encodedRegionName -> lastFlushedSequenceId
   protected Map lastFlushedSequenceIds = new 
ConcurrentHashMap();
 
@@ -182,6 +186,8 @@ public class WALSplitter {
   // the file being split currently
   private FileStatus fileBeingSplit;
 
+  public final static String SPLIT_WRITER_CREATION_BOUNDED = 
"hbase.split.writer.creation.bounded";
+
   @VisibleForTesting
   WALSplitter(final WALFactory factory, Configuration conf, Path rootDir,
   FileSystem fs, LastSequenceId idChecker,
@@ -196,10 +202,10 @@ public class WALSplitter {
 this.csm = (BaseCoordinatedStateManager)csm;
 this.walFactory = factory;
 this.controller = new PipelineController();
-
+this.splitWriterCreationBounded = 
conf.getBoolean(SPLIT_WRITER_CREATION_BOUNDED, false);
 entryBuffers = new EntryBuffers(controller,
 this.conf.getInt("hbase.regionserver.hlog.splitlog.buffersize",
-128*1024*1024));
+128*1024*1024), splitWriterCreationBounded);
 
 // a larger minBatchSize may slow down recovery because replay writer has 
to wait for
 // enough edits before replaying them
@@ -214,7 +220,12 @@ public class WALSplitter {
 LOG.info("ZooKeeperWatcher is passed in as NULL so disable 
distrubitedLogRepaly.");
   }
   this.distributedLogReplay = false;
-  outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, 
numWriterThreads);
+  if(splitWriterCreationBounded){
+outputSink = new BoundedLogWriterCreationOutputSink(controller,
+entryBuffers, numWriterThreads);
+  }else {
+outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, 
numWriterThreads);
+  }
 }
 
   }
@@ -925,11 +936,19 @@ public class WALSplitter {
 Set currentlyWriting = new TreeSet(Bytes.BYTES_COMPARATOR);
 
 long totalBuffered = 0;
-long maxHeapUsage;
+final long maxHeapUsage;
+boolean splitWriterCreationBounded;
+
 
 public EntryBuffers(PipelineController controller, long maxHeapUsage) {
+  this(controller, maxHeapUsage, false);
+}
+
+public EntryBuffers(PipelineController controller, long maxHeapUsage,
+boolean splitWriterCreationBounded) {
   this.controller = controller;
   this.maxHeapUsage = maxHeapUsage;
+  this.splitWriterCreationBounded = splitWriterCreationBounded;
 }
 
 /**
@@ -969,6 +988,14 @@ public class WALSplitter {
  * @return RegionEntryBuffer a buffer of edits to be written or replayed.
  */
 synchronized RegionEntryBuffer getChunkToWrite() {
+  // The core part of limiting opening writers is it doesn't return chunk 
only if the heap size
+  // is over maxHeapUsage. Thus it doesn't need to create a writer for 
each region
+  // during splitting. It will flush all the logs in the buffer after 
splitting through a
+  // threadpool, which means the number of writers it created is under 
control
+  if(splitWriterCreationBounded && totalBuffer

[2/2] hbase git commit: HBASE-18625 Splitting of region with replica, doesn't update region list in serverHolding. A server crash leads to overlap.

2018-01-03 Thread apurtell
HBASE-18625 Splitting of region with replica, doesn't update region list in 
serverHolding. A server crash leads to overlap.

Signed-off-by: ramkrishna.s.vasudevan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80d183a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80d183a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80d183a7

Branch: refs/heads/branch-1.4
Commit: 80d183a707c6d0203da1565f6a7ca3edf4bb2291
Parents: 8368088
Author: huaxiang sun 
Authored: Wed Jan 3 12:24:05 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Jan 3 13:25:40 2018 -0800

--
 .../hadoop/hbase/master/RegionStates.java   | 33 ++-
 .../TestCatalogJanitorInMemoryStates.java   | 91 +---
 2 files changed, 112 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80d183a7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 599e649..7ce6257 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -596,6 +596,19 @@ public class RegionStates {
   }
 
   /**
+   * Used in some unit tests
+   */
+  @VisibleForTesting
+  synchronized boolean existsInServerHoldings(final ServerName serverName,
+  final HRegionInfo hri) {
+Set oldRegions = serverHoldings.get(serverName);
+if (oldRegions != null) {
+  return oldRegions.contains(hri);
+}
+return false;
+  }
+
+  /**
* A dead server's wals have been split so that all the regions
* used to be open on it can be safely assigned now. Mark them assignable.
*/
@@ -664,8 +677,26 @@ public class RegionStates {
   deleteRegion(hri);
   return;
 }
+
+/*
+ * One tricky case, if region here is a replica region and its parent is at
+ * SPLIT state, its newState should be same as its parent, not OFFLINE.
+ */
 State newState =
-  expectedState == null ? State.OFFLINE : expectedState;
+expectedState == null ? State.OFFLINE : expectedState;
+
+if ((expectedState == null) && !RegionReplicaUtil.isDefaultReplica(hri)) {
+  RegionState primateState = getRegionState(
+  RegionReplicaUtil.getRegionInfoForDefaultReplica(hri));
+  if ((primateState != null) && (primateState.getState() == State.SPLIT)) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Update region " + hri + "to SPLIT, from primary region " +
+  RegionReplicaUtil.getRegionInfoForDefaultReplica(hri));
+}
+newState = State.SPLIT;
+  }
+}
+
 updateRegionState(hri, newState);
 String encodedName = hri.getEncodedName();
 synchronized (this) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/80d183a7/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
index 34cf19f..5ec3d6a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
@@ -18,21 +18,20 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import org.junit.AfterClass;
@@ -44,12 +43,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.Tes

hbase git commit: HBASE-18625 Splitting of region with replica, doesn't update region list in serverHolding. A server crash leads to overlap.

2018-01-03 Thread huaxiangsun
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6faed49ad -> fd33b3589


HBASE-18625 Splitting of region with replica, doesn't update region list in 
serverHolding. A server crash leads to overlap.

Signed-off-by: ramkrishna.s.vasudevan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd33b358
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd33b358
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd33b358

Branch: refs/heads/branch-1
Commit: fd33b3589e9ce03ed478552628403d5c08ae2394
Parents: 6faed49
Author: huaxiang sun 
Authored: Wed Jan 3 12:24:05 2018 -0800
Committer: Huaxiang Sun 
Committed: Wed Jan 3 12:32:44 2018 -0800

--
 .../hadoop/hbase/master/RegionStates.java   | 33 ++-
 .../TestCatalogJanitorInMemoryStates.java   | 91 +---
 2 files changed, 112 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd33b358/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 599e649..7ce6257 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -596,6 +596,19 @@ public class RegionStates {
   }
 
   /**
+   * Used in some unit tests
+   */
+  @VisibleForTesting
+  synchronized boolean existsInServerHoldings(final ServerName serverName,
+  final HRegionInfo hri) {
+Set oldRegions = serverHoldings.get(serverName);
+if (oldRegions != null) {
+  return oldRegions.contains(hri);
+}
+return false;
+  }
+
+  /**
* A dead server's wals have been split so that all the regions
* used to be open on it can be safely assigned now. Mark them assignable.
*/
@@ -664,8 +677,26 @@ public class RegionStates {
   deleteRegion(hri);
   return;
 }
+
+/*
+ * One tricky case, if region here is a replica region and its parent is at
+ * SPLIT state, its newState should be same as its parent, not OFFLINE.
+ */
 State newState =
-  expectedState == null ? State.OFFLINE : expectedState;
+expectedState == null ? State.OFFLINE : expectedState;
+
+if ((expectedState == null) && !RegionReplicaUtil.isDefaultReplica(hri)) {
+  RegionState primateState = getRegionState(
+  RegionReplicaUtil.getRegionInfoForDefaultReplica(hri));
+  if ((primateState != null) && (primateState.getState() == State.SPLIT)) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Update region " + hri + "to SPLIT, from primary region " +
+  RegionReplicaUtil.getRegionInfoForDefaultReplica(hri));
+}
+newState = State.SPLIT;
+  }
+}
+
 updateRegionState(hri, newState);
 String encodedName = hri.getEncodedName();
 synchronized (this) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fd33b358/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
index 34cf19f..5ec3d6a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
@@ -18,21 +18,20 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import org.junit.AfterClass;
@@ -44,12 +43,6 @@ import org.junit.experimental.cat

[20/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.M

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
index b8e321a..439a50d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
@@ -468,274 +468,216 @@
 460  }
 461
 462  /**
-463   * Used to gracefully handle fallback 
to deprecated methods when we
-464   * evolve coprocessor APIs.
-465   *
-466   * When a particular Coprocessor API is 
updated to change methods, hosts can support fallback
-467   * to the deprecated API by using this 
method to determine if an instance implements the new API.
-468   * In the event that said support is 
partial, then in the face of a runtime issue that prevents
-469   * proper operation {@link 
#legacyWarning(Class, String)} should be used to let operators know.
-470   *
-471   * For examples of this in action, see 
the implementation of
-472   * 
    -473 *
  • {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} -474 *
  • {@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost} -475 *
-476 * -477 * @param clazz Coprocessor you wish to evaluate -478 * @param methodName the name of the non-deprecated method version -479 * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are -480 * declared. -481 */ -482 @InterfaceAudience.Private -483 protected static boolean useLegacyMethod(final Class clazz, -484 final String methodName, final Class... parameterTypes) { -485boolean useLegacy; -486// Use reflection to see if they implement the non-deprecated version -487try { -488 clazz.getDeclaredMethod(methodName, parameterTypes); -489 LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " + -490 "signature. Skipping legacy support for invocations in '" + clazz +"'."); -491 useLegacy = false; -492} catch (NoSuchMethodException exception) { -493 useLegacy = true; -494} catch (SecurityException exception) { -495 LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz + -496 "' requires legacy support; assuming it does. If you get later errors about legacy " + -497 "coprocessor use, consider updating your security policy to allow access to the package" + -498 " and declared members of your implementation."); -499 LOG.debug("Details of Security Manager rejection.", exception); -500 useLegacy = true; +463 * Used to limit legacy handling to once per Coprocessor class per classloader. +464 */ +465 private static final Set> legacyWarning = +466 new ConcurrentSkipListSet<>( +467 new Comparator>() { +468@Override +469public int compare(Class c1, Class c2) { +470 if (c1.equals(c2)) { +471return 0; +472 } +473 return c1.getName().compareTo(c2.getName()); +474} +475 }); +476 +477 /** +478 * Implementations defined function to get an observer of type {@code O} from a coprocessor of +479 * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each +480 * observer they can handle. For e.g. 
RegionCoprocessorHost will use 3 getters, one for +481 * each of RegionObserver, EndpointObserver and BulkLoadObserver. +482 * These getters are used by {@code ObserverOperation} to get appropriate observer from the +483 * coprocessor. +484 */ +485 @FunctionalInterface +486 public interface ObserverGetter extends Function> {} +487 +488 private abstract class ObserverOperation extends ObserverContextImpl { +489ObserverGetter observerGetter; +490 +491 ObserverOperation(ObserverGetter observerGetter) { +492 this(observerGetter, null); +493} +494 +495 ObserverOperation(ObserverGetter observerGetter, User user) { +496 this(observerGetter, user, false); +497} +498 +499 ObserverOperation(ObserverGetter observerGetter, boolean bypassable) { +500 this(observerGetter, null, bypassable); 501} -502return useLegacy; -503 } -504 -505 /** -506 * Used to limit legacy handling to once per Coprocessor class per classloader. -507 */ -508 private static fina

[03/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 1318b95..841130a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -55,1647 +55,1615 @@
 047import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 048import 
org.apache.hadoop.hbase.coprocessor.MasterObserver;
 049import 
org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-050import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-051import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-052import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-053import 
org.apache.hadoop.hbase.metrics.MetricRegistry;
-054import 
org.apache.hadoop.hbase.net.Address;
-055import 
org.apache.hadoop.hbase.procedure2.LockType;
-056import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-057import 
org.apache.hadoop.hbase.procedure2.Procedure;
-058import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-059import 
org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
-060import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-061import 
org.apache.hadoop.hbase.security.User;
-062import 
org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066/**
-067 * Provides the coprocessor framework and 
environment for master oriented
-068 * operations.  {@link HMaster} interacts 
with the loaded coprocessors
-069 * through this class.
-070 */
-071@InterfaceAudience.Private
-072public class MasterCoprocessorHost
-073extends 
CoprocessorHost {
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(MasterCoprocessorHost.class);
-076
-077  /**
-078   * Coprocessor environment extension 
providing access to master related
-079   * services.
-080   */
-081  private static class MasterEnvironment 
extends BaseEnvironment
-082  implements 
MasterCoprocessorEnvironment {
-083private final boolean 
supportGroupCPs;
-084private final MetricRegistry 
metricRegistry;
-085private final MasterServices 
services;
-086
-087public MasterEnvironment(final 
MasterCoprocessor impl, final int priority, final int seq,
-088final Configuration conf, final 
MasterServices services) {
-089  super(impl, priority, seq, conf);
-090  this.services = services;
-091  supportGroupCPs = 
!useLegacyMethod(impl.getClass(),
-092  "preBalanceRSGroup", 
ObserverContext.class, String.class);
-093  this.metricRegistry =
-094  
MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
-095}
-096
-097@Override
-098public ServerName getServerName() {
-099  return 
this.services.getServerName();
-100}
-101
-102@Override
-103public Connection getConnection() {
-104  return new 
SharedConnection(this.services.getConnection());
-105}
-106
-107@Override
-108public Connection 
createConnection(Configuration conf) throws IOException {
-109  return 
this.services.createConnection(conf);
-110}
-111
-112@Override
-113public MetricRegistry 
getMetricRegistryForMaster() {
-114  return metricRegistry;
-115}
-116
-117@Override
-118public void shutdown() {
-119  super.shutdown();
-120  
MetricsCoprocessor.removeRegistry(this.metricRegistry);
-121}
-122  }
-123
-124  /**
-125   * Special version of MasterEnvironment 
that exposes MasterServices for Core Coprocessors only.
-126   * Temporary hack until Core 
Coprocessors are integrated into Core.
-127   */
-128  private static class 
MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
-129  implements HasMasterServices {
-130private final MasterServices 
masterServices;
-131
-132public 
MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
priority,
-133final int seq, final 
Configuration conf, final MasterServices services) {
-134  super(impl, priority, seq, conf, 
services);
-135  this.masterServices = services;
-136}
-137
-138/**
-139 * @return An instance of 
MasterServices, an object NOT for general user-space Coprocessor
-140 * consumption.
-141 */
-142public MasterServices 
getMasterServices() {
-143  return this.masterServices;
-144}
-145  }
-146
-147  private MasterServices 
masterServices;
-148
-149  public MasterCoprocessorHost

[05/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
index 1318b95..841130a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
@@ -55,1647 +55,1615 @@
 047import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 048import 
org.apache.hadoop.hbase.coprocessor.MasterObserver;
 049import 
org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-050import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-051import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-052import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-053import 
org.apache.hadoop.hbase.metrics.MetricRegistry;
-054import 
org.apache.hadoop.hbase.net.Address;
-055import 
org.apache.hadoop.hbase.procedure2.LockType;
-056import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-057import 
org.apache.hadoop.hbase.procedure2.Procedure;
-058import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-059import 
org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
-060import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-061import 
org.apache.hadoop.hbase.security.User;
-062import 
org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066/**
-067 * Provides the coprocessor framework and 
environment for master oriented
-068 * operations.  {@link HMaster} interacts 
with the loaded coprocessors
-069 * through this class.
-070 */
-071@InterfaceAudience.Private
-072public class MasterCoprocessorHost
-073extends 
CoprocessorHost {
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(MasterCoprocessorHost.class);
-076
-077  /**
-078   * Coprocessor environment extension 
providing access to master related
-079   * services.
-080   */
-081  private static class MasterEnvironment 
extends BaseEnvironment
-082  implements 
MasterCoprocessorEnvironment {
-083private final boolean 
supportGroupCPs;
-084private final MetricRegistry 
metricRegistry;
-085private final MasterServices 
services;
-086
-087public MasterEnvironment(final 
MasterCoprocessor impl, final int priority, final int seq,
-088final Configuration conf, final 
MasterServices services) {
-089  super(impl, priority, seq, conf);
-090  this.services = services;
-091  supportGroupCPs = 
!useLegacyMethod(impl.getClass(),
-092  "preBalanceRSGroup", 
ObserverContext.class, String.class);
-093  this.metricRegistry =
-094  
MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
-095}
-096
-097@Override
-098public ServerName getServerName() {
-099  return 
this.services.getServerName();
-100}
-101
-102@Override
-103public Connection getConnection() {
-104  return new 
SharedConnection(this.services.getConnection());
-105}
-106
-107@Override
-108public Connection 
createConnection(Configuration conf) throws IOException {
-109  return 
this.services.createConnection(conf);
-110}
-111
-112@Override
-113public MetricRegistry 
getMetricRegistryForMaster() {
-114  return metricRegistry;
-115}
-116
-117@Override
-118public void shutdown() {
-119  super.shutdown();
-120  
MetricsCoprocessor.removeRegistry(this.metricRegistry);
-121}
-122  }
-123
-124  /**
-125   * Special version of MasterEnvironment 
that exposes MasterServices for Core Coprocessors only.
-126   * Temporary hack until Core 
Coprocessors are integrated into Core.
-127   */
-128  private static class 
MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
-129  implements HasMasterServices {
-130private final MasterServices 
masterServices;
-131
-132public 
MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
priority,
-133final int seq, final 
Configuration conf, final MasterServices services) {
-134  super(impl, priority, seq, conf, 
services);
-135  this.masterServices = services;
-136}
-137
-138/**
-139 * @return An instance of 
MasterServices, an object NOT for general user-space Coprocessor
-140 * consumption.
-141 */
-142public MasterServices 
getMasterServices() {
-143  return this.masterServices;
-144}
-145  }
-146
-147  private MasterServices 
masterServices;
-148
-149  public MasterCoprocessorHost(final 
MasterServices service

[45/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
index d7a5950..84be9bd 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":6,"i3":10,"i4":6,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9};
+var methods = 
{"i0":10,"i1":10,"i2":6,"i3":10,"i4":6,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -396,19 +396,12 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-protected void
-legacyWarning(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class clazz,
- http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String message)
-limits the amount of logging to once per coprocessor 
class.
-
-
-
 void
 load(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass,
 int priority,
 org.apache.hadoop.conf.Configuration conf) 
 
-
+
 E
 load(org.apache.hadoop.fs.Path path,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className,
@@ -417,7 +410,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Load a coprocessor implementation into the host
 
 
-
+
 E
 load(org.apache.hadoop.fs.Path path,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String className,
@@ -427,26 +420,17 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Load a coprocessor implementation into the host
 
 
-
+
 protected void
 loadSystemCoprocessors(org.apache.hadoop.conf.Configuration conf,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String confKey)
 Load system coprocessors once only.
 
 
-
+
 void
 shutdown(E e) 
 
-
-protected static boolean
-useLegacyMethod(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class clazz,
-   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String methodName,
-   http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in 
java.lang">Class... parameterTypes)
-Used to gracefully handle fallback to deprecated methods 
when we
- evolve coprocessor APIs.
-
-
 
 
 
@@ -688,7 +672,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 legacyWarning
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetClass> legacyWarning
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetClass> legacyWarning
 Used to limit legacy handling to once per Coprocessor class 
per classloader.
 
 
@@ -1001,60 +985,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
-
-
-
-
-useLegacyMethod
-@InterfaceAudience.Private
-protected static boolean useLegacyMethod(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class clazz,
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String methodName,
-

[22/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134impo

[47/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 23721d2..19aa0b9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class RawAsyncHBaseAdmin
+class RawAsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -765,8 +765,9 @@ implements 
 private void
-getProcedureResult(long procId,
-  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid> future) 
+getProcedureResult(long procId,
+  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid> future,
+  int retries) 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureString>
@@ -1401,7 +1402,7 @@ implements 
 
 FLUSH_TABLE_PROCEDURE_SIGNATURE
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String FLUSH_TABLE_PROCEDURE_SIGNATURE
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String FLUSH_TABLE_PROCEDURE_SIGNATURE
 
 See Also:
 Constant
 Field Values
@@ -1414,7 +1415,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1423,7 +1424,7 @@ implements 
 
 connection
-private final AsyncConnectionImpl connection
+private final AsyncConnectionImpl connection
 
 
 
@@ -1432,7 +1433,7 @@ implements 
 
 retryTimer
-private 
final org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer retryTimer
+private 
final org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer retryTimer
 
 
 
@@ -1441,7 +1442,7 @@ implements 
 
 metaTable
-private final AsyncTable metaTable
+private final AsyncTable metaTable
 
 
 
@@ -1450,7 +1451,7 @@ implements 
 
 rpcTimeoutNs
-private final long rpcTimeoutNs
+private final long rpcTimeoutNs
 
 
 
@@ -1459,7 +1460,7 @@ implements 
 
 operationTimeoutNs
-private final long operationTimeoutNs
+private final long operationTimeoutNs
 
 
 
@@ -1468,7 +1469,7 @@ implements 
 
 pauseNs
-private final long pauseNs
+private final long pauseNs
 
 
 
@@ -1477,7 +1478,7 @@ implements 
 
 maxAttempts
-private final int maxAttempts
+private final int maxAttempts
 
 
 
@@ -1486,7 +1487,7 @@ implements 
 
 startLogErrorsCnt
-private final int startLogErrorsCnt
+private final int startLogErrorsCnt
 
 
 
@@ -1495,7 +1496,7 @@ implements 
 
 ng
-private final NonceGenerator ng
+private final NonceGenerator ng
 
 
 
@@ -1512,7 +1513,7 @@ implements 
 
 RawAsyncHBaseAdmin
-RawAsyncHBaseAdmin(AsyncConnectionImpl connection,
+RawAsyncHBaseAdmin(AsyncConnectionImpl connection,

org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer retryTimer,
AsyncAdminBuilderBase builder)
 
@@ -1531,7 +1532,7 @@ implements 
 
 newMasterCaller
-private  AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder newMasterCaller()
+private  AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder newMasterCaller()
 
 
 
@@ -1540,7 +1541,7 @@ implements 
 
 newAdminCaller
-private  AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder newAdminCaller()
+private  AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder newAdminCaller()
 
 
 
@@ -1551,7 +1552,7 @@ implements 
 
 call
-private  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in 
java.util.concurrent">CompletableFuture call(HBaseRpcController controller,
+private  http://docs.oracle.com/javase/8/docs/

[33/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColu

[19/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Delet

[14/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
index dc127cd..2640e94 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.html
@@ -47,483 +47,482 @@
 039import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfigBuilder;
 040import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 041import 
org.apache.hadoop.hbase.util.Bytes;
-042import 
org.apache.hadoop.hbase.util.Strings;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044import 
org.apache.yetus.audience.InterfaceStability;
-045import org.slf4j.Logger;
-046import org.slf4j.LoggerFactory;
-047
-048import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-049import 
org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-050import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-051import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-052import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-053import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-054
-055/**
-056 * Helper for TableCFs Operations.
-057 */
-058@InterfaceAudience.Private
-059@InterfaceStability.Stable
-060public final class 
ReplicationPeerConfigUtil {
-061
-062  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationPeerConfigUtil.class);
-063
-064  private ReplicationPeerConfigUtil() 
{}
-065
-066  public static String 
convertToString(Set namespaces) {
-067if (namespaces == null) {
-068  return null;
-069}
-070return StringUtils.join(namespaces, 
';');
-071  }
-072
-073  /** convert map to TableCFs Object */
-074  public static 
ReplicationProtos.TableCF[] convert(
-075  Map> tableCfs) {
-076if (tableCfs == null) {
-077  return null;
-078}
-079List 
tableCFList = new ArrayList<>(tableCfs.entrySet().size());
-080ReplicationProtos.TableCF.Builder 
tableCFBuilder =  ReplicationProtos.TableCF.newBuilder();
-081for (Map.Entry> entry : tableCfs.entrySet()) {
-082  tableCFBuilder.clear();
-083  
tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(entry.getKey()));
-084  Collection v = 
entry.getValue();
-085  if (v != null && 
!v.isEmpty()) {
-086for (String value : 
entry.getValue()) {
-087  
tableCFBuilder.addFamilies(ByteString.copyFromUtf8(value));
-088}
-089  }
-090  
tableCFList.add(tableCFBuilder.build());
-091}
-092return tableCFList.toArray(new 
ReplicationProtos.TableCF[tableCFList.size()]);
-093  }
-094
-095  public static String 
convertToString(Map> 
tableCfs) {
-096if (tableCfs == null) {
-097  return null;
-098}
-099return convert(convert(tableCfs));
-100  }
-101
-102  /**
-103   *  Convert string to TableCFs 
Object.
-104   *  This is only for read TableCFs 
information from TableCF node.
-105   *  Input String Format: 
ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3.
-106   * */
-107  public static 
ReplicationProtos.TableCF[] convert(String tableCFsConfig) {
-108if (tableCFsConfig == null || 
tableCFsConfig.trim().length() == 0) {
-109  return null;
-110}
-111
-112ReplicationProtos.TableCF.Builder 
tableCFBuilder = ReplicationProtos.TableCF.newBuilder();
-113String[] tables = 
tableCFsConfig.split(";");
-114List 
tableCFList = new ArrayList<>(tables.length);
-115
-116for (String tab : tables) {
-117  // 1 ignore empty table config
-118  tab = tab.trim();
-119  if (tab.length() == 0) {
-120continue;
-121  }
-122  // 2 split to "table" and 
"cf1,cf2"
-123  //   for each table: 
"table#cf1,cf2" or "table"
-124  String[] pair = tab.split(":");
-125  String tabName = pair[0].trim();
-126  if (pair.length > 2 || 
tabName.length() == 0) {
-127LOG.info("incorrect format:" + 
tableCFsConfig);
-128continue;
-129  }
-130
-131  tableCFBuilder.clear();
-132  // split namespace from tableName
-133  String ns = "default";
-134  String tName = tabName;
-135  String[] dbs = 
tabName.split("\\.");
-136  if (dbs != null && 
dbs.length == 2) {
-137ns = dbs[0];
-138tName = dbs[1];
-139  }
-140  tableCFBuilder.setTableName(
-141
ProtobufUtil.toProtoTableName(TableName.valueOf(ns

[49/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 5d8d7b3..ba429a2 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2018 The Apache Software Foundation
 
-  File: 3468,
- Errors: 19099,
+  File: 3471,
+ Errors: 19048,
  Warnings: 0,
  Infos: 0
   
@@ -1301,7 +1301,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -1581,7 +1581,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -1777,7 +1777,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -4591,7 +4591,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -6019,7 +6019,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -6607,7 +6607,7 @@ under the License.
   0
 
 
-  5
+  4
 
   
   
@@ -7685,7 +7685,7 @@ under the License.
   0
 
 
-  3
+  2
 
   
   
@@ -9869,7 +9869,7 @@ under the License.
   0
 
 
-  3
+  0
 
   
   
@@ -10919,7 +10919,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -11465,7 +11465,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -13523,7 +13523,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -15371,7 +15371,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -16827,7 +16827,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -17527,7 +17527,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -22133,7 +22133,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -24135,7 +24135,7 @@ under the License.
   0
 
 
-  51
+  45
 
   
   
@@ -24947,7 +24947,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -25955,7 +25955,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -26767,7 +26767,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -29861,7 +29861,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -30323,7 +30323,7 @@ under the License.
   0
 
 
-  102
+  101
 
   
   
@@ -32666,6 +32666,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.wal.TestWALS

[31/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterPr

[12/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
index b8e321a..439a50d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
@@ -468,274 +468,216 @@
 460  }
 461
 462  /**
-463   * Used to gracefully handle fallback 
to deprecated methods when we
-464   * evolve coprocessor APIs.
-465   *
-466   * When a particular Coprocessor API is 
updated to change methods, hosts can support fallback
-467   * to the deprecated API by using this 
method to determine if an instance implements the new API.
-468   * In the event that said support is 
partial, then in the face of a runtime issue that prevents
-469   * proper operation {@link 
#legacyWarning(Class, String)} should be used to let operators know.
-470   *
-471   * For examples of this in action, see 
the implementation of
-472   * 
    -473 *
  • {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} -474 *
  • {@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost} -475 *
-476 * -477 * @param clazz Coprocessor you wish to evaluate -478 * @param methodName the name of the non-deprecated method version -479 * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are -480 * declared. -481 */ -482 @InterfaceAudience.Private -483 protected static boolean useLegacyMethod(final Class clazz, -484 final String methodName, final Class... parameterTypes) { -485boolean useLegacy; -486// Use reflection to see if they implement the non-deprecated version -487try { -488 clazz.getDeclaredMethod(methodName, parameterTypes); -489 LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " + -490 "signature. Skipping legacy support for invocations in '" + clazz +"'."); -491 useLegacy = false; -492} catch (NoSuchMethodException exception) { -493 useLegacy = true; -494} catch (SecurityException exception) { -495 LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz + -496 "' requires legacy support; assuming it does. If you get later errors about legacy " + -497 "coprocessor use, consider updating your security policy to allow access to the package" + -498 " and declared members of your implementation."); -499 LOG.debug("Details of Security Manager rejection.", exception); -500 useLegacy = true; +463 * Used to limit legacy handling to once per Coprocessor class per classloader. +464 */ +465 private static final Set> legacyWarning = +466 new ConcurrentSkipListSet<>( +467 new Comparator>() { +468@Override +469public int compare(Class c1, Class c2) { +470 if (c1.equals(c2)) { +471return 0; +472 } +473 return c1.getName().compareTo(c2.getName()); +474} +475 }); +476 +477 /** +478 * Implementations defined function to get an observer of type {@code O} from a coprocessor of +479 * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each +480 * observer they can handle. For e.g. 
RegionCoprocessorHost will use 3 getters, one for +481 * each of RegionObserver, EndpointObserver and BulkLoadObserver. +482 * These getters are used by {@code ObserverOperation} to get appropriate observer from the +483 * coprocessor. +484 */ +485 @FunctionalInterface +486 public interface ObserverGetter extends Function> {} +487 +488 private abstract class ObserverOperation extends ObserverContextImpl { +489ObserverGetter observerGetter; +490 +491 ObserverOperation(ObserverGetter observerGetter) { +492 this(observerGetter, null); +493} +494 +495 ObserverOperation(ObserverGetter observerGetter, User user) { +496 this(observerGetter, user, false); +497} +498 +499 ObserverOperation(ObserverGetter observerGetter, boolean bypassable) { +500 this(observerGetter, null, bypassable); 501} -502return useLegacy; -503 } -504 -505 /** -506 * Used to limit legacy handling to once per Coprocessor class per classloader. -507 */ -508 private static final Set> legacyWarning = -509 new Concurr

[44/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 6d5299b..0e397dc 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterCoprocessorHost
+public class MasterCoprocessorHost
 extends CoprocessorHost
 Provides the coprocessor framework and environment for 
master oriented
  operations.  HMaster interacts with the 
loaded coprocessors
@@ -927,7 +927,7 @@ extends CoprocessorHost
-abortServer,
 abortServer,
 checkAndLoadInstance,
 execOperation,
 execOperationWithResult,
 execShutdown, findCoprocessor,
 findCoprocessor,
 findCoprocessorEnvironment,
 findCoprocessors,
 getCoprocessors,
 getLoadedCoprocessors,
 handleCoprocessorThrowable, legacyWarning,
 load,
 load,
 load,
 loadSystemCoprocessors,
 shutdown, useLegacyMethod
+abortServer,
 abortServer,
 checkAndLoadInstance,
 execOperation,
 execOperationWithResult,
 execShutdown, findCoprocessor,
 findCoprocessor,
 findCoprocessorEnvironment,
 findCoprocessors,
 getCoprocessors,
 getLoadedCoprocessors,
 handleCoprocessorThrowable, load,
 load,
 load,
 loadSystemCoprocessors,
 shutdown
 
 
 
@@ -956,7 +956,7 @@ extends 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -965,7 +965,7 @@ extends 
 
 masterServices
-private MasterServices masterServices
+private MasterServices masterServices
 
 
 
@@ -974,7 +974,7 @@ extends 
 
 masterObserverGetter
-private CoprocessorHost.ObserverGetter 
masterObserverGetter
+private CoprocessorHost.ObserverGetter 
masterObserverGetter
 
 
 
@@ -991,7 +991,7 @@ extends 
 
 MasterCoprocessorHost
-public MasterCoprocessorHost(MasterServices services,
+public MasterCoprocessorHost(MasterServices services,
  
org.apache.hadoop.conf.Configuration conf)
 
 
@@ -1009,7 +1009,7 @@ extends 
 
 createEnvironment
-public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
+public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
  
int priority,
  int seq,
  
org.apache.hadoop.conf.Configuration conf)
@@ -1027,7 +1027,7 @@ extends 
 
 checkAndGetInstance
-public MasterCoprocessor checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
+public MasterCoprocessor checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/InstantiationException.html?is-external=true";
 title="class or interface in java.lang">InstantiationException,
  http://docs.oracle.com/javase/8/docs/api/java/lang/IllegalAccessException.html?is-external=true";
 title="class or interface in java.lang">IllegalAccessException
 Description copied from 
class: CoprocessorHost
@@ -1051,7 +1051,7 @@ extends 
 
 preCreateNamespace
-public void preCreateNamespace(NamespaceDescriptor ns)
+public void preCreateNamespace(NamespaceDescriptor ns)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1065,7 +1065,7 @@ extends 
 
 postCreateNamespace
-public void postCreateNamespace(NamespaceDescriptor ns)
+public void postCreateNamespace(NamespaceDescriptor ns)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1079,7 +1079,7 @@ extends 
 
 preDeleteNamespace
-public void preDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
+public void preDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 

[25/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Delet

[11/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
index b8e321a..439a50d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
@@ -468,274 +468,216 @@
 460  }
 461
 462  /**
-463   * Used to gracefully handle fallback 
to deprecated methods when we
-464   * evolve coprocessor APIs.
-465   *
-466   * When a particular Coprocessor API is 
updated to change methods, hosts can support fallback
-467   * to the deprecated API by using this 
method to determine if an instance implements the new API.
-468   * In the event that said support is 
partial, then in the face of a runtime issue that prevents
-469   * proper operation {@link 
#legacyWarning(Class, String)} should be used to let operators know.
-470   *
-471   * For examples of this in action, see 
the implementation of
-472   * 
    -473 *
  • {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} -474 *
  • {@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost} -475 *
-476 * -477 * @param clazz Coprocessor you wish to evaluate -478 * @param methodName the name of the non-deprecated method version -479 * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are -480 * declared. -481 */ -482 @InterfaceAudience.Private -483 protected static boolean useLegacyMethod(final Class clazz, -484 final String methodName, final Class... parameterTypes) { -485boolean useLegacy; -486// Use reflection to see if they implement the non-deprecated version -487try { -488 clazz.getDeclaredMethod(methodName, parameterTypes); -489 LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " + -490 "signature. Skipping legacy support for invocations in '" + clazz +"'."); -491 useLegacy = false; -492} catch (NoSuchMethodException exception) { -493 useLegacy = true; -494} catch (SecurityException exception) { -495 LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz + -496 "' requires legacy support; assuming it does. If you get later errors about legacy " + -497 "coprocessor use, consider updating your security policy to allow access to the package" + -498 " and declared members of your implementation."); -499 LOG.debug("Details of Security Manager rejection.", exception); -500 useLegacy = true; +463 * Used to limit legacy handling to once per Coprocessor class per classloader. +464 */ +465 private static final Set> legacyWarning = +466 new ConcurrentSkipListSet<>( +467 new Comparator>() { +468@Override +469public int compare(Class c1, Class c2) { +470 if (c1.equals(c2)) { +471return 0; +472 } +473 return c1.getName().compareTo(c2.getName()); +474} +475 }); +476 +477 /** +478 * Implementations defined function to get an observer of type {@code O} from a coprocessor of +479 * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each +480 * observer they can handle. For e.g. 
RegionCoprocessorHost will use 3 getters, one for +481 * each of RegionObserver, EndpointObserver and BulkLoadObserver. +482 * These getters are used by {@code ObserverOperation} to get appropriate observer from the +483 * coprocessor. +484 */ +485 @FunctionalInterface +486 public interface ObserverGetter extends Function> {} +487 +488 private abstract class ObserverOperation extends ObserverContextImpl { +489ObserverGetter observerGetter; +490 +491 ObserverOperation(ObserverGetter observerGetter) { +492 this(observerGetter, null); +493} +494 +495 ObserverOperation(ObserverGetter observerGetter, User user) { +496 this(observerGetter, user, false); +497} +498 +499 ObserverOperation(ObserverGetter observerGetter, boolean bypassable) { +500 this(observerGetter, null, bypassable); 501} -502return useLegacy; -503 } -504 -505 /** -506 * Used to limit legacy handling to once per Coprocessor class per classloader. -507 */ -508 private static final Set> legacyWarning = -509

[26/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceReq

[23/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColu

[07/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
index 8373f27..80df615 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
@@ -6,294 +6,293 @@
 
 
 
-001
-002/**
-003 * Copyright The Apache Software 
Foundation
-004 *
-005 * Licensed to the Apache Software 
Foundation (ASF) under one or more
-006 * contributor license agreements. See 
the NOTICE file distributed with this
-007 * work for additional information 
regarding copyright ownership. The ASF
-008 * licenses this file to you under the 
Apache License, Version 2.0 (the
-009 * "License"); you may not use this file 
except in compliance with the License.
-010 * You may obtain a copy of the License 
at
-011 *
-012 * 
http://www.apache.org/licenses/LICENSE-2.0
-013 *
-014 * Unless required by applicable law or 
agreed to in writing, software
-015 * distributed under the License is 
distributed on an "AS IS" BASIS, WITHOUT
-016 * WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied. See the
-017 * License for the specific language 
governing permissions and limitations
-018 * under the License.
-019 */
-020
-021package 
org.apache.hadoop.hbase.io.hfile;
-022
-023import java.io.IOException;
-024import java.net.InetSocketAddress;
-025import java.nio.ByteBuffer;
-026import java.util.ArrayList;
-027import java.util.Iterator;
-028import java.util.List;
-029import 
java.util.NoSuchElementException;
-030import 
java.util.concurrent.ExecutionException;
-031
-032import 
org.apache.hadoop.conf.Configuration;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-035import 
org.apache.hadoop.hbase.nio.ByteBuff;
-036import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-037import 
org.apache.hadoop.hbase.trace.TraceUtil;
-038import 
org.apache.hadoop.hbase.util.Addressing;
-039import 
org.apache.htrace.core.TraceScope;
-040import 
org.apache.yetus.audience.InterfaceAudience;
-041import org.slf4j.Logger;
-042import org.slf4j.LoggerFactory;
-043
-044import net.spy.memcached.CachedData;
-045import 
net.spy.memcached.ConnectionFactoryBuilder;
-046import net.spy.memcached.FailureMode;
-047import 
net.spy.memcached.MemcachedClient;
-048import 
net.spy.memcached.transcoders.Transcoder;
-049
-050/**
-051 * Class to store blocks into 
memcached.
-052 * This should only be used on a cluster 
of Memcached daemons that are tuned well and have a
-053 * good network connection to the HBase 
regionservers. Any other use will likely slow down HBase
-054 * greatly.
-055 */
-056@InterfaceAudience.Private
-057public class MemcachedBlockCache 
implements BlockCache {
-058  private static final Logger LOG = 
LoggerFactory.getLogger(MemcachedBlockCache.class.getName());
-059
-060  // Some memcache versions won't take 
more than 1024 * 1024. So set the limit below
-061  // that just in case this client is 
used with those versions.
-062  public static final int MAX_SIZE = 1020 
* 1024;
-063
-064  // Config key for what memcached 
servers to use.
-065  // They should be specified in a comma 
sperated list with ports.
-066  // like:
-067  //
-068  // host1:11211,host3:8080,host4:11211
-069  public static final String 
MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers";
-070  public static final String 
MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout";
-071  public static final String 
MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout";
-072  public static final String 
MEMCACHED_OPTIMIZE_KEY = "hbase.cache.memcached.spy.optimze";
-073  public static final long 
MEMCACHED_DEFAULT_TIMEOUT = 500;
-074  public static final boolean 
MEMCACHED_OPTIMIZE_DEFAULT = false;
-075
-076  private final MemcachedClient client;
-077  private final HFileBlockTranscoder tc = 
new HFileBlockTranscoder();
-078  private final CacheStats cacheStats = 
new CacheStats("MemcachedBlockCache");
-079
-080  public 
MemcachedBlockCache(Configuration c) throws IOException {
-081LOG.info("Creating 
MemcachedBlockCache");
-082
-083long opTimeout = 
c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
-084long queueTimeout = 
c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
-085boolean optimize = 
c.getBoolean(MEMCACHED_OPTIMIZE_KEY, MEMCACHED_OPTIMIZE_DEFAULT);
-086
-087ConnectionFactoryBuilder builder = 
new ConnectionFactoryBuilder()
-088.setOpTimeout(opTimeout)
-089
.setOpQueueMaxBlockTime(queueTimeout) //

hbase-site git commit: INFRA-10751 Empty commit

2018-01-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site bb3985727 -> 1f4f0eec6


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1f4f0eec
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1f4f0eec
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1f4f0eec

Branch: refs/heads/asf-site
Commit: 1f4f0eec617c0ff500fbff635463b26e743f4dcb
Parents: bb39857
Author: jenkins 
Authored: Wed Jan 3 15:19:53 2018 +
Committer: jenkins 
Committed: Wed Jan 3 15:19:53 2018 +

--

--




[04/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
index 1318b95..841130a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
@@ -55,1647 +55,1615 @@
 047import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 048import 
org.apache.hadoop.hbase.coprocessor.MasterObserver;
 049import 
org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-050import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-051import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-052import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-053import 
org.apache.hadoop.hbase.metrics.MetricRegistry;
-054import 
org.apache.hadoop.hbase.net.Address;
-055import 
org.apache.hadoop.hbase.procedure2.LockType;
-056import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-057import 
org.apache.hadoop.hbase.procedure2.Procedure;
-058import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-059import 
org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
-060import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-061import 
org.apache.hadoop.hbase.security.User;
-062import 
org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066/**
-067 * Provides the coprocessor framework and 
environment for master oriented
-068 * operations.  {@link HMaster} interacts 
with the loaded coprocessors
-069 * through this class.
-070 */
-071@InterfaceAudience.Private
-072public class MasterCoprocessorHost
-073extends 
CoprocessorHost {
-074
-075  private static final Logger LOG = 
LoggerFactory.getLogger(MasterCoprocessorHost.class);
-076
-077  /**
-078   * Coprocessor environment extension 
providing access to master related
-079   * services.
-080   */
-081  private static class MasterEnvironment 
extends BaseEnvironment
-082  implements 
MasterCoprocessorEnvironment {
-083private final boolean 
supportGroupCPs;
-084private final MetricRegistry 
metricRegistry;
-085private final MasterServices 
services;
-086
-087public MasterEnvironment(final 
MasterCoprocessor impl, final int priority, final int seq,
-088final Configuration conf, final 
MasterServices services) {
-089  super(impl, priority, seq, conf);
-090  this.services = services;
-091  supportGroupCPs = 
!useLegacyMethod(impl.getClass(),
-092  "preBalanceRSGroup", 
ObserverContext.class, String.class);
-093  this.metricRegistry =
-094  
MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
-095}
-096
-097@Override
-098public ServerName getServerName() {
-099  return 
this.services.getServerName();
-100}
-101
-102@Override
-103public Connection getConnection() {
-104  return new 
SharedConnection(this.services.getConnection());
-105}
-106
-107@Override
-108public Connection 
createConnection(Configuration conf) throws IOException {
-109  return 
this.services.createConnection(conf);
-110}
-111
-112@Override
-113public MetricRegistry 
getMetricRegistryForMaster() {
-114  return metricRegistry;
-115}
-116
-117@Override
-118public void shutdown() {
-119  super.shutdown();
-120  
MetricsCoprocessor.removeRegistry(this.metricRegistry);
-121}
-122  }
-123
-124  /**
-125   * Special version of MasterEnvironment 
that exposes MasterServices for Core Coprocessors only.
-126   * Temporary hack until Core 
Coprocessors are integrated into Core.
-127   */
-128  private static class 
MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
-129  implements HasMasterServices {
-130private final MasterServices 
masterServices;
-131
-132public 
MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
priority,
-133final int seq, final 
Configuration conf, final MasterServices services) {
-134  super(impl, priority, seq, conf, 
services);
-135  this.masterServices = services;
-136}
-137
-138/**
-139 * @return An instance of 
MasterServices, an object NOT for general user-space Coprocessor
-140 * consumption.
-141 */
-142public MasterServices 
getMasterServices() {
-143  return this.masterServices;
-144}
-145  }
-146
-147  private Master

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceReq

[21/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.ap

[51/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/bb398572
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/bb398572
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/bb398572

Branch: refs/heads/asf-site
Commit: bb3985727c0fb8a1a3ac0932d5d77edad012ae45
Parents: 4e7a246
Author: jenkins 
Authored: Wed Jan 3 15:19:15 2018 +
Committer: jenkins 
Committed: Wed Jan 3 15:19:15 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 .../apache/hadoop/hbase/quotas/QuotaFilter.html |10 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 34240 -
 checkstyle.rss  |   108 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |13 +-
 devapidocs/deprecated-list.html | 4 +-
 devapidocs/index-all.html   |70 +-
 .../apache/hadoop/hbase/KeyValueTestUtil.html   |30 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hadoop/hbase/class-use/Coprocessor.html |25 -
 ...dmin.AddColumnFamilyProcedureBiConsumer.html | 6 +-
 .../client/RawAsyncHBaseAdmin.AdminRpcCall.html | 4 +-
 .../client/RawAsyncHBaseAdmin.Converter.html| 4 +-
 ...dmin.CreateNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.CreateTableProcedureBiConsumer.html | 6 +-
 ...n.DeleteColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.DeleteNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.DeleteTableProcedureBiConsumer.html | 8 +-
 ...seAdmin.DisableTableProcedureBiConsumer.html | 6 +-
 ...aseAdmin.EnableTableProcedureBiConsumer.html | 6 +-
 .../RawAsyncHBaseAdmin.MasterRpcCall.html   | 4 +-
 ...min.MergeTableRegionProcedureBiConsumer.html | 6 +-
 ...n.ModifyColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.ModifyNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.ModifyTableProcedureBiConsumer.html | 6 +-
 ...HBaseAdmin.NamespaceProcedureBiConsumer.html |14 +-
 .../RawAsyncHBaseAdmin.ProcedureBiConsumer.html |10 +-
 ...min.SplitTableRegionProcedureBiConsumer.html | 6 +-
 .../RawAsyncHBaseAdmin.TableOperator.html   | 4 +-
 ...syncHBaseAdmin.TableProcedureBiConsumer.html |14 +-
 ...eAdmin.TruncateTableProcedureBiConsumer.html | 6 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.html |   378 +-
 .../hadoop/hbase/client/package-tree.html   |22 +-
 .../replication/ReplicationPeerConfigUtil.html  |44 +-
 .../CoprocessorHost.ObserverGetter.html | 2 +-
 .../CoprocessorHost.ObserverOperation.html  |16 +-
 ...ocessorHost.ObserverOperationWithResult.html |18 +-
 ...ssorHost.ObserverOperationWithoutResult.html |12 +-
 .../hbase/coprocessor/CoprocessorHost.html  |81 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 ...emcachedBlockCache.HFileBlockTranscoder.html |12 +-
 .../hbase/io/hfile/MemcachedBlockCache.html |58 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 ...MasterCoprocessorHost.MasterEnvironment.html |31 +-
 ...st.MasterEnvironmentForCoreCoprocessors.html | 8 +-
 ...CoprocessorHost.MasterObserverOperation.html |10 +-
 .../hbase/master/MasterCoprocessorHost.html |   294 +-
 .../master/MetricsAssignmentManagerSource.html  |30 +-
 .../hbase/master/MetricsMasterSource.html   |28 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |34 +-
 .../hadoop/hbase/regionserver/HStore.html   |   382 +-
 .../MetricsHeapMemoryManagerSource.html | 4 +-
 .../regionserver/MetricsRegionServerSource.html | 2 +-
 .../MetricsRegionServerSourceImpl.html  | 2 +
 .../regionserver/MetricsRegionWrapper.html  |32 +-
 .../regionserver/MetricsRegionWrapperImpl.html  |26 +-
 .../regionserver/RegionCoprocessorHost.html | 2 +-
 .../RegionServerCoprocessorHost.html| 2 +-
 .../hadoop/hbase/reg

[27/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134impo

[41/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.html
index 1088938..a9b8e6c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.html
@@ -692,7 +692,7 @@ extends 
 
 ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME
 
 See Also:
 Constant
 Field Values
@@ -705,7 +705,7 @@ extends 
 
 ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC
-static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC
+static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC
 
 See Also:
 Constant
 Field Values

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
index 3d7ee46..6c7ef76 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
@@ -4808,7 +4808,7 @@ extends Update the PutBatch time histogram if a batch contains a 
Put op
 
 Parameters:
-t - 
+t - time it took
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index 23a1532..ec53127 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -1615,6 +1615,8 @@ implements Specified by:
 updatePutBatch in
 interface MetricsRegionServerSource
+Parameters:
+t - time it took
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
index 5bf1e61..fe902d5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
@@ -142,11 +142,15 @@ var activeTableTab = "activeTableTab";
 
 
 long
-getMaxCompactionQueueSize() 
+getMaxCompactionQueueSize()
+Note that this metric is updated periodically and hence 
might miss some data points.
+
 
 
 long
-getMaxFlushQueueSize() 
+getMaxFlushQueueSize()
+Note that this metric is updated periodically and hence 
might miss some data points.
+
 
 
 long
@@ -490,8 +494,8 @@ var activeTableTab = "activeTableTab";
 long getNumCompactionsQueued()
 
 Returns:
-the total number of compactions that are currently queued(or being 
executed) at point in
-  time
+the total number of compactions that are currently queued(or being 
executed) at point
+ in time
 
 
 
@@ -504,8 +508,8 @@ var activeTableTab = "activeTableTab";
 long getNumFlushesQueued()
 
 Returns:
-the total number of flushes currently queued(being executed) for this 
region at point in
-  time
+the total number of flushes currently queued(being executed) for this 
region at point
+ in time
 
 
 
@@ -515,11 +519,11 @@ var activeTableTab = "activeTableTab";
 
 
 getMaxCompactionQueueSize
-long getMaxCompactionQueueSize()
+long getMaxCompactionQueueSize()
+Note that this metric is updated periodically and hence 
might miss some data points.
 
 Returns:
-the max number of compactions queued for this region
- Note that this metric is updated periodically and hence might miss some d

[30/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColu

[34/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-135import 
or

[42/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 582d67f..428b0d8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HStore
+public class HStore
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
 A Store holds a column family in a Region.  Its a memstore 
and a set of zero
@@ -1226,7 +1226,7 @@ implements 
 
 MEMSTORE_CLASS_NAME
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
 
 See Also:
 Constant
 Field Values
@@ -1239,7 +1239,7 @@ implements 
 
 COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
 
 See Also:
 Constant
 Field Values
@@ -1252,7 +1252,7 @@ implements 
 
 BLOCKING_STOREFILES_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
 
 See Also:
 Constant
 Field Values
@@ -1265,7 +1265,7 @@ implements 
 
 BLOCK_STORAGE_POLICY_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
 
 See Also:
 Constant
 Field Values
@@ -1278,7 +1278,7 @@ implements 
 
 DEFAULT_BLOCK_STORAGE_POLICY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DEFAULT_BLOCK_STORAGE_POLICY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DEFAULT_BLOCK_STORAGE_POLICY
 
 See Also:
 Constant
 Field Values
@@ -1291,7 +1291,7 @@ implements 
 
 DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
-public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
+public static final int DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
 
 See Also:
 Constant
 Field Values
@@ -1304,7 +1304,7 @@ implements 
 
 DEFAULT_BLOCKING_STOREFILE_COUNT
-public static final int DEFAULT_BLOCKING_STOREFILE_COUNT
+public static final int DEFAULT_BLOCKING_STOREFILE_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1317,7 +1317,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1326,7 +1326,7 @@ implements 
 
 memstore
-protected final MemStore memstore
+protected final MemStore memstore
 
 
 
@@ -1335,7 +1335,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -1344,7 +1344,7 @@ implements 
 
 family
-private final ColumnFamilyDescriptor 
family
+private final ColumnFamilyDescriptor 
family
 
 
 
@@ -1353,7 +1353,7 @@ implements 
 
 fs
-private final HRegionFileSystem fs
+private final HRegionFileSystem fs
 
 
 
@@ -1362,7 +1362,7 @@ implements 
 
 conf
-protected org.apache.hadoop.conf.Configuration conf
+protected org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -1371,7 +1371,7 @@ implements 
 
 cacheConf
-protected CacheConfig cacheConf
+protected CacheConfig cacheConf
 
 
 
@@ -1380,7 +1380,7 @@ implements 
 
 lastCompactSize
-private long lastCompactSize
+private long lastCompactSize
 
 
 
@@ -1389,7 +1389,7 @@ implements 
 
 forceMajor
-volatile boolean forceMajor
+volatile boolean forceMajor
 
 
 
@@ -1398,7 +1398,7 @@ implements 
 
 closeCheckInterval
-static int closeCheckInterval
+static int closeCheckInterval
 
 
 
@@ -1407,7 +1407,7 @@ implements 
 
 storeSiz

[08/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
index b8e321a..439a50d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html
@@ -468,274 +468,216 @@
 460  }
 461
 462  /**
-463   * Used to gracefully handle fallback 
to deprecated methods when we
-464   * evolve coprocessor APIs.
-465   *
-466   * When a particular Coprocessor API is 
updated to change methods, hosts can support fallback
-467   * to the deprecated API by using this 
method to determine if an instance implements the new API.
-468   * In the event that said support is 
partial, then in the face of a runtime issue that prevents
-469   * proper operation {@link 
#legacyWarning(Class, String)} should be used to let operators know.
-470   *
-471   * For examples of this in action, see 
the implementation of
-472   * 
    -473 *
  • {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} -474 *
  • {@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost} -475 *
-476 * -477 * @param clazz Coprocessor you wish to evaluate -478 * @param methodName the name of the non-deprecated method version -479 * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are -480 * declared. -481 */ -482 @InterfaceAudience.Private -483 protected static boolean useLegacyMethod(final Class clazz, -484 final String methodName, final Class... parameterTypes) { -485boolean useLegacy; -486// Use reflection to see if they implement the non-deprecated version -487try { -488 clazz.getDeclaredMethod(methodName, parameterTypes); -489 LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " + -490 "signature. Skipping legacy support for invocations in '" + clazz +"'."); -491 useLegacy = false; -492} catch (NoSuchMethodException exception) { -493 useLegacy = true; -494} catch (SecurityException exception) { -495 LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz + -496 "' requires legacy support; assuming it does. If you get later errors about legacy " + -497 "coprocessor use, consider updating your security policy to allow access to the package" + -498 " and declared members of your implementation."); -499 LOG.debug("Details of Security Manager rejection.", exception); -500 useLegacy = true; +463 * Used to limit legacy handling to once per Coprocessor class per classloader. +464 */ +465 private static final Set> legacyWarning = +466 new ConcurrentSkipListSet<>( +467 new Comparator>() { +468@Override +469public int compare(Class c1, Class c2) { +470 if (c1.equals(c2)) { +471return 0; +472 } +473 return c1.getName().compareTo(c2.getName()); +474} +475 }); +476 +477 /** +478 * Implementations defined function to get an observer of type {@code O} from a coprocessor of +479 * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each +480 * observer they can handle. For e.g. 
RegionCoprocessorHost will use 3 getters, one for +481 * each of RegionObserver, EndpointObserver and BulkLoadObserver. +482 * These getters are used by {@code ObserverOperation} to get appropriate observer from the +483 * coprocessor. +484 */ +485 @FunctionalInterface +486 public interface ObserverGetter extends Function> {} +487 +488 private abstract class ObserverOperation extends ObserverContextImpl { +489ObserverGetter observerGetter; +490 +491 ObserverOperation(ObserverGetter observerGetter) { +492 this(observerGetter, null); +493} +494 +495 ObserverOperation(ObserverGetter observerGetter, User user) { +496 this(observerGetter, user, false); +497} +498 +499 ObserverOperation(ObserverGetter observerGetter, boolean bypassable) { +500 this(observerGetter, null, bypassable); 501} -502return useLegacy; -503 } -504 -505 /** -506 * Used to limit legacy handling to once per Coprocessor class per classloader. -507 */ -508 private static final Set> legacyWarning = -509 new ConcurrentSkipListSet<>( -510 new Comparator

[32/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134impo

[38/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
index a81b8f8..16228f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
@@ -112,7 +112,7 @@
 
 
 
-private static final class WALSplitter.WriterAndPath
+private static final class WALSplitter.WriterAndPath
 extends WALSplitter.SinkWriter
 Private data structure that wraps a Writer and its Path, 
also collecting statistics about the
  data written to this output.
@@ -215,7 +215,7 @@ extends 
 
 p
-final org.apache.hadoop.fs.Path p
+final org.apache.hadoop.fs.Path p
 
 
 
@@ -224,7 +224,7 @@ extends 
 
 w
-final WALProvider.Writer w
+final WALProvider.Writer w
 
 
 
@@ -233,7 +233,7 @@ extends 
 
 minLogSeqNum
-final long minLogSeqNum
+final long minLogSeqNum
 
 
 
@@ -250,7 +250,7 @@ extends 
 
 WriterAndPath
-WriterAndPath(org.apache.hadoop.fs.Path p,
+WriterAndPath(org.apache.hadoop.fs.Path p,
   WALProvider.Writer w,
   long minLogSeqNum)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index 902e1f7..455de23 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class WALSplitter.WriterThread
+public static class WALSplitter.WriterThread
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true";
 title="class or interface in java.lang">Thread
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 shouldStop
-private volatile boolean shouldStop
+private volatile boolean shouldStop
 
 
 
@@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 controller
-private WALSplitter.PipelineController controller
+private WALSplitter.PipelineController controller
 
 
 
@@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 entryBuffers
-private WALSplitter.EntryBuffers entryBuffers
+private WALSplitter.EntryBuffers entryBuffers
 
 
 
@@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 outputSink
-private WALSplitter.OutputSink outputSink
+private WALSplitter.OutputSink outputSink
 
 
 
@@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 WriterThread
-WriterThread(WALSplitter.PipelineController controller,
+WriterThread(WALSplitter.PipelineController controller,
  WALSplitter.EntryBuffers entryBuffers,
  WALSplitter.OutputSink sink,
  int i)
@@ -330,7 +330,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 run
-public void run()
+public void run()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--";
 title="class or interface in java.lang">run in 
interface http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable
@@ -345,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 doRun
-private void doRun()
+private void doRun()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -359,7 +359,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 writeBuffer
-private void writeBuffer(WALSplitter.RegionEntryBuffer buffer)
+private void writeBuffer(WALSplitter.RegionEntryBuffer buffer)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -373,7 +373,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?
 
 
 finish
-void finish()
+void finish()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.html
index dfc0c06..a607640 100644

  1   2   >