hbase git commit: HBASE-19598 Addendum fix typo

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 04798d674 -> 756cccecf


HBASE-19598 Addendum fix typo


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/756cccec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/756cccec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/756cccec

Branch: refs/heads/master
Commit: 756cccecffef3a64ae328022ae47fc1d1087dc6c
Parents: 04798d6
Author: zhangduo 
Authored: Fri Mar 9 15:37:22 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 15:37:22 2018 +0800

--
 .../apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/756cccec/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
index 6399e20..cee7a4a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
@@ -83,7 +83,7 @@ public class TestAssignmentManagerMetrics {
 // set tablesOnMaster to none
 conf.set("hbase.balancer.tablesOnMaster", "none");
 
-// set client sync wait timeout to 10sec
+// set client sync wait timeout to 5sec
 conf.setInt("hbase.client.sync.wait.timeout.msec", 5000);
 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
 conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 2500);



hbase git commit: HBASE-19598 Addendum increase sync wait time

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 033485dff -> 04798d674


HBASE-19598 Addendum increase sync wait time


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04798d67
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04798d67
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04798d67

Branch: refs/heads/master
Commit: 04798d6747ebc9ec91c82df2d9c084922c61da86
Parents: 033485d
Author: zhangduo 
Authored: Fri Mar 9 15:34:24 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 15:34:24 2018 +0800

--
 .../apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/04798d67/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
index adda3a8..6399e20 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
@@ -83,8 +83,8 @@ public class TestAssignmentManagerMetrics {
 // set tablesOnMaster to none
 conf.set("hbase.balancer.tablesOnMaster", "none");
 
-// set client sync wait timeout to 5sec
-conf.setInt("hbase.client.sync.wait.timeout.msec", 2500);
+// set client sync wait timeout to 10sec
+conf.setInt("hbase.client.sync.wait.timeout.msec", 5000);
 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
 conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 2500);
 // set a small interval for updating rit metrics



hbase git commit: HBASE-19598 Fix TestAssignmentManagerMetrics flaky test

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master a513678a7 -> 033485dff


HBASE-19598 Fix TestAssignmentManagerMetrics flaky test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/033485df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/033485df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/033485df

Branch: refs/heads/master
Commit: 033485dff3998c72be6c4efd04ccf04355951db6
Parents: a513678
Author: zhangduo 
Authored: Fri Mar 9 10:39:47 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 11:47:55 2018 +0800

--
 .../master/TestAssignmentManagerMetrics.java| 34 +++-
 1 file changed, 18 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/033485df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
index 287fc70..adda3a8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -54,14 +55,13 @@ public class TestAssignmentManagerMetrics {
   HBaseClassTestRule.forClass(TestAssignmentManagerMetrics.class);
 
   private static final Logger LOG = 
LoggerFactory.getLogger(TestAssignmentManagerMetrics.class);
-  private static final MetricsAssertHelper metricsHelper = CompatibilityFactory
+  private static final MetricsAssertHelper METRICS_HELPER = 
CompatibilityFactory
   .getInstance(MetricsAssertHelper.class);
 
-  private static MiniHBaseCluster cluster;
-  private static HMaster master;
+  private static MiniHBaseCluster CLUSTER;
+  private static HMaster MASTER;
   private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Configuration conf;
-  private static final int msgInterval = 1000;
+  private static final int MSG_INTERVAL = 1000;
 
   @Rule
   public TestName name = new TestName();
@@ -69,7 +69,7 @@ public class TestAssignmentManagerMetrics {
   @BeforeClass
   public static void startCluster() throws Exception {
 LOG.info("Starting cluster");
-conf = TEST_UTIL.getConfiguration();
+Configuration conf = TEST_UTIL.getConfiguration();
 
 // Disable sanity check for coprocessor
 conf.setBoolean("hbase.table.sanity.checks", false);
@@ -78,7 +78,7 @@ public class TestAssignmentManagerMetrics {
 conf.setInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 20);
 
 // set msgInterval to 1 second
-conf.setInt("hbase.regionserver.msginterval", msgInterval);
+conf.setInt("hbase.regionserver.msginterval", MSG_INTERVAL);
 
 // set tablesOnMaster to none
 conf.set("hbase.balancer.tablesOnMaster", "none");
@@ -87,10 +87,12 @@ public class TestAssignmentManagerMetrics {
 conf.setInt("hbase.client.sync.wait.timeout.msec", 2500);
 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
 conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 2500);
+// set a small interval for updating rit metrics
+conf.setInt(AssignmentManager.RIT_CHORE_INTERVAL_MSEC_CONF_KEY, 
MSG_INTERVAL);
 
 TEST_UTIL.startMiniCluster(1);
-cluster = TEST_UTIL.getHBaseCluster();
-master = cluster.getMaster();
+CLUSTER = TEST_UTIL.getHBaseCluster();
+MASTER = CLUSTER.getMaster();
   }
 
   @AfterClass
@@ -112,14 +114,14 @@ public class TestAssignmentManagerMetrics {
   table.put(put);
 
   // Sleep 3 seconds, wait for doMetrics chore catching up
-  Thread.sleep(msgInterval * 3);
+  Thread.sleep(MSG_INTERVAL * 3);
 
   // check the RIT is 0
   MetricsAssignmentManagerSource amSource =
-  
master.getAssignmentManager().getAssignmentManagerMetrics().getMetricsProcSource();
+  
MASTER.getAssignmentManager().getAssignmentManagerMetrics().getMetricsProcSource();
 
-  metricsHelper.assertGauge(MetricsAssignmentManagerSource.RIT_COUNT_NAME, 
0, amSource);
-  

hbase git commit: HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong condition to decide whether the assignment is finished

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 c99e6f743 -> b4e297f3f


HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong 
condition to decide whether the assignment is finished


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4e297f3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4e297f3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4e297f3

Branch: refs/heads/branch-2
Commit: b4e297f3f689a6468cf4fd9a4712dccad7b75dbf
Parents: c99e6f7
Author: zhangduo 
Authored: Thu Mar 8 18:03:04 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 11:08:57 2018 +0800

--
 .../hadoop/hbase/master/TestRestartCluster.java | 32 +---
 1 file changed, 14 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4e297f3/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 56976b3..088dff5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -45,7 +44,7 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Category({MasterTests.class, LargeTests.class})
+@Category({ MasterTests.class, LargeTests.class })
 public class TestRestartCluster {
 
   @ClassRule
@@ -60,7 +59,7 @@ public class TestRestartCluster {
   TableName.valueOf("restartTableTwo"),
   TableName.valueOf("restartTableThree")
   };
-  private static final byte [] FAMILY = Bytes.toBytes("family");
+  private static final byte[] FAMILY = Bytes.toBytes("family");
 
   @After public void tearDown() throws Exception {
 UTIL.shutdownMiniCluster();
@@ -115,17 +114,13 @@ public class TestRestartCluster {
   @Test
   public void testRetainAssignmentOnRestart() throws Exception {
 UTIL.startMiniCluster(2);
-while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
-  Threads.sleep(1);
-}
 // Turn off balancer
-UTIL.getMiniHBaseCluster().getMaster().
-  getMasterRpcServices().synchronousBalanceSwitch(false);
+
UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
 LOG.info("\n\nCreating tables");
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.createTable(TABLE, FAMILY);
 }
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.waitTableEnabled(TABLE);
 }
 
@@ -157,6 +152,7 @@ public class TestRestartCluster {
 }
 
 LOG.info("\n\nShutting down HBase cluster");
+cluster.stopMaster(0);
 cluster.shutdown();
 cluster.waitUntilShutDown();
 
@@ -194,11 +190,8 @@ public class TestRestartCluster {
 }
 
 // Wait till master is initialized and all regions are assigned
-RegionStates regionStates = 
master.getAssignmentManager().getRegionStates();
-int expectedRegions = regionToRegionServerMap.size() + 1;
-while (!master.isInitialized()
-|| regionStates.getRegionAssignments().size() != expectedRegions) {
-  Threads.sleep(100);
+for (TableName TABLE : TABLES) {
+  UTIL.waitTableAvailable(TABLE);
 }
 
 snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
@@ -206,11 +199,14 @@ public class TestRestartCluster {
 Map newRegionToRegionServerMap =
   snapshot.getRegionToRegionServerMap();
 assertEquals(regionToRegionServerMap.size(), 
newRegionToRegionServerMap.size());
-for (Map.Entry entry: 
newRegionToRegionServerMap.entrySet()) {
-  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) 
continue;
+for (Map.Entry entry : 
newRegionToRegionServerMap.entrySet()) {
+  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) {
+continue;
+  }
   ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
   ServerName 

hbase git commit: HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong condition to decide whether the assignment is finished

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 28ebac556 -> 58d840199


HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong 
condition to decide whether the assignment is finished


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58d84019
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58d84019
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58d84019

Branch: refs/heads/branch-2.0
Commit: 58d8401995df2b81bc1557cef270546cf174da7b
Parents: 28ebac5
Author: zhangduo 
Authored: Thu Mar 8 18:03:04 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 11:09:01 2018 +0800

--
 .../hadoop/hbase/master/TestRestartCluster.java | 32 +---
 1 file changed, 14 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58d84019/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 56976b3..088dff5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -45,7 +44,7 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Category({MasterTests.class, LargeTests.class})
+@Category({ MasterTests.class, LargeTests.class })
 public class TestRestartCluster {
 
   @ClassRule
@@ -60,7 +59,7 @@ public class TestRestartCluster {
   TableName.valueOf("restartTableTwo"),
   TableName.valueOf("restartTableThree")
   };
-  private static final byte [] FAMILY = Bytes.toBytes("family");
+  private static final byte[] FAMILY = Bytes.toBytes("family");
 
   @After public void tearDown() throws Exception {
 UTIL.shutdownMiniCluster();
@@ -115,17 +114,13 @@ public class TestRestartCluster {
   @Test
   public void testRetainAssignmentOnRestart() throws Exception {
 UTIL.startMiniCluster(2);
-while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
-  Threads.sleep(1);
-}
 // Turn off balancer
-UTIL.getMiniHBaseCluster().getMaster().
-  getMasterRpcServices().synchronousBalanceSwitch(false);
+
UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
 LOG.info("\n\nCreating tables");
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.createTable(TABLE, FAMILY);
 }
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.waitTableEnabled(TABLE);
 }
 
@@ -157,6 +152,7 @@ public class TestRestartCluster {
 }
 
 LOG.info("\n\nShutting down HBase cluster");
+cluster.stopMaster(0);
 cluster.shutdown();
 cluster.waitUntilShutDown();
 
@@ -194,11 +190,8 @@ public class TestRestartCluster {
 }
 
 // Wait till master is initialized and all regions are assigned
-RegionStates regionStates = 
master.getAssignmentManager().getRegionStates();
-int expectedRegions = regionToRegionServerMap.size() + 1;
-while (!master.isInitialized()
-|| regionStates.getRegionAssignments().size() != expectedRegions) {
-  Threads.sleep(100);
+for (TableName TABLE : TABLES) {
+  UTIL.waitTableAvailable(TABLE);
 }
 
 snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
@@ -206,11 +199,14 @@ public class TestRestartCluster {
 Map newRegionToRegionServerMap =
   snapshot.getRegionToRegionServerMap();
 assertEquals(regionToRegionServerMap.size(), 
newRegionToRegionServerMap.size());
-for (Map.Entry entry: 
newRegionToRegionServerMap.entrySet()) {
-  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) 
continue;
+for (Map.Entry entry : 
newRegionToRegionServerMap.entrySet()) {
+  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) {
+continue;
+  }
   ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
   ServerName 

hbase git commit: HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong condition to decide whether the assignment is finished

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 4e821d491 -> a513678a7


HBASE-20160 TestRestartCluster.testRetainAssignmentOnRestart uses the wrong 
condition to decide whether the assignment is finished


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a513678a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a513678a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a513678a

Branch: refs/heads/master
Commit: a513678a793d07302e6314f4c678bea9ae5aa133
Parents: 4e821d4
Author: zhangduo 
Authored: Thu Mar 8 18:03:04 2018 +0800
Committer: zhangduo 
Committed: Fri Mar 9 11:08:44 2018 +0800

--
 .../hadoop/hbase/master/TestRestartCluster.java | 32 +---
 1 file changed, 14 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a513678a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 56976b3..088dff5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -45,7 +44,7 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Category({MasterTests.class, LargeTests.class})
+@Category({ MasterTests.class, LargeTests.class })
 public class TestRestartCluster {
 
   @ClassRule
@@ -60,7 +59,7 @@ public class TestRestartCluster {
   TableName.valueOf("restartTableTwo"),
   TableName.valueOf("restartTableThree")
   };
-  private static final byte [] FAMILY = Bytes.toBytes("family");
+  private static final byte[] FAMILY = Bytes.toBytes("family");
 
   @After public void tearDown() throws Exception {
 UTIL.shutdownMiniCluster();
@@ -115,17 +114,13 @@ public class TestRestartCluster {
   @Test
   public void testRetainAssignmentOnRestart() throws Exception {
 UTIL.startMiniCluster(2);
-while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
-  Threads.sleep(1);
-}
 // Turn off balancer
-UTIL.getMiniHBaseCluster().getMaster().
-  getMasterRpcServices().synchronousBalanceSwitch(false);
+
UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
 LOG.info("\n\nCreating tables");
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.createTable(TABLE, FAMILY);
 }
-for(TableName TABLE : TABLES) {
+for (TableName TABLE : TABLES) {
   UTIL.waitTableEnabled(TABLE);
 }
 
@@ -157,6 +152,7 @@ public class TestRestartCluster {
 }
 
 LOG.info("\n\nShutting down HBase cluster");
+cluster.stopMaster(0);
 cluster.shutdown();
 cluster.waitUntilShutDown();
 
@@ -194,11 +190,8 @@ public class TestRestartCluster {
 }
 
 // Wait till master is initialized and all regions are assigned
-RegionStates regionStates = 
master.getAssignmentManager().getRegionStates();
-int expectedRegions = regionToRegionServerMap.size() + 1;
-while (!master.isInitialized()
-|| regionStates.getRegionAssignments().size() != expectedRegions) {
-  Threads.sleep(100);
+for (TableName TABLE : TABLES) {
+  UTIL.waitTableAvailable(TABLE);
 }
 
 snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
@@ -206,11 +199,14 @@ public class TestRestartCluster {
 Map newRegionToRegionServerMap =
   snapshot.getRegionToRegionServerMap();
 assertEquals(regionToRegionServerMap.size(), 
newRegionToRegionServerMap.size());
-for (Map.Entry entry: 
newRegionToRegionServerMap.entrySet()) {
-  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) 
continue;
+for (Map.Entry entry : 
newRegionToRegionServerMap.entrySet()) {
+  if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) {
+continue;
+  }
   ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
   ServerName 

hbase git commit: HBASE-17851: WAL to HFile conversion phase MUST detect and handle missing WAL files

2018-03-08 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master a03d09abd -> 4e821d491


HBASE-17851: WAL to HFile conversion phase MUST detect and handle missing WAL 
files

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e821d49
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e821d49
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e821d49

Branch: refs/heads/master
Commit: 4e821d491624dc1db8f4df1154b19c602b76cbfc
Parents: a03d09a
Author: Vladimir Rodionov 
Authored: Thu Mar 8 14:22:28 2018 -0800
Committer: tedyu 
Committed: Thu Mar 8 15:28:27 2018 -0800

--
 .../hbase/mapreduce/TestWALRecordReader.java| 81 +++-
 1 file changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e821d49/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index e486714..449c4b7 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hbase.testclassification.MapReduceTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
@@ -241,6 +242,45 @@ public class TestWALRecordReader {
 testSplit(splits.get(1));
   }
 
+  /**
+   * Test WALRecordReader tolerance to moving WAL from active
+   * to archive directory
+   * @throws Exception exception
+   */
+  @Test
+  public void testWALRecordReaderActiveArchiveTolerance() throws Exception {
+final WALFactory walfactory = new WALFactory(conf, getName());
+WAL log = walfactory.getWAL(info);
+byte [] value = Bytes.toBytes("value");
+WALEdit edit = new WALEdit();
+edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
+System.currentTimeMillis(), value));
+long txid = log.append(info, getWalKeyImpl(System.currentTimeMillis(), 
scopes), edit, true);
+log.sync(txid);
+
+Thread.sleep(10); // make sure 2nd edit gets a later timestamp
+
+edit = new WALEdit();
+edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
+System.currentTimeMillis(), value));
+txid = log.append(info, getWalKeyImpl(System.currentTimeMillis(), scopes), 
edit, true);
+log.sync(txid);
+log.shutdown();
+
+// should have 2 log entries now
+WALInputFormat input = new WALInputFormat();
+Configuration jobConf = new Configuration(conf);
+jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
+// make sure log is found
+List splits = 
input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
+assertEquals(1, splits.size());
+WALInputFormat.WALSplit split = (WALInputFormat.WALSplit) splits.get(0);
+LOG.debug("log="+logDir+" file="+ split.getLogFileName());
+
+testSplitWithMovingWAL(splits.get(0), Bytes.toBytes("1"), 
Bytes.toBytes("2"));
+
+  }
+
   protected WALKeyImpl getWalKeyImpl(final long time, NavigableMap scopes) {
 return new WALKeyImpl(info.getEncodedNameAsBytes(), tableName, time, mvcc, 
scopes);
   }
@@ -270,4 +310,43 @@ public class TestWALRecordReader {
 assertFalse(reader.nextKeyValue());
 reader.close();
   }
-}
+
+  /**
+   * Create a new reader from the split, match the edits against the passed 
columns,
+   * moving WAL to archive in between readings
+   */
+  private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] 
col2) throws Exception {
+WALRecordReader reader = getReader();
+reader.initialize(split, 
MapReduceTestUtil.createDummyMapTaskAttemptContext(conf));
+
+assertTrue(reader.nextKeyValue());
+Cell cell = reader.getCurrentValue().getCells().get(0);
+if (!Bytes.equals(col1, 0, col1.length, cell.getQualifierArray(), 
cell.getQualifierOffset(),
+  cell.getQualifierLength())) {
+  assertTrue(
+"expected [" + Bytes.toString(col1) + "], actual [" + Bytes.toString(
+  cell.getQualifierArray(), cell.getQualifierOffset(), 
cell.getQualifierLength()) + "]",
+false);
+}
+// Move log file to archive directory
+// While WAL 

[4/8] hbase git commit: HBASE-18467 report nightly results to devs via jira

2018-03-08 Thread busbey
HBASE-18467 report nightly results to devs via jira

- rely on parallel pipeline to ensure all stages always run
- define non-CPS jira commenting function
- comment on jiras in the changeset with summary and links

Signed-off-by: Mike Drob 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/44f11292
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/44f11292
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/44f11292

Branch: refs/heads/branch-2
Commit: 44f11292343a3bd21edf9bdd1b206e4bb5a80732
Parents: 78c02c5
Author: Sean Busbey 
Authored: Wed Aug 9 00:48:46 2017 -0500
Committer: Sean Busbey 
Committed: Thu Mar 8 11:44:31 2018 -0600

--
 dev-support/Jenkinsfile  | 592 ++
 dev-support/hbase_nightly_source-artifact.sh |   1 -
 2 files changed, 367 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/44f11292/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 7224ed3..92c7c9c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -15,11 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 pipeline {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent any
   triggers {
 cron('H */6 * * *')  // Run every 6 hours.
   }
@@ -34,6 +30,12 @@ pipeline {
 // where we check out to across stages
 BASEDIR = "${env.WORKSPACE}/component"
 YETUS_RELEASE = '0.7.0'
+// where we'll write everything from different steps. Need a copy here so 
the final step can check for success/failure.
+OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
+OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUT_DIR_RELATIVE_HADOOP2 = 'output-jdk8-hadoop2'
+OUTPUT_DIR_RELATIVE_HADOOP3 = 'output-jdk8-hadoop3'
+
 PROJECT = 'hbase'
 PROJECT_PERSONALITY = 
'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
 // This section of the docs tells folks not to use the javadoc tag. older 
branches have our old version of the check for said tag.
@@ -62,6 +64,7 @@ pipeline {
 dir('component') {
   checkout scm
 }
+stash name: 'component', includes: "component/*,component/**/*"
   }
 }
 stage ('yetus install') {
@@ -111,252 +114,391 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 stash name: 'yetus', includes: 
"yetus-*/*,yetus-*/**/*,tools/personality.sh"
   }
 }
-stage ('yetus general check') {
-  environment {
-// TODO does hadoopcheck need to be jdk specific?
-// Should be things that work with multijdk
-TESTS = 'all,-unit,-findbugs'
-// on branches that don't support jdk7, this will already be 
JAVA_HOME, so we'll end up not
-// doing multijdk there.
-MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
-OUTPUT_DIR_RELATIVE = "output-general"
-OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-  }
-  steps {
-unstash 'yetus'
-sh '''#!/usr/bin/env bash
-  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-  rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-  "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+stage ('health checks') {
+  parallel {
+stage ('yetus general check') {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  environment {
+// TODO does hadoopcheck need to be jdk specific?
+// Should be things that work with multijdk
+TESTS = 'all,-unit,-findbugs'
+// on branches that don't support jdk7, this will already be 
JAVA_HOME, so we'll end up not
+// doing multijdk there.
+MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
+OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+  }
+  steps {
+unstash 'yetus'
+unstash 'component'
+sh '''#!/usr/bin/env bash
+  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+  rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+  "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
-// TODO should this be a download from master, similar to how the 
personality is?
-sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-  }
-  post {
-always {
-  // Has to 

[6/8] hbase git commit: HBASE-20075 remove logic for branch-1.1 nightly testing

2018-03-08 Thread busbey
HBASE-20075 remove logic for branch-1.1 nightly testing

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dc845a7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dc845a7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dc845a7d

Branch: refs/heads/branch-2.0
Commit: dc845a7df97a96cbc6feb694dac319c1b04c5761
Parents: 39c8acf
Author: Sean Busbey 
Authored: Sat Feb 24 15:50:12 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:46:12 2018 -0600

--
 dev-support/Jenkinsfile | 9 -
 1 file changed, 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dc845a7d/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 601eadb..9c0ff0a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -195,10 +195,6 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
-  # for branch-1.1 we don't do jdk8 findbugs, so do it here
-  if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
-TESTS+=",findbugs"
-  fi
   rm -rf "${OUTPUT_DIR}/commentfile}"
   declare -i status=0
   if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
@@ -249,11 +245,6 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   label 'Hadoop'
 }
   }
-  when {
-not {
-  branch 'branch-1.1*'
-}
-  }
   environment {
 TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"



[3/8] hbase git commit: HBASE-20075 remove logic for branch-1.1 nightly testing

2018-03-08 Thread busbey
HBASE-20075 remove logic for branch-1.1 nightly testing

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4c4af09
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4c4af09
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4c4af09

Branch: refs/heads/branch-2
Commit: d4c4af09a1ee8601e4e1390f108618f02db2d925
Parents: 2764261
Author: Sean Busbey 
Authored: Sat Feb 24 15:50:12 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:44:31 2018 -0600

--
 dev-support/Jenkinsfile | 9 -
 1 file changed, 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4c4af09/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 601eadb..9c0ff0a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -195,10 +195,6 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
-  # for branch-1.1 we don't do jdk8 findbugs, so do it here
-  if [ "${BRANCH_NAME}" == "branch-1.1" ]; then
-TESTS+=",findbugs"
-  fi
   rm -rf "${OUTPUT_DIR}/commentfile}"
   declare -i status=0
   if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
@@ -249,11 +245,6 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   label 'Hadoop'
 }
   }
-  when {
-not {
-  branch 'branch-1.1*'
-}
-  }
   environment {
 TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"



[1/8] hbase git commit: HBASE-18467 addendum parallel steps must account for SCM and marshalling results

2018-03-08 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-2 78c02c508 -> c99e6f743
  refs/heads/branch-2.0 b325ae559 -> 28ebac556


HBASE-18467 addendum parallel steps must account for SCM and marshalling results

* do a scm checkout on the stages that need access to source.
* ensure our install job runs on the ubuntu label
* copy jira comments to main workspace
* simplify the jira comment

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c99e6f74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c99e6f74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c99e6f74

Branch: refs/heads/branch-2
Commit: c99e6f7439ece57e94735f9614cf85a3ab228cbe
Parents: d4c4af0
Author: Sean Busbey 
Authored: Thu Mar 1 16:34:08 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:44:31 2018 -0600

--
 dev-support/Jenkinsfile | 102 +++
 1 file changed, 65 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c99e6f74/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 9c0ff0a..5453fd5 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -15,7 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 pipeline {
-  agent any
+  agent {
+node {
+  label 'ubuntu'
+}
+  }
   triggers {
 cron('H */6 * * *')  // Run every 6 hours.
   }
@@ -59,14 +63,6 @@ pipeline {
 booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a 
lot more meta-information.')
   }
   stages {
-stage ('scm checkout') {
-  steps {
-dir('component') {
-  checkout scm
-}
-stash name: 'component', includes: "component/*,component/**/*"
-  }
-}
 stage ('yetus install') {
   steps {
 sh  '''#!/usr/bin/env bash
@@ -114,14 +110,25 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 stash name: 'yetus', includes: 
"yetus-*/*,yetus-*/**/*,tools/personality.sh"
   }
 }
+stage ('init health results') {
+  steps {
+// stash with given name for all tests we might run, so that we can 
unstash all of them even if
+// we skip some due to e.g. branch-specific JDK or Hadoop support
+stash name: 'general-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_GENERAL}/doesn't-match"
+stash name: 'jdk7-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_JDK7}/doesn't-match"
+stash name: 'hadoop2-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_HADOOP2}/doesn't-match"
+stash name: 'hadoop3-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_HADOOP3}/doesn't-match"
+stash name: 'srctarball-result', allowEmpty: true, includes: 
"output-srctarball/doesn't-match"
+  }
+}
 stage ('health checks') {
   parallel {
 stage ('yetus general check') {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
   environment {
 // TODO does hadoopcheck need to be jdk specific?
 // Should be things that work with multijdk
@@ -134,7 +141,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   steps {
 unstash 'yetus'
-unstash 'component'
+dir('component') {
+  checkout scm
+}
 sh '''#!/usr/bin/env bash
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
@@ -156,6 +165,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   post {
 always {
+  stash name: 'general-result', includes: 
"${OUTPUT_DIR_RELATIVE}/commentfile"
   // Has to be relative to WORKSPACE.
   archive "${env.OUTPUT_DIR_RELATIVE}/*"
   archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
@@ -172,11 +182,11 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
 }
 stage ('yetus jdk7 checks') {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
   when {
 branch 'branch-1*'
   }
@@ -188,7 +198,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   steps {
 unstash 'yetus'
-unstash 'component'
+dir('component') {
+  checkout scm

[7/8] hbase git commit: HBASE-18467 addendum parallel steps must account for SCM and marshalling results

2018-03-08 Thread busbey
HBASE-18467 addendum parallel steps must account for SCM and marshalling results

* do a scm checkout on the stages that need access to source.
* ensure our install job runs on the ubuntu label
* copy jira comments to main workspace
* simplify the jira comment

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/28ebac55
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/28ebac55
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/28ebac55

Branch: refs/heads/branch-2.0
Commit: 28ebac556edac7c40ed5aa7679e2e907b08d9f8c
Parents: dc845a7
Author: Sean Busbey 
Authored: Thu Mar 1 16:34:08 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:46:12 2018 -0600

--
 dev-support/Jenkinsfile | 102 +++
 1 file changed, 65 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/28ebac55/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 9c0ff0a..5453fd5 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -15,7 +15,11 @@
 // specific language governing permissions and limitations
 // under the License.
 pipeline {
-  agent any
+  agent {
+node {
+  label 'ubuntu'
+}
+  }
   triggers {
 cron('H */6 * * *')  // Run every 6 hours.
   }
@@ -59,14 +63,6 @@ pipeline {
 booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a 
lot more meta-information.')
   }
   stages {
-stage ('scm checkout') {
-  steps {
-dir('component') {
-  checkout scm
-}
-stash name: 'component', includes: "component/*,component/**/*"
-  }
-}
 stage ('yetus install') {
   steps {
 sh  '''#!/usr/bin/env bash
@@ -114,14 +110,25 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 stash name: 'yetus', includes: 
"yetus-*/*,yetus-*/**/*,tools/personality.sh"
   }
 }
+stage ('init health results') {
+  steps {
+// stash with given name for all tests we might run, so that we can 
unstash all of them even if
+// we skip some due to e.g. branch-specific JDK or Hadoop support
+stash name: 'general-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_GENERAL}/doesn't-match"
+stash name: 'jdk7-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_JDK7}/doesn't-match"
+stash name: 'hadoop2-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_HADOOP2}/doesn't-match"
+stash name: 'hadoop3-result', allowEmpty: true, includes: 
"${OUTPUT_DIR_RELATIVE_HADOOP3}/doesn't-match"
+stash name: 'srctarball-result', allowEmpty: true, includes: 
"output-srctarball/doesn't-match"
+  }
+}
 stage ('health checks') {
   parallel {
 stage ('yetus general check') {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
   environment {
 // TODO does hadoopcheck need to be jdk specific?
 // Should be things that work with multijdk
@@ -134,7 +141,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   steps {
 unstash 'yetus'
-unstash 'component'
+dir('component') {
+  checkout scm
+}
 sh '''#!/usr/bin/env bash
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
@@ -156,6 +165,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   post {
 always {
+  stash name: 'general-result', includes: 
"${OUTPUT_DIR_RELATIVE}/commentfile"
   // Has to be relative to WORKSPACE.
   archive "${env.OUTPUT_DIR_RELATIVE}/*"
   archive "${env.OUTPUT_DIR_RELATIVE}/**/*"
@@ -172,11 +182,11 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
 }
 stage ('yetus jdk7 checks') {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
   when {
 branch 'branch-1*'
   }
@@ -188,7 +198,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   }
   steps {
 unstash 'yetus'
-unstash 'component'
+dir('component') {
+  checkout scm
+}
 sh '''#!/usr/bin/env bash
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"

[5/8] hbase git commit: HBASE-15151 ensure findbugs check runs in nightly tests on all branches.

2018-03-08 Thread busbey
HBASE-15151 ensure findbugs check runs in nightly tests on all branches.

Signed-off-by: Mike Drob 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39c8acfe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39c8acfe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39c8acfe

Branch: refs/heads/branch-2.0
Commit: 39c8acfed658e1714a1e14d95bd280e5113f0327
Parents: 8698e20
Author: Sean Busbey 
Authored: Sun Feb 25 00:35:45 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:46:12 2018 -0600

--
 dev-support/Jenkinsfile | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/39c8acfe/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 92c7c9c..601eadb 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -255,7 +255,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
   }
   environment {
-TESTS = 'mvninstall,compile,javac,unit,htmlout'
+TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
 OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
 // This isn't strictly needed on branches that only support jdk8, 
but doesn't hurt
@@ -327,10 +327,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
   }
   environment {
-// Failure in any stage fails the build and consecutive stages are 
not built.
-// Findbugs is part of this last yetus stage to prevent findbugs 
precluding hadoop3
-// tests.
-TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+TESTS = 'mvninstall,compile,javac,unit,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
 OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
 // This isn't strictly needed on branches that only support jdk8, 
but doesn't hurt



[8/8] hbase git commit: HBASE-18467 report nightly results to devs via jira

2018-03-08 Thread busbey
HBASE-18467 report nightly results to devs via jira

- rely on parallel pipeline to ensure all stages always run
- define non-CPS jira commenting function
- comment on jiras in the changeset with summary and links

Signed-off-by: Mike Drob 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8698e207
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8698e207
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8698e207

Branch: refs/heads/branch-2.0
Commit: 8698e207689fb5c8384a46409b4d95634a3a537d
Parents: b325ae5
Author: Sean Busbey 
Authored: Wed Aug 9 00:48:46 2017 -0500
Committer: Sean Busbey 
Committed: Thu Mar 8 11:46:12 2018 -0600

--
 dev-support/Jenkinsfile  | 592 ++
 dev-support/hbase_nightly_source-artifact.sh |   1 -
 2 files changed, 367 insertions(+), 226 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8698e207/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 7224ed3..92c7c9c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -15,11 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 pipeline {
-  agent {
-node {
-  label 'Hadoop'
-}
-  }
+  agent any
   triggers {
 cron('H */6 * * *')  // Run every 6 hours.
   }
@@ -34,6 +30,12 @@ pipeline {
 // where we check out to across stages
 BASEDIR = "${env.WORKSPACE}/component"
 YETUS_RELEASE = '0.7.0'
+// where we'll write everything from different steps. Need a copy here so 
the final step can check for success/failure.
+OUTPUT_DIR_RELATIVE_GENERAL = 'output-general'
+OUTPUT_DIR_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUT_DIR_RELATIVE_HADOOP2 = 'output-jdk8-hadoop2'
+OUTPUT_DIR_RELATIVE_HADOOP3 = 'output-jdk8-hadoop3'
+
 PROJECT = 'hbase'
 PROJECT_PERSONALITY = 
'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
 // This section of the docs tells folks not to use the javadoc tag. older 
branches have our old version of the check for said tag.
@@ -62,6 +64,7 @@ pipeline {
 dir('component') {
   checkout scm
 }
+stash name: 'component', includes: "component/*,component/**/*"
   }
 }
 stage ('yetus install') {
@@ -111,252 +114,391 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 stash name: 'yetus', includes: 
"yetus-*/*,yetus-*/**/*,tools/personality.sh"
   }
 }
-stage ('yetus general check') {
-  environment {
-// TODO does hadoopcheck need to be jdk specific?
-// Should be things that work with multijdk
-TESTS = 'all,-unit,-findbugs'
-// on branches that don't support jdk7, this will already be 
JAVA_HOME, so we'll end up not
-// doing multijdk there.
-MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
-OUTPUT_DIR_RELATIVE = "output-general"
-OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE}"
-  }
-  steps {
-unstash 'yetus'
-sh '''#!/usr/bin/env bash
-  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
-  rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
-  "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+stage ('health checks') {
+  parallel {
+stage ('yetus general check') {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  environment {
+// TODO does hadoopcheck need to be jdk specific?
+// Should be things that work with multijdk
+TESTS = 'all,-unit,-findbugs'
+// on branches that don't support jdk7, this will already be 
JAVA_HOME, so we'll end up not
+// doing multijdk there.
+MULTIJDK = '/usr/lib/jvm/java-8-openjdk-amd64'
+OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
+  }
+  steps {
+unstash 'yetus'
+unstash 'component'
+sh '''#!/usr/bin/env bash
+  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+  rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
+  "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
-// TODO should this be a download from master, similar to how the 
personality is?
-sh "${env.BASEDIR}/dev-support/hbase_nightly_yetus.sh"
-  }
-  post {
-always {
-  // Has 

[2/8] hbase git commit: HBASE-15151 ensure findbugs check runs in nightly tests on all branches.

2018-03-08 Thread busbey
HBASE-15151 ensure findbugs check runs in nightly tests on all branches.

Signed-off-by: Mike Drob 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/27642614
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/27642614
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/27642614

Branch: refs/heads/branch-2
Commit: 27642614b4a1d3b6f533997b0735c2cb9c4b6fa0
Parents: 44f1129
Author: Sean Busbey 
Authored: Sun Feb 25 00:35:45 2018 -0600
Committer: Sean Busbey 
Committed: Thu Mar 8 11:44:31 2018 -0600

--
 dev-support/Jenkinsfile | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/27642614/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 92c7c9c..601eadb 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -255,7 +255,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
   }
   environment {
-TESTS = 'mvninstall,compile,javac,unit,htmlout'
+TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
 OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP2}"
 // This isn't strictly needed on branches that only support jdk8, 
but doesn't hurt
@@ -327,10 +327,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
   }
   environment {
-// Failure in any stage fails the build and consecutive stages are 
not built.
-// Findbugs is part of this last yetus stage to prevent findbugs 
precluding hadoop3
-// tests.
-TESTS = 'mvninstall,compile,javac,unit,findbugs,htmlout'
+TESTS = 'mvninstall,compile,javac,unit,htmlout'
 OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
 OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_HADOOP3}"
 // This isn't strictly needed on branches that only support jdk8, 
but doesn't hurt



hbase git commit: HBASE-20114 Fix IllegalFormatConversionException in rsgroup.jsp Repplication. Was reverted earlier.

2018-03-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 71cc7869d -> 78c02c508


HBASE-20114 Fix IllegalFormatConversionException in rsgroup.jsp
Repplication. Was reverted earlier.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78c02c50
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78c02c50
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78c02c50

Branch: refs/heads/branch-2
Commit: 78c02c508105fa326515fc9b51b65316f1940605
Parents: 71cc786
Author: haxiaolin 
Authored: Fri Mar 2 11:21:12 2018 +0800
Committer: Michael Stack 
Committed: Thu Mar 8 09:29:01 2018 -0800

--
 hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/78c02c50/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
index 7b7e227..43753a5 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
@@ -254,7 +254,7 @@
 %>

  <%= serverName.getServerName() 
%>
- <%= String.format("%.0f", 
sl.getRequestCountPerSecond()) %>
+ <%= sl.getRequestCountPerSecond() %>
  <%= readRequestCount %>
  <%= writeRequestCount %>




hbase git commit: HBASE-20114 Fix IllegalFormatConversionException in rsgroup.jsp Repplication. Was reverted earlier.

2018-03-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 1146625c4 -> b325ae559


HBASE-20114 Fix IllegalFormatConversionException in rsgroup.jsp
Repplication. Was reverted earlier.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b325ae55
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b325ae55
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b325ae55

Branch: refs/heads/branch-2.0
Commit: b325ae5593aee52137079d20b19564bf8291e3cb
Parents: 1146625
Author: haxiaolin 
Authored: Fri Mar 2 11:21:12 2018 +0800
Committer: Michael Stack 
Committed: Thu Mar 8 09:27:51 2018 -0800

--
 hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b325ae55/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
index 7b7e227..43753a5 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rsgroup.jsp
@@ -254,7 +254,7 @@
 %>

  <%= serverName.getServerName() 
%>
- <%= String.format("%.0f", 
sl.getRequestCountPerSecond()) %>
+ <%= sl.getRequestCountPerSecond() %>
  <%= readRequestCount %>
  <%= writeRequestCount %>




hbase git commit: HBASE-20155 update branch-2 version to 2.1.0-SNAPSHOT

2018-03-08 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 25efd37f3 -> 71cc7869d


HBASE-20155 update branch-2 version to 2.1.0-SNAPSHOT

Signed-off-by: Peter Somogyi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/71cc7869
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/71cc7869
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/71cc7869

Branch: refs/heads/branch-2
Commit: 71cc7869db30a31fcaf0937770957dbc33b2ae37
Parents: 25efd37
Author: Sean Busbey 
Authored: Wed Mar 7 23:34:17 2018 -0600
Committer: Peter Somogyi 
Committed: Thu Mar 8 08:44:30 2018 -0800

--
 hbase-annotations/pom.xml| 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml| 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml | 2 +-
 hbase-archetypes/pom.xml | 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-build-configuration/pom.xml| 2 +-
 hbase-build-support/hbase-error-prone/pom.xml| 6 +++---
 hbase-build-support/pom.xml  | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-endpoint/pom.xml   | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-external-blockcache/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-http/pom.xml   | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-mapreduce/pom.xml  | 2 +-
 hbase-metrics-api/pom.xml| 2 +-
 hbase-metrics/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol-shaded/pom.xml| 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-replication/pom.xml| 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-rsgroup/pom.xml| 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-mapreduce/pom.xml  | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 hbase-zookeeper/pom.xml  | 2 +-
 pom.xml  | 2 +-
 39 files changed, 42 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/71cc7869/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 8b4dd54..88084fe 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-2.0.0-beta-2
+2.1.0-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/71cc7869/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index 059d849..fd9779f 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.0-beta-2
+2.1.0-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/71cc7869/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 9cc803b..4eccad3 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.0-beta-2
+2.1.0-SNAPSHOT
 ..
   
   hbase-client-project

http://git-wip-us.apache.org/repos/asf/hbase/blob/71cc7869/hbase-archetypes/hbase-shaded-client-project/pom.xml

[16/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
index 67e1068..b9d5fcc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -335,30 +335,25 @@ extends 
-private void
-reportTransitionCLOSED(MasterProcedureEnvenv,
-  RegionStates.RegionStateNoderegionNode)
-
-
 protected void
 serializeStateData(ProcedureStateSerializerserializer)
 The user-level code of the procedure may have some state to
  persist (e.g.
 
 
-
+
 protected boolean
 startTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
 
-
+
 void
 toStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuildersb)
 Extend the toString() information with the procedure details
  e.g.
 
 
-
+
 protected boolean
 updateTransition(MasterProcedureEnvenv,
 RegionStates.RegionStateNoderegionNode)
@@ -693,23 +688,13 @@ extends 
-
-
-
-
-reportTransitionCLOSED
-privatevoidreportTransitionCLOSED(MasterProcedureEnvenv,
-RegionStates.RegionStateNoderegionNode)
-
-
 
 
 
 
 
 toStringClassDetails
-publicvoidtoStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuildersb)
+publicvoidtoStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuildersb)
 Description copied from 
class:Procedure
 Extend the toString() information with the procedure details
  e.g. className and parameters
@@ -727,7 +712,7 @@ extends 
 
 getServer
-publicServerNamegetServer(MasterProcedureEnvenv)
+publicServerNamegetServer(MasterProcedureEnvenv)
 Description copied from 
class:RegionTransitionProcedure
 Used by ServerCrashProcedure to see if this Assign/Unassign 
needs processing.
 
@@ -744,7 +729,7 @@ extends 
 
 getProcedureMetrics
-protectedProcedureMetricsgetProcedureMetrics(MasterProcedureEnvenv)
+protectedProcedureMetricsgetProcedureMetrics(MasterProcedureEnvenv)
 Description copied from 
class:Procedure
 Override this method to provide procedure specific counters 
for submitted count, failed
  count and time histogram.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
index adfea3a..50cb618 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
@@ -371,50 +371,45 @@
 longseqId)
 
 
-private void
-UnassignProcedure.reportTransitionCLOSED(MasterProcedureEnvenv,
-  RegionStates.RegionStateNoderegionNode)
-
-
 protected boolean
 UnassignProcedure.startTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
 
-
+
 protected abstract boolean
 RegionTransitionProcedure.startTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
 
-
+
 protected boolean
 AssignProcedure.startTransition(MasterProcedureEnvenv,
RegionStates.RegionStateNoderegionNode)
 
-
+
 void
 AssignmentManager.undoRegionAsClosing(RegionStates.RegionStateNoderegionNode)
 
-
+
 void
 AssignmentManager.undoRegionAsOpening(RegionStates.RegionStateNoderegionNode)
 
-
+
 void
 RegionStateStore.updateRegionLocation(RegionStates.RegionStateNoderegionStateNode)
 
-
+
 protected boolean
 UnassignProcedure.updateTransition(MasterProcedureEnvenv,
 RegionStates.RegionStateNoderegionNode)
 
-
+
 protected abstract boolean
 

[14/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
index da5d73e..5afdb22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
@@ -40,83 +40,84 @@
 032// adding special compareTo methods. We 
have to clean it. Deprecate this class and replace it
 033// with a more generic one which says it 
compares bytes (not necessary a byte array only)
 034// BytesComparable implements 
ComparableByte will work?
-035public abstract class ByteArrayComparable 
implements Comparablebyte[] {
-036
-037  byte[] value;
-038
-039  /**
-040   * Constructor.
-041   * @param value the value to compare 
against
-042   */
-043  public ByteArrayComparable(byte [] 
value) {
-044this.value = value;
-045  }
-046
-047  public byte[] getValue() {
-048return value;
-049  }
-050
-051  /**
-052   * @return The comparator serialized 
using pb
-053   */
-054  public abstract byte [] 
toByteArray();
-055
-056  /**
-057   * @param pbBytes A pb serialized 
{@link ByteArrayComparable} instance
-058   * @return An instance of {@link 
ByteArrayComparable} made from codebytes/code
-059   * @throws DeserializationException
-060   * @see #toByteArray
-061   */
-062  public static ByteArrayComparable 
parseFrom(final byte [] pbBytes)
-063  throws DeserializationException {
-064throw new DeserializationException(
-065  "parseFrom called on base 
ByteArrayComparable, but should be called on derived type");
-066  }
-067
-068  /**
-069   * @param other
-070   * @return true if and only if the 
fields of the comparator that are serialized
-071   * are equal to the corresponding 
fields in other.  Used for testing.
-072   */
-073  boolean 
areSerializedFieldsEqual(ByteArrayComparable other) {
-074if (other == this) return true;
-075
-076return Bytes.equals(this.getValue(), 
other.getValue());
-077  }
-078
-079  @Override
-080  public int compareTo(byte [] value) {
-081return compareTo(value, 0, 
value.length);
-082  }
-083
-084  /**
-085   * Special compareTo method for 
subclasses, to avoid
-086   * copying byte[] unnecessarily.
-087   * @param value byte[] to compare
-088   * @param offset offset into value
-089   * @param length number of bytes to 
compare
-090   * @return a negative integer, zero, or 
a positive integer as this object
-091   * is less than, equal to, or 
greater than the specified object.
-092   */
-093  public abstract int compareTo(byte [] 
value, int offset, int length);
-094
-095  /**
-096   * Special compareTo method for 
subclasses, to avoid copying bytes unnecessarily.
-097   * @param value bytes to compare within 
a ByteBuffer
-098   * @param offset offset into value
-099   * @param length number of bytes to 
compare
-100   * @return a negative integer, zero, or 
a positive integer as this object
-101   * is less than, equal to, or 
greater than the specified object.
-102   */
-103  public int compareTo(ByteBuffer value, 
int offset, int length) {
-104// For BC, providing a default 
implementation here which is doing a bytes copy to a temp byte[]
-105// and calling compareTo(byte[]). 
Make sure to override this method in subclasses to avoid
-106// copying bytes unnecessarily.
-107byte[] temp = new byte[length];
-108
ByteBufferUtils.copyFromBufferToArray(temp, value, offset, 0, length);
-109return compareTo(temp);
-110  }
-111}
+035@SuppressWarnings("ComparableType") // 
Should this move to Comparator usage?
+036public abstract class ByteArrayComparable 
implements Comparablebyte[] {
+037
+038  byte[] value;
+039
+040  /**
+041   * Constructor.
+042   * @param value the value to compare 
against
+043   */
+044  public ByteArrayComparable(byte [] 
value) {
+045this.value = value;
+046  }
+047
+048  public byte[] getValue() {
+049return value;
+050  }
+051
+052  /**
+053   * @return The comparator serialized 
using pb
+054   */
+055  public abstract byte [] 
toByteArray();
+056
+057  /**
+058   * @param pbBytes A pb serialized 
{@link ByteArrayComparable} instance
+059   * @return An instance of {@link 
ByteArrayComparable} made from codebytes/code
+060   * @throws DeserializationException
+061   * @see #toByteArray
+062   */
+063  public static ByteArrayComparable 
parseFrom(final byte [] pbBytes)
+064  throws DeserializationException {
+065throw new DeserializationException(
+066  "parseFrom called on base 
ByteArrayComparable, but should be called on derived type");
+067  }
+068
+069  /**
+070   * @param other
+071   * @return true if and only if the 
fields of the comparator that are serialized
+072   * 

[21/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
index ce6ff26..577439c 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override
-198  boolean 
areSerializedFieldsEqual(ByteArrayComparable other) {
-199if (other == this) 

[10/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
index ce6ff26..577439c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override

[13/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.Engine.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.Engine.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.Engine.html
index ce6ff26..577439c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.Engine.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.Engine.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override
-198  boolean 

hbase-site git commit: INFRA-10751 Empty commit

2018-03-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e9a81b899 -> 5f66dfcf3


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/5f66dfcf
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/5f66dfcf
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/5f66dfcf

Branch: refs/heads/asf-site
Commit: 5f66dfcf307b4e1524029b84e42f4d018b873d7e
Parents: e9a81b8
Author: jenkins 
Authored: Thu Mar 8 14:53:58 2018 +
Committer: jenkins 
Committed: Thu Mar 8 14:53:58 2018 +

--

--




[12/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
index ce6ff26..577439c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override
-198  boolean 

[19/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index bc11ee0..af453d5 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2018 The Apache Software Foundation
 
-  File: 3579,
- Errors: 16184,
+  File: 3580,
+ Errors: 16177,
  Warnings: 0,
  Infos: 0
   
@@ -17611,7 +17611,7 @@ under the License.
   0
 
 
-  20
+  15
 
   
   
@@ -21760,6 +21760,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer.java;>org/apache/hadoop/hbase/master/TestShutdownWithNoRegionServer.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.crypto.CryptoCipherProvider.java;>org/apache/hadoop/hbase/io/crypto/CryptoCipherProvider.java
 
 
@@ -37141,7 +37155,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/coc.html
--
diff --git a/coc.html b/coc.html
index c051892..3500ece 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -368,7 +368,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-07
+  Last Published: 
2018-03-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index c334c75..9942bdb 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -667,7 +667,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-07
+  Last Published: 
2018-03-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index b0bd543..e0a5554 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -433,7 +433,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-07
+  Last Published: 
2018-03-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 05829bb..2f21003 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -281,10 +281,10 @@
 320
 
 Number of unique artifacts (NOA):
-349
+350
 
 Number of version-conflicting artifacts (NOC):
-19
+20
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -340,55 +340,72 @@
 org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-com.fasterxml.jackson.module:jackson-module-scala_2.10:jar:2.9.2:compile
 org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT\-com.fasterxml.jackson.module:jackson-module-scala_2.10:jar:2.9.2:compile
 
-com.google.guava:guava
+com.google.errorprone:javac
 
 
 
 
 
 
+9+181-r4173-1
+
+

[06/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/supportingprojects.html
--
diff --git a/supportingprojects.html b/supportingprojects.html
index 17b7a2c..4d1ecae 100644
--- a/supportingprojects.html
+++ b/supportingprojects.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Supporting Projects
 
@@ -513,7 +513,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-07
+  Last Published: 
2018-03-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/team-list.html
--
diff --git a/team-list.html b/team-list.html
index c32a7f0..cbc7a44 100644
--- a/team-list.html
+++ b/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Team
 
@@ -717,7 +717,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-03-07
+  Last Published: 
2018-03-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/allclasses-frame.html
--
diff --git a/testdevapidocs/allclasses-frame.html 
b/testdevapidocs/allclasses-frame.html
index e16bf34..babd3a4 100644
--- a/testdevapidocs/allclasses-frame.html
+++ b/testdevapidocs/allclasses-frame.html
@@ -1977,6 +1977,7 @@
 TestShutdownBackupMaster.MockHMaster
 TestShutdownWhileWALBroken
 TestShutdownWhileWALBroken.MyRegionServer
+TestShutdownWithNoRegionServer
 TestSimpleMutableByteRange
 TestSimplePositionedMutableByteRange
 TestSimpleRegionNormalizer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/allclasses-noframe.html
--
diff --git a/testdevapidocs/allclasses-noframe.html 
b/testdevapidocs/allclasses-noframe.html
index e17d16b..129fb65 100644
--- a/testdevapidocs/allclasses-noframe.html
+++ b/testdevapidocs/allclasses-noframe.html
@@ -1977,6 +1977,7 @@
 TestShutdownBackupMaster.MockHMaster
 TestShutdownWhileWALBroken
 TestShutdownWhileWALBroken.MyRegionServer
+TestShutdownWithNoRegionServer
 TestSimpleMutableByteRange
 TestSimplePositionedMutableByteRange
 TestSimpleRegionNormalizer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/constant-values.html
--
diff --git a/testdevapidocs/constant-values.html 
b/testdevapidocs/constant-values.html
index f95df92..9de3bf6 100644
--- a/testdevapidocs/constant-values.html
+++ b/testdevapidocs/constant-values.html
@@ -7055,7 +7055,7 @@
 
 publicstaticfinalint
 TYPES_OF_FAILURE
-7
+6
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index d94dbc3..75f15c0 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -5063,6 +5063,8 @@
 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.TestShutdownBackupMaster
 
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer
+
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.TestSplitLogManager
 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.TestTableStateManager
@@ -35618,6 +35620,8 @@
 
 setup()
 - Method in class org.apache.hadoop.hbase.master.TestMasterTransitions
 
+setUp()
 - Static method in class org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer
+
 setup()
 - Method in class org.apache.hadoop.hbase.master.TestSplitLogManager
 
 setUp()
 - Method in class org.apache.hadoop.hbase.master.TestWarmupRegion
@@ -40564,6 +40568,8 @@
 
 tearDown()
 - Method in class org.apache.hadoop.hbase.master.TestRestartCluster
 
+tearDown()
 - Static method in class org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer
+
 teardown()
 - Method in class org.apache.hadoop.hbase.master.TestSplitLogManager
 
 tearDown()
 - Method in class org.apache.hadoop.hbase.master.TestWarmupRegion
@@ -41450,6 +41456,8 @@
 
 test()
 - Method in class org.apache.hadoop.hbase.master.TestGetLastFlushedSequenceId
 
+test()
 - Method in class org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer
+
 test()
 - Method in class org.apache.hadoop.hbase.metrics.impl.TestDropwizardMeter
 
 Test(Connection,
 PerformanceEvaluation.TestOptions, PerformanceEvaluation.Status) - 
Constructor for class 

[20/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 6abd72f..ceffe62 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -274,10 +274,10 @@
 Warnings
 Errors
 
-3579
+3580
 0
 0
-16184
+16177
 
 Files
 
@@ -4877,5406 +4877,5401 @@
 0
 5
 
-org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
-0
-0
-2
-
 org/apache/hadoop/hbase/master/TestCatalogJanitor.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/master/TestCatalogJanitorInMemoryStates.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestHMasterCommandLine.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestHMasterRPCException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestMaster.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestMasterFailoverBalancerPersistence.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/TestMasterFileSystem.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/master/TestMasterFileSystemWithWALDir.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestMasterMetrics.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/master/TestMasterNoCluster.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/master/TestMasterShutdown.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/TestMasterTransitions.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestMirroringTableStateManager.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/master/TestRegionPlacement.java
 0
 0
 21
-
+
 org/apache/hadoop/hbase/master/TestRegionPlacement2.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/TestRestartCluster.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/TestRollingRestart.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/TestSplitLogManager.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/master/TestTableStateManager.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/TestWarmupRegion.java
 0
 0
 19
-
+
 org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 0
 0
 30
-
+
 org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java
 0
 0
 14
-
+
 org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
 0
 0
 31
-
+
 org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/master/assignment/RegionStates.java
 0
 0
 25
-
+
 org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
 0
 0
-20
-
+15
+
 org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/master/assignment/Util.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/BalancerChore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
 0
 0
 75
-
+
 org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 0
 0
 61
-
+
 org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/master/balancer/ServerAndLoad.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
 0
 0
 33
-
+
 org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 0
 0
 32
-
+
 

[02/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
index 507c0cd..d27f392 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
@@ -720,7 +720,7 @@
 712  }
 713
 714  private class 
HangOnCloseThenRSCrashExecutor extends GoodRsExecutor {
-715public static final int 
TYPES_OF_FAILURE = 7;
+715public static final int 
TYPES_OF_FAILURE = 6;
 716private int invocations;
 717
 718@Override
@@ -732,129 +732,121 @@
 724  case 2: throw new 
RegionServerStoppedException("Fake!");
 725  case 3: throw new 
ServerNotRunningYetException("Fake!");
 726  case 4:
-727// We will expire the server that 
we failed to rpc against.
-728throw new 
FailedRemoteDispatchException("Fake!");
-729  case 5:
-730// Mark this regionserver as 
already expiring so we go different code route; i.e. we
-731// FAIL to expire the remote 
server and presume ok to move region to CLOSED. HBASE-20137.
-732
TestAssignmentManager.this.master.getServerManager().expireServer(server);
-733throw new 
FailedRemoteDispatchException("Fake!");
-734  case 6:
-735LOG.info("Return null response 
from serverName=" + server + "; means STUCK...TODO timeout");
-736executor.schedule(new Runnable() 
{
-737  @Override
-738  public void run() {
-739LOG.info("Sending in CRASH of 
" + server);
-740doCrash(server);
-741  }
-742}, 1, TimeUnit.SECONDS);
-743return null;
-744  default:
-745return 
super.execCloseRegion(server, regionName);
-746  }
-747}
-748  }
-749
-750  private class RandRsExecutor extends 
NoopRsExecutor {
-751private final Random rand = new 
Random();
-752
-753@Override
-754public ExecuteProceduresResponse 
sendRequest(ServerName server, ExecuteProceduresRequest req)
-755throws IOException {
-756  switch (rand.nextInt(5)) {
-757case 0: throw new 
ServerNotRunningYetException("wait on server startup");
-758case 1: throw new 
SocketTimeoutException("simulate socket timeout");
-759case 2: throw new 
RemoteException("java.io.IOException", "unexpected exception");
-760  }
-761  return super.sendRequest(server, 
req);
-762}
-763
-764@Override
-765protected RegionOpeningState 
execOpenRegion(final ServerName server, RegionOpenInfo openReq)
-766throws IOException {
-767  switch (rand.nextInt(6)) {
-768case 0:
-769  LOG.info("Return OPENED 
response");
-770  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-771  return 
OpenRegionResponse.RegionOpeningState.OPENED;
-772case 1:
-773  LOG.info("Return transition 
report that OPENED/ALREADY_OPENED response");
-774  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-775  return 
OpenRegionResponse.RegionOpeningState.ALREADY_OPENED;
-776case 2:
-777  LOG.info("Return transition 
report that FAILED_OPEN/FAILED_OPENING response");
-778  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.FAILED_OPEN);
-779  return 
OpenRegionResponse.RegionOpeningState.FAILED_OPENING;
-780  }
-781  // The procedure on master will 
just hang forever because nothing comes back
-782  // from the RS in this case.
-783  LOG.info("Return null as response; 
means proc stuck so we send in a crash report after a few seconds...");
-784  executor.schedule(new Runnable() 
{
-785@Override
-786public void run() {
-787  LOG.info("Delayed CRASHING of " 
+ server);
-788  doCrash(server);
-789}
-790  }, 5, TimeUnit.SECONDS);
-791  return null;
-792}
-793
-794@Override
-795protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
-796throws IOException {
-797  CloseRegionResponse.Builder resp = 
CloseRegionResponse.newBuilder();
-798  boolean closed = 
rand.nextBoolean();
-799  if (closed) {
-800RegionInfo hri = 
am.getRegionInfo(regionName);
-801sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED);
-802  }
-803  resp.setClosed(closed);
-804  return resp.build();
-805}
-806  }
+727

[07/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
index 74d0207..58257aa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
@@ -29,21 +29,16 @@
 021import 
org.apache.yetus.audience.InterfaceAudience;
 022
 023/**
-024 * Used internally signaling failed queue 
of a remote procedure operation.
-025 * Usually happens because no such remote 
server; it is being processed as crashed so it is not
-026 * online at time of RPC. Otherwise, 
something unexpected happened.
-027 */
-028@SuppressWarnings("serial")
-029@InterfaceAudience.Private
-030public class 
FailedRemoteDispatchException extends HBaseIOException {
-031  public FailedRemoteDispatchException() 
{
-032super();
-033  }
-034
-035  public 
FailedRemoteDispatchException(String msg) {
-036super(msg);
-037  }
-038}
+024 * Used internally signaling failed queue 
of a remote procedure
+025 * operation.
+026 */
+027@SuppressWarnings("serial")
+028@InterfaceAudience.Private
+029public class 
FailedRemoteDispatchException extends HBaseIOException {
+030  public 
FailedRemoteDispatchException(String msg) {
+031super(msg);
+032  }
+033}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index f1b91d0..19e4a1b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -185,254 +185,251 @@
 177  public void remoteCallFailed(final 
MasterProcedureEnv env,
 178  final ServerName serverName, final 
IOException exception) {
 179final RegionStateNode regionNode = 
getRegionState(env);
-180LOG.warn("Remote call failed {}; 
rit={}, exception={}", this, regionNode.getState(),
-181exception.toString());
-182if (remoteCallFailed(env, regionNode, 
exception)) {
-183  // NOTE: This call to wakeEvent 
puts this Procedure back on the scheduler.
-184  // Thereafter, another Worker can 
be in here so DO NOT MESS WITH STATE beyond
-185  // this method. Just get out of 
this current processing quickly.
-186  
regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
-187}
-188// else leave the procedure in 
suspended state; it is waiting on another call to this callback
-189  }
-190
-191  /**
-192   * Be careful! At the end of this 
method, the procedure has either succeeded
-193   * and this procedure has been set into 
a suspended state OR, we failed and
-194   * this procedure has been put back on 
the scheduler ready for another worker
-195   * to pick it up. In both cases, we 
need to exit the current Worker processing
-196   * immediately!
-197   * @return True if we successfully 
dispatched the call and false if we failed;
-198   * if failed, we need to roll back any 
setup done for the dispatch.
-199   */
-200  protected boolean 
addToRemoteDispatcher(final MasterProcedureEnv env,
-201  final ServerName targetServer) {
-202assert targetServer == null || 
targetServer.equals(getRegionState(env).getRegionLocation()):
-203  "targetServer=" + targetServer + " 
getRegionLocation=" +
-204
getRegionState(env).getRegionLocation(); // TODO
-205
-206LOG.info("Dispatch " + this + "; " + 
getRegionState(env).toShortString());
+180String msg = exception.getMessage() 
== null? exception.getClass().getSimpleName():
+181  exception.getMessage();
+182LOG.warn("Remote call failed " + this 
+ "; " + regionNode.toShortString() +
+183  "; exception=" + msg);
+184if (remoteCallFailed(env, regionNode, 
exception)) {
+185  // NOTE: This call to wakeEvent 
puts this Procedure back on the scheduler.
+186  // Thereafter, another Worker can 
be in here so DO NOT MESS WITH STATE beyond
+187  // this method. Just get out of 
this current processing quickly.
+188  
regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
+189}
+190// else leave the procedure in 
suspended state; it is waiting on another call to this callback
+191  }
+192
+193  /**
+194   * Be careful! At the end of this 

[18/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html 
b/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html
index 4d158b6..1a5133a 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class BinaryComparator
+public class BinaryComparator
 extends ByteArrayComparable
 A binary comparator which lexicographically compares 
against the specified
  byte array using Bytes.compareTo(byte[],
 byte[]).
@@ -243,7 +243,7 @@ extends 
 
 BinaryComparator
-publicBinaryComparator(byte[]value)
+publicBinaryComparator(byte[]value)
 Constructor
 
 Parameters:
@@ -265,7 +265,7 @@ extends 
 
 compareTo
-publicintcompareTo(byte[]value,
+publicintcompareTo(byte[]value,
  intoffset,
  intlength)
 Description copied from 
class:ByteArrayComparable
@@ -290,7 +290,7 @@ extends 
 
 compareTo
-publicintcompareTo(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffervalue,
+publicintcompareTo(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffervalue,
  intoffset,
  intlength)
 Description copied from 
class:ByteArrayComparable
@@ -314,7 +314,7 @@ extends 
 
 toByteArray
-publicbyte[]toByteArray()
+publicbyte[]toByteArray()
 
 Specified by:
 toByteArrayin
 classByteArrayComparable
@@ -329,7 +329,7 @@ extends 
 
 parseFrom
-public staticBinaryComparatorparseFrom(byte[]pbBytes)
+public staticBinaryComparatorparseFrom(byte[]pbBytes)
   throws DeserializationException
 
 Parameters:
@@ -349,7 +349,7 @@ extends 
 
 areSerializedFieldsEqual
-booleanareSerializedFieldsEqual(ByteArrayComparableother)
+booleanareSerializedFieldsEqual(ByteArrayComparableother)
 
 Overrides:
 areSerializedFieldsEqualin
 classByteArrayComparable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html 
b/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html
index c9b9bf6..ad8084b 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class BinaryPrefixComparator
+public class BinaryPrefixComparator
 extends ByteArrayComparable
 A comparator which compares against a specified byte array, 
but only compares
  up to the length of this byte array. For the rest it is similar to
@@ -240,7 +240,7 @@ extends 
 
 BinaryPrefixComparator
-publicBinaryPrefixComparator(byte[]value)
+publicBinaryPrefixComparator(byte[]value)
 Constructor
 
 Parameters:
@@ -262,7 +262,7 @@ extends 
 
 compareTo
-publicintcompareTo(byte[]value,
+publicintcompareTo(byte[]value,
  intoffset,
  intlength)
 Description copied from 
class:ByteArrayComparable
@@ -287,7 +287,7 @@ extends 
 
 compareTo
-publicintcompareTo(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffervalue,
+publicintcompareTo(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffervalue,
  intoffset,
  intlength)
 Description copied from 
class:ByteArrayComparable
@@ -311,7 +311,7 @@ extends 
 
 toByteArray
-publicbyte[]toByteArray()
+publicbyte[]toByteArray()
 
 Specified by:
 toByteArrayin
 classByteArrayComparable
@@ -326,7 +326,7 @@ extends 
 
 parseFrom
-public staticBinaryPrefixComparatorparseFrom(byte[]pbBytes)
+public staticBinaryPrefixComparatorparseFrom(byte[]pbBytes)
 throws DeserializationException
 
 Parameters:
@@ -346,7 +346,7 @@ extends 
 
 areSerializedFieldsEqual
-booleanareSerializedFieldsEqual(ByteArrayComparableother)
+booleanareSerializedFieldsEqual(ByteArrayComparableother)
 
 Overrides:
 areSerializedFieldsEqualin
 classByteArrayComparable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/filter/BitComparator.BitwiseOp.html
--
diff --git 

[15/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
index d327c65..a39fe8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
@@ -44,84 +44,85 @@
 036 * A BigDecimal comparator which 
numerical compares against the specified byte array
 037 */
 038@InterfaceAudience.Public
-039public class BigDecimalComparator extends 
ByteArrayComparable {
-040  private BigDecimal bigDecimal;
-041
-042  public BigDecimalComparator(BigDecimal 
value) {
-043super(Bytes.toBytes(value));
-044this.bigDecimal = value;
-045  }
-046
-047  @Override
-048  public boolean equals(Object obj) {
-049if (obj == null || !(obj instanceof 
BigDecimalComparator)) {
-050  return false;
-051}
-052if (this == obj) {
-053  return true;
-054}
-055BigDecimalComparator bdc = 
(BigDecimalComparator) obj;
-056return 
this.bigDecimal.equals(bdc.bigDecimal);
-057  }
-058
-059  @Override
-060  public int hashCode() {
-061return 
Objects.hash(this.bigDecimal);
-062  }
-063
-064  @Override
-065  public int compareTo(byte[] value, int 
offset, int length) {
-066BigDecimal that = 
Bytes.toBigDecimal(value, offset, length);
-067return 
this.bigDecimal.compareTo(that);
-068  }
-069
-070  @Override
-071  public int compareTo(ByteBuffer value, 
int offset, int length) {
-072BigDecimal that = 
ByteBufferUtils.toBigDecimal(value, offset, length);
-073return 
this.bigDecimal.compareTo(that);
-074  }
-075
-076  /**
-077   * @return The comparator serialized 
using pb
-078   */
-079  @Override
-080  public byte[] toByteArray() {
-081
ComparatorProtos.BigDecimalComparator.Builder builder =
-082
ComparatorProtos.BigDecimalComparator.newBuilder();
-083
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
-084return 
builder.build().toByteArray();
-085  }
-086
-087  /**
-088   * @param pbBytes A pb serialized 
{@link BigDecimalComparator} instance
-089   * @return An instance of {@link 
BigDecimalComparator} made from codebytes/code
-090   * @throws DeserializationException A 
deserialization exception
-091   * @see #toByteArray
-092   */
-093  public static BigDecimalComparator 
parseFrom(final byte[] pbBytes)
-094  throws DeserializationException {
-095ComparatorProtos.BigDecimalComparator 
proto;
-096try {
-097  proto = 
ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes);
-098} catch 
(InvalidProtocolBufferException e) {
-099  throw new 
DeserializationException(e);
-100}
-101return new 
BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue()
-102.toByteArray()));
-103  }
-104
-105  /**
-106   * @param other the other comparator
-107   * @return true if and only if the 
fields of the comparator that are serialized are equal to the
-108   * corresponding fields in 
other. Used for testing.
-109   */
-110  boolean 
areSerializedFieldsEqual(BigDecimalComparator other) {
-111if (other == this) {
-112  return true;
-113}
-114return 
super.areSerializedFieldsEqual(other);
-115  }
-116}
+039@SuppressWarnings("ComparableType") // 
Should this move to Comparator usage?
+040public class BigDecimalComparator extends 
ByteArrayComparable {
+041  private BigDecimal bigDecimal;
+042
+043  public BigDecimalComparator(BigDecimal 
value) {
+044super(Bytes.toBytes(value));
+045this.bigDecimal = value;
+046  }
+047
+048  @Override
+049  public boolean equals(Object obj) {
+050if (obj == null || !(obj instanceof 
BigDecimalComparator)) {
+051  return false;
+052}
+053if (this == obj) {
+054  return true;
+055}
+056BigDecimalComparator bdc = 
(BigDecimalComparator) obj;
+057return 
this.bigDecimal.equals(bdc.bigDecimal);
+058  }
+059
+060  @Override
+061  public int hashCode() {
+062return 
Objects.hash(this.bigDecimal);
+063  }
+064
+065  @Override
+066  public int compareTo(byte[] value, int 
offset, int length) {
+067BigDecimal that = 
Bytes.toBigDecimal(value, offset, length);
+068return 
this.bigDecimal.compareTo(that);
+069  }
+070
+071  @Override
+072  public int compareTo(ByteBuffer value, 
int offset, int length) {
+073BigDecimal that = 
ByteBufferUtils.toBigDecimal(value, offset, length);
+074return 
this.bigDecimal.compareTo(that);
+075  }
+076
+077  /**
+078   * @return The comparator serialized 
using pb
+079   */
+080  @Override
+081  public byte[] toByteArray() {
+082
ComparatorProtos.BigDecimalComparator.Builder builder =
+083

[25/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/e9a81b89
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/e9a81b89
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/e9a81b89

Branch: refs/heads/asf-site
Commit: e9a81b8991a570bfb86b17e28ea4dddced41aee0
Parents: 9d2d5a5
Author: jenkins 
Authored: Thu Mar 8 14:53:37 2018 +
Committer: jenkins 
Committed: Thu Mar 8 14:53:37 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 .../hbase/filter/BigDecimalComparator.html  |16 +-
 .../hadoop/hbase/filter/BinaryComparator.html   |12 +-
 .../hbase/filter/BinaryPrefixComparator.html|12 +-
 .../hbase/filter/BitComparator.BitwiseOp.html   |12 +-
 .../hadoop/hbase/filter/BitComparator.html  |16 +-
 .../hbase/filter/ByteArrayComparable.html   |16 +-
 .../hadoop/hbase/filter/LongComparator.html |12 +-
 .../hadoop/hbase/filter/NullComparator.html |18 +-
 .../RegexStringComparator.EngineType.html   |10 +-
 .../hbase/filter/RegexStringComparator.html |18 +-
 .../hbase/filter/SubstringComparator.html   |12 +-
 .../org/apache/hadoop/hbase/ServerLoad.html | 2 +-
 .../apache/hadoop/hbase/client/Mutation.html| 2 +-
 .../hbase/filter/BigDecimalComparator.html  |   157 +-
 .../hadoop/hbase/filter/BinaryComparator.html   |   121 +-
 .../hbase/filter/BinaryPrefixComparator.html|   131 +-
 .../hbase/filter/BitComparator.BitwiseOp.html   |   251 +-
 .../hadoop/hbase/filter/BitComparator.html  |   251 +-
 .../hbase/filter/ByteArrayComparable.html   |   155 +-
 .../hadoop/hbase/filter/LongComparator.html |   117 +-
 .../hadoop/hbase/filter/NullComparator.html |   145 +-
 .../RegexStringComparator.EngineType.html   |   695 +-
 .../hbase/filter/RegexStringComparator.html |   695 +-
 .../hbase/filter/SubstringComparator.html   |   137 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 38268 -
 checkstyle.rss  |22 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html |   149 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   | 7 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hbase/class-use/HBaseIOException.html   | 3 +-
 .../hadoop/hbase/class-use/ServerName.html  | 2 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../hadoop/hbase/errorprone/AlwaysPasses.html   | 4 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hbase/filter/BigDecimalComparator.html  |20 +-
 .../hadoop/hbase/filter/BinaryComparator.html   |14 +-
 .../hbase/filter/BinaryPrefixComparator.html|14 +-
 .../hbase/filter/BitComparator.BitwiseOp.html   |12 +-
 .../hadoop/hbase/filter/BitComparator.html  |18 +-
 .../hbase/filter/ByteArrayComparable.html   |20 +-
 .../hadoop/hbase/filter/LongComparator.html |16 +-
 .../hadoop/hbase/filter/NullComparator.html |20 +-
 .../filter/RegexStringComparator.Engine.html|14 +-
 .../RegexStringComparator.EngineType.html   |10 +-
 .../RegexStringComparator.JavaRegexEngine.html  |20 +-
 .../RegexStringComparator.JoniRegexEngine.html  |28 +-
 .../hbase/filter/RegexStringComparator.html |26 +-
 .../hbase/filter/SubstringComparator.html   |16 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hadoop/hbase/master/ServerManager.html  |60 +-
 .../FailedRemoteDispatchException.html  |21 +-
 .../assignment/RegionTransitionProcedure.html   |34 +-
 .../master/assignment/UnassignProcedure.html|29 +-
 .../class-use/RegionStates.RegionStateNode.html |21 +-
 .../master/assignment/package-summary.html  | 3 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../procedure/class-use/MasterProcedureEnv.html |43 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |18 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 

[17/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html 
b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
index 1911d3e..a667e8b 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class RegexStringComparator
+public class RegexStringComparator
 extends ByteArrayComparable
 This comparator is for use with CompareFilter 
implementations, such
  as RowFilter, QualifierFilter, and ValueFilter, 
for
@@ -346,7 +346,7 @@ extends 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -355,7 +355,7 @@ extends 
 
 engine
-privateRegexStringComparator.Engine engine
+privateRegexStringComparator.Engine engine
 
 
 
@@ -372,7 +372,7 @@ extends 
 
 RegexStringComparator
-publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr)
+publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr)
 Constructor
  Adds Pattern.DOTALL to the underlying Pattern
 
@@ -387,7 +387,7 @@ extends 
 
 RegexStringComparator
-publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
+publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
  RegexStringComparator.EngineTypeengine)
 Constructor
  Adds Pattern.DOTALL to the underlying Pattern
@@ -404,7 +404,7 @@ extends 
 
 RegexStringComparator
-publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
+publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
  intflags)
 Constructor
 
@@ -420,7 +420,7 @@ extends 
 
 RegexStringComparator
-publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
+publicRegexStringComparator(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringexpr,
  intflags,
  RegexStringComparator.EngineTypeengine)
 Constructor
@@ -446,7 +446,7 @@ extends 
 
 setCharset
-publicvoidsetCharset(https://docs.oracle.com/javase/8/docs/api/java/nio/charset/Charset.html?is-external=true;
 title="class or interface in java.nio.charset">Charsetcharset)
+publicvoidsetCharset(https://docs.oracle.com/javase/8/docs/api/java/nio/charset/Charset.html?is-external=true;
 title="class or interface in java.nio.charset">Charsetcharset)
 Specifies the https://docs.oracle.com/javase/8/docs/api/java/nio/charset/Charset.html?is-external=true;
 title="class or interface in java.nio.charset">Charset to use 
to convert the row key to a String.
  
  The row key needs to be converted to a String in order to be matched
@@ -467,7 +467,7 @@ extends 
 
 compareTo
-publicintcompareTo(byte[]value,
+publicintcompareTo(byte[]value,
  intoffset,
  intlength)
 Description copied from 
class:ByteArrayComparable
@@ -492,7 +492,7 @@ extends 
 
 toByteArray
-publicbyte[]toByteArray()
+publicbyte[]toByteArray()
 
 Specified by:
 toByteArrayin
 classByteArrayComparable
@@ -507,7 +507,7 @@ extends 
 
 parseFrom
-public staticRegexStringComparatorparseFrom(byte[]pbBytes)
+public staticRegexStringComparatorparseFrom(byte[]pbBytes)
throws DeserializationException
 
 Parameters:
@@ -527,7 +527,7 @@ extends 
 
 areSerializedFieldsEqual
-booleanareSerializedFieldsEqual(ByteArrayComparableother)
+booleanareSerializedFieldsEqual(ByteArrayComparableother)
 
 Overrides:
 areSerializedFieldsEqualin
 classByteArrayComparable
@@ -545,7 +545,7 @@ extends 
 
 getEngine
-RegexStringComparator.EnginegetEngine()
+RegexStringComparator.EnginegetEngine()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/org/apache/hadoop/hbase/filter/SubstringComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/SubstringComparator.html 

[11/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
index ce6ff26..577439c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override

[05/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 7f5135a..a514d28 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -653,10 +653,10 @@
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.TestRegionServerReadRequestMetrics.Metric
-org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
 org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
 org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType
 org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
+org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
index 5936719..fbeea17 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
@@ -253,10 +253,10 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify.Counters
-org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator.Counts
-org.apache.hadoop.hbase.test.IntegrationTestLoadAndVerify.Counters
 org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify.Counts
+org.apache.hadoop.hbase.test.IntegrationTestLoadAndVerify.Counters
+org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator.Counts
+org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify.Counters
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index ea95008..cb85ca6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -138,9 +138,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
-org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
 org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
+org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
+org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/overview-tree.html
--
diff --git a/testdevapidocs/overview-tree.html 
b/testdevapidocs/overview-tree.html
index 3a0da67..9fb9a6b 100644
--- a/testdevapidocs/overview-tree.html
+++ b/testdevapidocs/overview-tree.html
@@ -3409,6 +3409,7 @@
 org.apache.hadoop.hbase.util.TestShowProperties
 org.apache.hadoop.hbase.master.TestShutdownBackupMaster
 org.apache.hadoop.hbase.regionserver.TestShutdownWhileWALBroken
+org.apache.hadoop.hbase.master.TestShutdownWithNoRegionServer
 org.apache.hadoop.hbase.util.TestSimpleMutableByteRange
 org.apache.hadoop.hbase.util.TestSimplePositionedMutableByteRange
 

[01/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9d2d5a5c9 -> e9a81b899


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
index 507c0cd..d27f392 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
@@ -720,7 +720,7 @@
 712  }
 713
 714  private class 
HangOnCloseThenRSCrashExecutor extends GoodRsExecutor {
-715public static final int 
TYPES_OF_FAILURE = 7;
+715public static final int 
TYPES_OF_FAILURE = 6;
 716private int invocations;
 717
 718@Override
@@ -732,129 +732,121 @@
 724  case 2: throw new 
RegionServerStoppedException("Fake!");
 725  case 3: throw new 
ServerNotRunningYetException("Fake!");
 726  case 4:
-727// We will expire the server that 
we failed to rpc against.
-728throw new 
FailedRemoteDispatchException("Fake!");
-729  case 5:
-730// Mark this regionserver as 
already expiring so we go different code route; i.e. we
-731// FAIL to expire the remote 
server and presume ok to move region to CLOSED. HBASE-20137.
-732
TestAssignmentManager.this.master.getServerManager().expireServer(server);
-733throw new 
FailedRemoteDispatchException("Fake!");
-734  case 6:
-735LOG.info("Return null response 
from serverName=" + server + "; means STUCK...TODO timeout");
-736executor.schedule(new Runnable() 
{
-737  @Override
-738  public void run() {
-739LOG.info("Sending in CRASH of 
" + server);
-740doCrash(server);
-741  }
-742}, 1, TimeUnit.SECONDS);
-743return null;
-744  default:
-745return 
super.execCloseRegion(server, regionName);
-746  }
-747}
-748  }
-749
-750  private class RandRsExecutor extends 
NoopRsExecutor {
-751private final Random rand = new 
Random();
-752
-753@Override
-754public ExecuteProceduresResponse 
sendRequest(ServerName server, ExecuteProceduresRequest req)
-755throws IOException {
-756  switch (rand.nextInt(5)) {
-757case 0: throw new 
ServerNotRunningYetException("wait on server startup");
-758case 1: throw new 
SocketTimeoutException("simulate socket timeout");
-759case 2: throw new 
RemoteException("java.io.IOException", "unexpected exception");
-760  }
-761  return super.sendRequest(server, 
req);
-762}
-763
-764@Override
-765protected RegionOpeningState 
execOpenRegion(final ServerName server, RegionOpenInfo openReq)
-766throws IOException {
-767  switch (rand.nextInt(6)) {
-768case 0:
-769  LOG.info("Return OPENED 
response");
-770  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-771  return 
OpenRegionResponse.RegionOpeningState.OPENED;
-772case 1:
-773  LOG.info("Return transition 
report that OPENED/ALREADY_OPENED response");
-774  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-775  return 
OpenRegionResponse.RegionOpeningState.ALREADY_OPENED;
-776case 2:
-777  LOG.info("Return transition 
report that FAILED_OPEN/FAILED_OPENING response");
-778  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.FAILED_OPEN);
-779  return 
OpenRegionResponse.RegionOpeningState.FAILED_OPENING;
-780  }
-781  // The procedure on master will 
just hang forever because nothing comes back
-782  // from the RS in this case.
-783  LOG.info("Return null as response; 
means proc stuck so we send in a crash report after a few seconds...");
-784  executor.schedule(new Runnable() 
{
-785@Override
-786public void run() {
-787  LOG.info("Delayed CRASHING of " 
+ server);
-788  doCrash(server);
-789}
-790  }, 5, TimeUnit.SECONDS);
-791  return null;
-792}
-793
-794@Override
-795protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
-796throws IOException {
-797  CloseRegionResponse.Builder resp = 
CloseRegionResponse.newBuilder();
-798  boolean closed = 
rand.nextBoolean();
-799  if (closed) {
-800RegionInfo hri = 
am.getRegionInfo(regionName);
-801sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED);
-802 

[22/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
index ce6ff26..577439c 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override
-198  boolean 

[09/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
index ce6ff26..577439c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/RegexStringComparator.html
@@ -77,353 +77,354 @@
 069 * @see java.util.regex.Pattern
 070 */
 071@InterfaceAudience.Public
-072public class RegexStringComparator 
extends ByteArrayComparable {
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(RegexStringComparator.class);
-075
-076  private Engine engine;
-077
-078  /** Engine implementation type 
(default=JAVA) */
-079  @InterfaceAudience.Public
-080  public enum EngineType {
-081JAVA,
-082JONI
-083  }
-084
-085  /**
-086   * Constructor
-087   * Adds Pattern.DOTALL to the 
underlying Pattern
-088   * @param expr a valid regular 
expression
-089   */
-090  public RegexStringComparator(String 
expr) {
-091this(expr, Pattern.DOTALL);
-092  }
-093
-094  /**
-095   * Constructor
-096   * Adds Pattern.DOTALL to the 
underlying Pattern
-097   * @param expr a valid regular 
expression
-098   * @param engine engine implementation 
type
-099   */
-100  public RegexStringComparator(String 
expr, EngineType engine) {
-101this(expr, Pattern.DOTALL, engine);
-102  }
-103
-104  /**
-105   * Constructor
-106   * @param expr a valid regular 
expression
-107   * @param flags java.util.regex.Pattern 
flags
-108   */
-109  public RegexStringComparator(String 
expr, int flags) {
-110this(expr, flags, EngineType.JAVA);
-111  }
-112
-113  /**
-114   * Constructor
-115   * @param expr a valid regular 
expression
-116   * @param flags java.util.regex.Pattern 
flags
-117   * @param engine engine implementation 
type
-118   */
-119  public RegexStringComparator(String 
expr, int flags, EngineType engine) {
-120super(Bytes.toBytes(expr));
-121switch (engine) {
-122  case JAVA:
-123this.engine = new 
JavaRegexEngine(expr, flags);
-124break;
-125  case JONI:
-126this.engine = new 
JoniRegexEngine(expr, flags);
-127break;
-128}
-129  }
-130
-131  /**
-132   * Specifies the {@link Charset} to use 
to convert the row key to a String.
-133   * p
-134   * The row key needs to be converted to 
a String in order to be matched
-135   * against the regular expression.  
This method controls which charset is
-136   * used to do this conversion.
-137   * p
-138   * If the row key is made of arbitrary 
bytes, the charset {@code ISO-8859-1}
-139   * is recommended.
-140   * @param charset The charset to use.
-141   */
-142  public void setCharset(final Charset 
charset) {
-143engine.setCharset(charset.name());
-144  }
-145
-146  @Override
-147  public int compareTo(byte[] value, int 
offset, int length) {
-148return engine.compareTo(value, 
offset, length);
-149  }
-150
-151  /**
-152   * @return The comparator serialized 
using pb
-153   */
-154  @Override
-155  public byte [] toByteArray() {
-156return engine.toByteArray();
-157  }
-158
-159  /**
-160   * @param pbBytes A pb serialized 
{@link RegexStringComparator} instance
-161   * @return An instance of {@link 
RegexStringComparator} made from codebytes/code
-162   * @throws DeserializationException
-163   * @see #toByteArray
-164   */
-165  public static RegexStringComparator 
parseFrom(final byte [] pbBytes)
-166  throws DeserializationException {
-167
ComparatorProtos.RegexStringComparator proto;
-168try {
-169  proto = 
ComparatorProtos.RegexStringComparator.parseFrom(pbBytes);
-170} catch 
(InvalidProtocolBufferException e) {
-171  throw new 
DeserializationException(e);
-172}
-173RegexStringComparator comparator;
-174if (proto.hasEngine()) {
-175  EngineType engine = 
EngineType.valueOf(proto.getEngine());
-176  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
-177engine);
-178} else {
-179  comparator = new 
RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
-180}
-181String charset = 
proto.getCharset();
-182if (charset.length()  0) {
-183  try {
-184
comparator.getEngine().setCharset(charset);
-185  } catch 
(IllegalCharsetNameException e) {
-186LOG.error("invalid charset", 
e);
-187  }
-188}
-189return comparator;
-190  }
-191
-192  /**
-193   * @param other
-194   * @return true if and only if the 
fields of the comparator that are serialized
-195   * are equal to the corresponding 
fields in other.  Used for testing.
-196   */
-197  @Override
-198  boolean 
areSerializedFieldsEqual(ByteArrayComparable other) {
-199if 

[24/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/apidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
index d327c65..a39fe8b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/BigDecimalComparator.html
@@ -44,84 +44,85 @@
 036 * A BigDecimal comparator which 
numerical compares against the specified byte array
 037 */
 038@InterfaceAudience.Public
-039public class BigDecimalComparator extends 
ByteArrayComparable {
-040  private BigDecimal bigDecimal;
-041
-042  public BigDecimalComparator(BigDecimal 
value) {
-043super(Bytes.toBytes(value));
-044this.bigDecimal = value;
-045  }
-046
-047  @Override
-048  public boolean equals(Object obj) {
-049if (obj == null || !(obj instanceof 
BigDecimalComparator)) {
-050  return false;
-051}
-052if (this == obj) {
-053  return true;
-054}
-055BigDecimalComparator bdc = 
(BigDecimalComparator) obj;
-056return 
this.bigDecimal.equals(bdc.bigDecimal);
-057  }
-058
-059  @Override
-060  public int hashCode() {
-061return 
Objects.hash(this.bigDecimal);
-062  }
-063
-064  @Override
-065  public int compareTo(byte[] value, int 
offset, int length) {
-066BigDecimal that = 
Bytes.toBigDecimal(value, offset, length);
-067return 
this.bigDecimal.compareTo(that);
-068  }
-069
-070  @Override
-071  public int compareTo(ByteBuffer value, 
int offset, int length) {
-072BigDecimal that = 
ByteBufferUtils.toBigDecimal(value, offset, length);
-073return 
this.bigDecimal.compareTo(that);
-074  }
-075
-076  /**
-077   * @return The comparator serialized 
using pb
-078   */
-079  @Override
-080  public byte[] toByteArray() {
-081
ComparatorProtos.BigDecimalComparator.Builder builder =
-082
ComparatorProtos.BigDecimalComparator.newBuilder();
-083
builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value));
-084return 
builder.build().toByteArray();
-085  }
-086
-087  /**
-088   * @param pbBytes A pb serialized 
{@link BigDecimalComparator} instance
-089   * @return An instance of {@link 
BigDecimalComparator} made from codebytes/code
-090   * @throws DeserializationException A 
deserialization exception
-091   * @see #toByteArray
-092   */
-093  public static BigDecimalComparator 
parseFrom(final byte[] pbBytes)
-094  throws DeserializationException {
-095ComparatorProtos.BigDecimalComparator 
proto;
-096try {
-097  proto = 
ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes);
-098} catch 
(InvalidProtocolBufferException e) {
-099  throw new 
DeserializationException(e);
-100}
-101return new 
BigDecimalComparator(Bytes.toBigDecimal(proto.getComparable().getValue()
-102.toByteArray()));
-103  }
-104
-105  /**
-106   * @param other the other comparator
-107   * @return true if and only if the 
fields of the comparator that are serialized are equal to the
-108   * corresponding fields in 
other. Used for testing.
-109   */
-110  boolean 
areSerializedFieldsEqual(BigDecimalComparator other) {
-111if (other == this) {
-112  return true;
-113}
-114return 
super.areSerializedFieldsEqual(other);
-115  }
-116}
+039@SuppressWarnings("ComparableType") // 
Should this move to Comparator usage?
+040public class BigDecimalComparator extends 
ByteArrayComparable {
+041  private BigDecimal bigDecimal;
+042
+043  public BigDecimalComparator(BigDecimal 
value) {
+044super(Bytes.toBytes(value));
+045this.bigDecimal = value;
+046  }
+047
+048  @Override
+049  public boolean equals(Object obj) {
+050if (obj == null || !(obj instanceof 
BigDecimalComparator)) {
+051  return false;
+052}
+053if (this == obj) {
+054  return true;
+055}
+056BigDecimalComparator bdc = 
(BigDecimalComparator) obj;
+057return 
this.bigDecimal.equals(bdc.bigDecimal);
+058  }
+059
+060  @Override
+061  public int hashCode() {
+062return 
Objects.hash(this.bigDecimal);
+063  }
+064
+065  @Override
+066  public int compareTo(byte[] value, int 
offset, int length) {
+067BigDecimal that = 
Bytes.toBigDecimal(value, offset, length);
+068return 
this.bigDecimal.compareTo(that);
+069  }
+070
+071  @Override
+072  public int compareTo(ByteBuffer value, 
int offset, int length) {
+073BigDecimal that = 
ByteBufferUtils.toBigDecimal(value, offset, length);
+074return 
this.bigDecimal.compareTo(that);
+075  }
+076
+077  /**
+078   * @return The comparator serialized 
using pb
+079   */
+080  @Override
+081  public byte[] toByteArray() {
+082
ComparatorProtos.BigDecimalComparator.Builder builder =
+083
ComparatorProtos.BigDecimalComparator.newBuilder();
+084

[23/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/apidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
index da5d73e..5afdb22 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/ByteArrayComparable.html
@@ -40,83 +40,84 @@
 032// adding special compareTo methods. We 
have to clean it. Deprecate this class and replace it
 033// with a more generic one which says it 
compares bytes (not necessary a byte array only)
 034// BytesComparable implements 
ComparableByte will work?
-035public abstract class ByteArrayComparable 
implements Comparablebyte[] {
-036
-037  byte[] value;
-038
-039  /**
-040   * Constructor.
-041   * @param value the value to compare 
against
-042   */
-043  public ByteArrayComparable(byte [] 
value) {
-044this.value = value;
-045  }
-046
-047  public byte[] getValue() {
-048return value;
-049  }
-050
-051  /**
-052   * @return The comparator serialized 
using pb
-053   */
-054  public abstract byte [] 
toByteArray();
-055
-056  /**
-057   * @param pbBytes A pb serialized 
{@link ByteArrayComparable} instance
-058   * @return An instance of {@link 
ByteArrayComparable} made from codebytes/code
-059   * @throws DeserializationException
-060   * @see #toByteArray
-061   */
-062  public static ByteArrayComparable 
parseFrom(final byte [] pbBytes)
-063  throws DeserializationException {
-064throw new DeserializationException(
-065  "parseFrom called on base 
ByteArrayComparable, but should be called on derived type");
-066  }
-067
-068  /**
-069   * @param other
-070   * @return true if and only if the 
fields of the comparator that are serialized
-071   * are equal to the corresponding 
fields in other.  Used for testing.
-072   */
-073  boolean 
areSerializedFieldsEqual(ByteArrayComparable other) {
-074if (other == this) return true;
-075
-076return Bytes.equals(this.getValue(), 
other.getValue());
-077  }
-078
-079  @Override
-080  public int compareTo(byte [] value) {
-081return compareTo(value, 0, 
value.length);
-082  }
-083
-084  /**
-085   * Special compareTo method for 
subclasses, to avoid
-086   * copying byte[] unnecessarily.
-087   * @param value byte[] to compare
-088   * @param offset offset into value
-089   * @param length number of bytes to 
compare
-090   * @return a negative integer, zero, or 
a positive integer as this object
-091   * is less than, equal to, or 
greater than the specified object.
-092   */
-093  public abstract int compareTo(byte [] 
value, int offset, int length);
-094
-095  /**
-096   * Special compareTo method for 
subclasses, to avoid copying bytes unnecessarily.
-097   * @param value bytes to compare within 
a ByteBuffer
-098   * @param offset offset into value
-099   * @param length number of bytes to 
compare
-100   * @return a negative integer, zero, or 
a positive integer as this object
-101   * is less than, equal to, or 
greater than the specified object.
-102   */
-103  public int compareTo(ByteBuffer value, 
int offset, int length) {
-104// For BC, providing a default 
implementation here which is doing a bytes copy to a temp byte[]
-105// and calling compareTo(byte[]). 
Make sure to override this method in subclasses to avoid
-106// copying bytes unnecessarily.
-107byte[] temp = new byte[length];
-108
ByteBufferUtils.copyFromBufferToArray(temp, value, offset, 0, length);
-109return compareTo(temp);
-110  }
-111}
+035@SuppressWarnings("ComparableType") // 
Should this move to Comparator usage?
+036public abstract class ByteArrayComparable 
implements Comparablebyte[] {
+037
+038  byte[] value;
+039
+040  /**
+041   * Constructor.
+042   * @param value the value to compare 
against
+043   */
+044  public ByteArrayComparable(byte [] 
value) {
+045this.value = value;
+046  }
+047
+048  public byte[] getValue() {
+049return value;
+050  }
+051
+052  /**
+053   * @return The comparator serialized 
using pb
+054   */
+055  public abstract byte [] 
toByteArray();
+056
+057  /**
+058   * @param pbBytes A pb serialized 
{@link ByteArrayComparable} instance
+059   * @return An instance of {@link 
ByteArrayComparable} made from codebytes/code
+060   * @throws DeserializationException
+061   * @see #toByteArray
+062   */
+063  public static ByteArrayComparable 
parseFrom(final byte [] pbBytes)
+064  throws DeserializationException {
+065throw new DeserializationException(
+066  "parseFrom called on base 
ByteArrayComparable, but should be called on derived type");
+067  }
+068
+069  /**
+070   * @param other
+071   * @return true if and only if the 
fields of the comparator that are serialized
+072   * are equal to the 

[04/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
index 507c0cd..d27f392 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
@@ -720,7 +720,7 @@
 712  }
 713
 714  private class 
HangOnCloseThenRSCrashExecutor extends GoodRsExecutor {
-715public static final int 
TYPES_OF_FAILURE = 7;
+715public static final int 
TYPES_OF_FAILURE = 6;
 716private int invocations;
 717
 718@Override
@@ -732,129 +732,121 @@
 724  case 2: throw new 
RegionServerStoppedException("Fake!");
 725  case 3: throw new 
ServerNotRunningYetException("Fake!");
 726  case 4:
-727// We will expire the server that 
we failed to rpc against.
-728throw new 
FailedRemoteDispatchException("Fake!");
-729  case 5:
-730// Mark this regionserver as 
already expiring so we go different code route; i.e. we
-731// FAIL to expire the remote 
server and presume ok to move region to CLOSED. HBASE-20137.
-732
TestAssignmentManager.this.master.getServerManager().expireServer(server);
-733throw new 
FailedRemoteDispatchException("Fake!");
-734  case 6:
-735LOG.info("Return null response 
from serverName=" + server + "; means STUCK...TODO timeout");
-736executor.schedule(new Runnable() 
{
-737  @Override
-738  public void run() {
-739LOG.info("Sending in CRASH of 
" + server);
-740doCrash(server);
-741  }
-742}, 1, TimeUnit.SECONDS);
-743return null;
-744  default:
-745return 
super.execCloseRegion(server, regionName);
-746  }
-747}
-748  }
-749
-750  private class RandRsExecutor extends 
NoopRsExecutor {
-751private final Random rand = new 
Random();
-752
-753@Override
-754public ExecuteProceduresResponse 
sendRequest(ServerName server, ExecuteProceduresRequest req)
-755throws IOException {
-756  switch (rand.nextInt(5)) {
-757case 0: throw new 
ServerNotRunningYetException("wait on server startup");
-758case 1: throw new 
SocketTimeoutException("simulate socket timeout");
-759case 2: throw new 
RemoteException("java.io.IOException", "unexpected exception");
-760  }
-761  return super.sendRequest(server, 
req);
-762}
-763
-764@Override
-765protected RegionOpeningState 
execOpenRegion(final ServerName server, RegionOpenInfo openReq)
-766throws IOException {
-767  switch (rand.nextInt(6)) {
-768case 0:
-769  LOG.info("Return OPENED 
response");
-770  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-771  return 
OpenRegionResponse.RegionOpeningState.OPENED;
-772case 1:
-773  LOG.info("Return transition 
report that OPENED/ALREADY_OPENED response");
-774  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-775  return 
OpenRegionResponse.RegionOpeningState.ALREADY_OPENED;
-776case 2:
-777  LOG.info("Return transition 
report that FAILED_OPEN/FAILED_OPENING response");
-778  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.FAILED_OPEN);
-779  return 
OpenRegionResponse.RegionOpeningState.FAILED_OPENING;
-780  }
-781  // The procedure on master will 
just hang forever because nothing comes back
-782  // from the RS in this case.
-783  LOG.info("Return null as response; 
means proc stuck so we send in a crash report after a few seconds...");
-784  executor.schedule(new Runnable() 
{
-785@Override
-786public void run() {
-787  LOG.info("Delayed CRASHING of " 
+ server);
-788  doCrash(server);
-789}
-790  }, 5, TimeUnit.SECONDS);
-791  return null;
-792}
-793
-794@Override
-795protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
-796throws IOException {
-797  CloseRegionResponse.Builder resp = 
CloseRegionResponse.newBuilder();
-798  boolean closed = 
rand.nextBoolean();
-799  if (closed) {
-800RegionInfo hri = 
am.getRegionInfo(regionName);
-801sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED);
-802  }
-803  resp.setClosed(closed);
-804  return resp.build();
-805}
-806  }
+727LOG.info("Return null response 
from serverName=" + server + "; 

[08/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index b73a30b..8f8a8ff 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -563,503 +563,504 @@
 555  }
 556
 557  /*
-558   * Expire the passed server. Add it to 
list of dead servers and queue a shutdown processing.
-559   * @return True if we expired passed 
codeserverName/code else false if we failed to schedule
-560   * an expire (and attendant 
ServerCrashProcedure -- some clients are dependent on
-561   * server crash procedure being queued 
and need to know if has not been queued).
-562   */
-563  public synchronized boolean 
expireServer(final ServerName serverName) {
-564if 
(serverName.equals(master.getServerName())) {
-565  if (!(master.isAborted() || 
master.isStopped())) {
-566master.stop("We lost our 
znode?");
-567  }
-568  return false;
-569}
-570if 
(!master.isServerCrashProcessingEnabled()) {
-571  LOG.info("Master doesn't enable 
ServerShutdownHandler during initialization, "
-572  + "delay expiring server " + 
serverName);
-573  // Even we delay expire this 
server, we still need to handle Meta's RIT
-574  // that are against the crashed 
server; since when we do RecoverMetaProcedure,
-575  // the SCP is not enable yet and 
Meta's RIT may be suspend forever. See HBase-19287
-576  
master.getAssignmentManager().handleMetaRITOnCrashedServer(serverName);
-577  
this.queuedDeadServers.add(serverName);
-578  return false;
-579}
-580if 
(this.deadservers.isDeadServer(serverName)) {
-581  // TODO: Can this happen?  It 
shouldn't be online in this case?
-582  LOG.warn("Expiration of " + 
serverName +
-583  " but server shutdown already 
in progress");
-584  return false;
-585}
-586
moveFromOnlineToDeadServers(serverName);
-587
-588// If cluster is going down, yes, 
servers are going to be expiring; don't
-589// process as a dead server
-590if (isClusterShutdown()) {
-591  LOG.info("Cluster shutdown set; " + 
serverName +
-592" expired; onlineServers=" + 
this.onlineServers.size());
-593  if (this.onlineServers.isEmpty()) 
{
-594master.stop("Cluster shutdown 
set; onlineServer=0");
-595  }
-596  return false;
-597}
-598LOG.info("Processing expiration of " 
+ serverName + " on " + this.master.getServerName());
-599
master.getAssignmentManager().submitServerCrash(serverName, true);
-600
-601// Tell our listeners that a server 
was removed
-602if (!this.listeners.isEmpty()) {
-603  for (ServerListener listener : 
this.listeners) {
-604
listener.serverRemoved(serverName);
-605  }
-606}
-607return true;
-608  }
-609
-610  @VisibleForTesting
-611  public void 
moveFromOnlineToDeadServers(final ServerName sn) {
-612synchronized (onlineServers) {
-613  if 
(!this.onlineServers.containsKey(sn)) {
-614LOG.warn("Expiration of " + sn + 
" but server not online");
-615  }
-616  // Remove the server from the known 
servers lists and update load info BUT
-617  // add to deadservers first; do 
this so it'll show in dead servers list if
-618  // not in online servers list.
-619  this.deadservers.add(sn);
-620  this.onlineServers.remove(sn);
-621  onlineServers.notifyAll();
-622}
-623this.rsAdmins.remove(sn);
-624  }
-625
-626  public synchronized void 
processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
-627// When assignment manager is 
cleaning up the zookeeper nodes and rebuilding the
-628// in-memory region states, region 
servers could be down. Meta table can and
-629// should be re-assigned, log 
splitting can be done too. However, it is better to
-630// wait till the cleanup is done 
before re-assigning user regions.
-631//
-632// We should not wait in the server 
shutdown handler thread since it can clog
-633// the handler threads and meta table 
could not be re-assigned in case
-634// the corresponding server is down. 
So we queue them up here instead.
-635if 
(!master.getAssignmentManager().isFailoverCleanupDone()) {
-636  requeuedDeadServers.put(serverName, 
shouldSplitWal);
-637  return;
-638}
-639
-640this.deadservers.add(serverName);
-641
master.getAssignmentManager().submitServerCrash(serverName, shouldSplitWal);
-642  }
-643
-644  /**
-645   * Process the servers which died 
during master's initialization. It will be
-646   * called after HMaster#assignMeta and 
AssignmentManager#joinCluster.
-647   * */
-648  

[03/25] hbase-site git commit: Published site at a03d09abd72789bbf9364d8a9b2c54d0e9351af9.

2018-03-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e9a81b89/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
index 507c0cd..d27f392 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
@@ -720,7 +720,7 @@
 712  }
 713
 714  private class 
HangOnCloseThenRSCrashExecutor extends GoodRsExecutor {
-715public static final int 
TYPES_OF_FAILURE = 7;
+715public static final int 
TYPES_OF_FAILURE = 6;
 716private int invocations;
 717
 718@Override
@@ -732,129 +732,121 @@
 724  case 2: throw new 
RegionServerStoppedException("Fake!");
 725  case 3: throw new 
ServerNotRunningYetException("Fake!");
 726  case 4:
-727// We will expire the server that 
we failed to rpc against.
-728throw new 
FailedRemoteDispatchException("Fake!");
-729  case 5:
-730// Mark this regionserver as 
already expiring so we go different code route; i.e. we
-731// FAIL to expire the remote 
server and presume ok to move region to CLOSED. HBASE-20137.
-732
TestAssignmentManager.this.master.getServerManager().expireServer(server);
-733throw new 
FailedRemoteDispatchException("Fake!");
-734  case 6:
-735LOG.info("Return null response 
from serverName=" + server + "; means STUCK...TODO timeout");
-736executor.schedule(new Runnable() 
{
-737  @Override
-738  public void run() {
-739LOG.info("Sending in CRASH of 
" + server);
-740doCrash(server);
-741  }
-742}, 1, TimeUnit.SECONDS);
-743return null;
-744  default:
-745return 
super.execCloseRegion(server, regionName);
-746  }
-747}
-748  }
-749
-750  private class RandRsExecutor extends 
NoopRsExecutor {
-751private final Random rand = new 
Random();
-752
-753@Override
-754public ExecuteProceduresResponse 
sendRequest(ServerName server, ExecuteProceduresRequest req)
-755throws IOException {
-756  switch (rand.nextInt(5)) {
-757case 0: throw new 
ServerNotRunningYetException("wait on server startup");
-758case 1: throw new 
SocketTimeoutException("simulate socket timeout");
-759case 2: throw new 
RemoteException("java.io.IOException", "unexpected exception");
-760  }
-761  return super.sendRequest(server, 
req);
-762}
-763
-764@Override
-765protected RegionOpeningState 
execOpenRegion(final ServerName server, RegionOpenInfo openReq)
-766throws IOException {
-767  switch (rand.nextInt(6)) {
-768case 0:
-769  LOG.info("Return OPENED 
response");
-770  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-771  return 
OpenRegionResponse.RegionOpeningState.OPENED;
-772case 1:
-773  LOG.info("Return transition 
report that OPENED/ALREADY_OPENED response");
-774  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.OPENED);
-775  return 
OpenRegionResponse.RegionOpeningState.ALREADY_OPENED;
-776case 2:
-777  LOG.info("Return transition 
report that FAILED_OPEN/FAILED_OPENING response");
-778  sendTransitionReport(server, 
openReq.getRegion(), TransitionCode.FAILED_OPEN);
-779  return 
OpenRegionResponse.RegionOpeningState.FAILED_OPENING;
-780  }
-781  // The procedure on master will 
just hang forever because nothing comes back
-782  // from the RS in this case.
-783  LOG.info("Return null as response; 
means proc stuck so we send in a crash report after a few seconds...");
-784  executor.schedule(new Runnable() 
{
-785@Override
-786public void run() {
-787  LOG.info("Delayed CRASHING of " 
+ server);
-788  doCrash(server);
-789}
-790  }, 5, TimeUnit.SECONDS);
-791  return null;
-792}
-793
-794@Override
-795protected CloseRegionResponse 
execCloseRegion(ServerName server, byte[] regionName)
-796throws IOException {
-797  CloseRegionResponse.Builder resp = 
CloseRegionResponse.newBuilder();
-798  boolean closed = 
rand.nextBoolean();
-799  if (closed) {
-800RegionInfo hri = 
am.getRegionInfo(regionName);
-801sendTransitionReport(server, 
ProtobufUtil.toRegionInfo(hri), TransitionCode.CLOSED);
-802  }
-803  resp.setClosed(closed);
-804  return resp.build();
-805}
-806  }
+727

[33/45] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore

2018-03-08 Thread zhangduo
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker 
and remove ReplicationZKNodeCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af05d043
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af05d043
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af05d043

Branch: refs/heads/HBASE-19397-branch-2
Commit: af05d043fd823966c836a849f8d4de29ea1fb063
Parents: 3ed879b
Author: zhangduo 
Authored: Wed Jan 3 09:39:44 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/VerifyReplication.java  |   6 +-
 .../hbase/replication/ReplicationPeers.java |  26 +--
 .../hbase/replication/ReplicationUtils.java |  38 
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  13 --
 .../cleaner/ReplicationZKNodeCleaner.java   | 192 ---
 .../cleaner/ReplicationZKNodeCleanerChore.java  |  54 --
 .../replication/ReplicationPeerManager.java |  18 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  11 +-
 .../hbase/util/hbck/ReplicationChecker.java | 109 +++
 .../cleaner/TestReplicationZKNodeCleaner.java   | 115 ---
 .../hbase/util/TestHBaseFsckReplication.java| 101 ++
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |   6 +-
 13 files changed, 226 insertions(+), 465 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af05d043/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index fe45762..fac4875 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -345,10 +345,10 @@ public class VerifyReplication extends Configured 
implements Tool {
 }
   });
   ReplicationPeerStorage storage =
-  ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
+ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf);
   ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId);
   return Pair.newPair(peerConfig,
-ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf));
+ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf));
 } catch (ReplicationException e) {
   throw new IOException("An error occurred while trying to connect to the 
remove peer cluster",
   e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/af05d043/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 45940a5..fcbc350 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -17,14 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -106,25 +103,6 @@ public class ReplicationPeers {
 return 

[27/45] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-03-08 Thread zhangduo
HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d956a285
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d956a285
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d956a285

Branch: refs/heads/HBASE-19397-branch-2
Commit: d956a285795010bbbc96d4d9ab0780859df6eadb
Parents: 1e50d80
Author: zhangduo 
Authored: Wed Dec 27 22:03:51 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   9 +-
 .../hbase/replication/ReplicationQueues.java| 161 ---
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesZKImpl.java| 417 -
 .../hbase/replication/ReplicationTableBase.java | 442 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +-
 .../replication/ZKReplicationQueueStorage.java  |  22 +
 .../replication/TestReplicationStateBasic.java  | 131 +++---
 .../replication/TestReplicationStateZKImpl.java |  41 +-
 .../regionserver/DumpReplicationQueues.java |  15 +-
 .../RecoveredReplicationSource.java |  17 +-
 .../RecoveredReplicationSourceShipper.java  |  22 +-
 .../replication/regionserver/Replication.java   |  20 +-
 .../regionserver/ReplicationSource.java |  16 +-
 .../ReplicationSourceInterface.java |  11 +-
 .../regionserver/ReplicationSourceManager.java  | 265 ++-
 .../regionserver/ReplicationSyncUp.java |  13 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |  12 +-
 .../cleaner/TestReplicationHFileCleaner.java|  23 +-
 .../cleaner/TestReplicationZKNodeCleaner.java   |  22 +-
 .../replication/ReplicationSourceDummy.java |   6 +-
 .../replication/TestReplicationSyncUpTool.java  |   4 +-
 .../TestReplicationSourceManager.java   |  97 ++--
 .../TestReplicationSourceManagerZkImpl.java |  57 +--
 24 files changed, 356 insertions(+), 1558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d956a285/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c1c213..5e70e57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import org.apache.commons.lang3.reflect.ConstructorUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A factory class for instantiating replication objects that deal with 
replication state.
@@ -30,12 +29,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static ReplicationQueues 
getReplicationQueues(ReplicationQueuesArguments args)
-  throws Exception {
-return (ReplicationQueues) 
ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
-  args);
-  }
-
   public static ReplicationPeers getReplicationPeers(ZKWatcher zk, 
Configuration conf,
   Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d956a285/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
deleted file mode 100644
index a2d21f7..000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of 

[29/45] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-03-08 Thread zhangduo
HBASE-19636 All rs should already start work with the new peer change when 
replication peer procedure is finished

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1d7ec39
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1d7ec39
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1d7ec39

Branch: refs/heads/HBASE-19397-branch-2
Commit: b1d7ec39757a4690d985092a23834044e10da43c
Parents: 03d4fdd
Author: Guanghao Zhang 
Authored: Thu Jan 4 16:58:01 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   1 -
 .../hbase/replication/ReplicationPeerImpl.java  |   4 +-
 .../hbase/replication/ReplicationQueueInfo.java |  23 +-
 .../hbase/replication/ReplicationUtils.java |  56 ++
 .../replication/TestReplicationStateZKImpl.java |  21 -
 .../regionserver/ReplicationSourceService.java  |   3 +-
 .../regionserver/PeerProcedureHandler.java  |   3 +
 .../regionserver/PeerProcedureHandlerImpl.java  |  50 +-
 .../RecoveredReplicationSource.java |   6 +-
 .../RecoveredReplicationSourceShipper.java  |   8 +-
 .../replication/regionserver/Replication.java   |  11 +-
 .../regionserver/ReplicationSource.java |  34 +-
 .../regionserver/ReplicationSourceFactory.java  |   4 +-
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  | 827 ++-
 .../regionserver/ReplicationSourceShipper.java  |   6 +-
 .../ReplicationSourceWALReader.java |   2 +-
 .../replication/ReplicationSourceDummy.java |   2 +-
 .../replication/TestNamespaceReplication.java   |  57 +-
 .../TestReplicationSourceManager.java   |   5 +-
 20 files changed, 622 insertions(+), 509 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1d7ec39/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index fdae288..bf8d030 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1d7ec39/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index 3e17025..604e0bb 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class ReplicationPeerImpl implements ReplicationPeer {
+
   private final Configuration conf;
 
   private final String id;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1d7ec39/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index ecd888f..cd65f9b 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
- * This class is responsible for the parsing logic for a znode representing a 
queue.
+ * This class is responsible for the parsing logic for a queue id 

[08/45] hbase git commit: HBASE-19544 Add UTs for testing concurrent modifications on replication peer

2018-03-08 Thread zhangduo
HBASE-19544 Add UTs for testing concurrent modifications on replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ed879b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ed879b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ed879b5

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3ed879b5f9d21dfafce74adf64f261955da7b25a
Parents: 34c56c7
Author: Guanghao Zhang 
Authored: Tue Jan 2 17:07:41 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/TestReplicationAdmin.java   | 69 
 1 file changed, 69 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed879b5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 772a9d6..a753d23 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -58,6 +59,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Unit testing of ReplicationAdmin
@@ -69,6 +72,8 @@ public class TestReplicationAdmin {
   public static final HBaseClassTestRule CLASS_RULE =
   HBaseClassTestRule.forClass(TestReplicationAdmin.class);
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationAdmin.class);
+
   private final static HBaseTestingUtility TEST_UTIL =
   new HBaseTestingUtility();
 
@@ -118,6 +123,70 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testConcurrentPeerOperations() throws Exception {
+int threadNum = 5;
+AtomicLong successCount = new AtomicLong(0);
+
+// Test concurrent add peer operation
+Thread[] addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent remove peer operation
+successCount.set(0);
+Thread[] removePeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  removePeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.removeReplicationPeer(ID_ONE);
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when remove replication peer", e);
+}
+  });
+  removePeers[i].start();
+}
+for (Thread removePeer : removePeers) {
+  removePeer.join();
+}
+assertEquals(1, successCount.get());
+
+// Test concurrent add peer operation again
+successCount.set(0);
+addPeers = new Thread[threadNum];
+for (int i = 0; i < threadNum; i++) {
+  addPeers[i] = new Thread(() -> {
+try {
+  hbaseAdmin.addReplicationPeer(ID_ONE,
+ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build());
+  successCount.incrementAndGet();
+} catch (Exception e) {
+  LOG.debug("Got exception when add replication peer", e);
+}
+  });
+  addPeers[i].start();
+}
+for (Thread addPeer : addPeers) {
+  addPeer.join();
+}
+assertEquals(1, successCount.get());
+  }
+
+  @Test
   public void testAddInvalidPeer() {
 ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
 builder.setClusterKey(KEY_ONE);



[20/45] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-03-08 Thread zhangduo
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37d9adda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37d9adda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37d9adda

Branch: refs/heads/HBASE-19397-branch-2
Commit: 37d9addaa34ec2d1721de4da75429fa8ab64faa4
Parents: 1727abd
Author: zhangduo 
Authored: Mon Dec 25 18:49:56 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |  19 +-
 .../replication/ReplicationPeersZKImpl.java |  21 +-
 .../replication/ReplicationQueueStorage.java|  26 +-
 .../replication/ReplicationQueuesClient.java|  93 -
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 181 -
 .../replication/ZKReplicationQueueStorage.java  |  90 -
 .../replication/TestReplicationStateBasic.java  | 378 +++
 .../replication/TestReplicationStateZKImpl.java | 153 
 .../TestZKReplicationQueueStorage.java  |  74 
 .../cleaner/ReplicationZKNodeCleaner.java   |  71 ++--
 .../cleaner/ReplicationZKNodeCleanerChore.java  |   5 +-
 .../replication/ReplicationPeerManager.java |  31 +-
 .../master/ReplicationHFileCleaner.java | 109 ++
 .../master/ReplicationLogCleaner.java   |  44 +--
 .../regionserver/DumpReplicationQueues.java |  78 ++--
 .../hbase/util/hbck/ReplicationChecker.java |  14 +-
 .../client/TestAsyncReplicationAdminApi.java|  31 +-
 .../replication/TestReplicationAdmin.java   |   2 +
 .../hbase/master/cleaner/TestLogsCleaner.java   |  81 ++--
 .../cleaner/TestReplicationHFileCleaner.java|  29 --
 .../cleaner/TestReplicationZKNodeCleaner.java   |  12 +-
 .../replication/TestReplicationStateBasic.java  | 378 ---
 .../replication/TestReplicationStateZKImpl.java | 232 
 .../TestReplicationSourceManagerZkImpl.java |  41 --
 25 files changed, 890 insertions(+), 1343 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37d9adda/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9f4ad18..6c1c213 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -37,20 +36,14 @@ public class ReplicationFactory {
   args);
   }
 
-  public static ReplicationQueuesClient
-  getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
Exception {
-return (ReplicationQueuesClient) ConstructorUtils
-.invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
-  }
-
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf,
- Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf,
+  Abortable abortable) {
 return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf,
- final ReplicationQueuesClient queuesClient, Abortable abortable) {
-return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
+  public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf,
+  ReplicationQueueStorage queueStorage, Abortable abortable) {
+return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable);
   }
 
   public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,

http://git-wip-us.apache.org/repos/asf/hbase/blob/37d9adda/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 419e289..4e5f757 100644
--- 

[05/45] hbase git commit: HBASE-20144 The shutdown of master will hang if there are no live region server

2018-03-08 Thread zhangduo
HBASE-20144 The shutdown of master will hang if there are no live region server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25efd37f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25efd37f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25efd37f

Branch: refs/heads/HBASE-19397-branch-2
Commit: 25efd37f322ed8b08d4a8e9f091e53c8893d0fb4
Parents: af9a108
Author: zhangduo 
Authored: Wed Mar 7 20:32:35 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 15:54:40 2018 +0800

--
 .../hadoop/hbase/master/ServerManager.java  |  4 ++
 .../master/TestAssignmentManagerMetrics.java| 40 +-
 .../master/TestShutdownWithNoRegionServer.java  | 58 
 3 files changed, 75 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25efd37f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 06d6c8b..a65d95f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -957,6 +957,10 @@ public class ServerManager {
 String statusStr = "Cluster shutdown requested of master=" + 
this.master.getServerName();
 LOG.info(statusStr);
 this.clusterShutdown.set(true);
+if (onlineServers.isEmpty()) {
+  // we do not synchronize here so this may cause a double stop, but not a 
big deal
+  master.stop("OnlineServer=0 right after cluster shutdown set");
+}
   }
 
   boolean isClusterShutdown() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/25efd37f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
index 83e350c..287fc70 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
@@ -24,20 +24,21 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -45,8 +46,7 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Ignore // Disabled temporarily; reenable 
-@Category(MediumTests.class)
+@Category({ MasterTests.class, MediumTests.class })
 public class TestAssignmentManagerMetrics {
 
   @ClassRule
@@ -59,7 +59,7 @@ public class TestAssignmentManagerMetrics {
 
   private static MiniHBaseCluster cluster;
   private static HMaster master;
-  private static HBaseTestingUtility TEST_UTIL;
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static Configuration conf;
   private static final int msgInterval = 1000;
 
@@ -69,7 +69,6 @@ public class TestAssignmentManagerMetrics {
   @BeforeClass
   public static void startCluster() throws Exception {
 LOG.info("Starting cluster");
-TEST_UTIL = new HBaseTestingUtility();
 conf = TEST_UTIL.getConfiguration();
 
 // Disable sanity check for coprocessor
@@ -96,20 +95,14 @@ public class TestAssignmentManagerMetrics {
 
   @AfterClass
   public static void after() throws Exception {
-if (TEST_UTIL != null) {
-

[21/45] hbase git commit: HBASE-19579 Add peer lock test for shell command list_locks

2018-03-08 Thread zhangduo
HBASE-19579 Add peer lock test for shell command list_locks

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd9d06fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd9d06fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd9d06fc

Branch: refs/heads/HBASE-19397-branch-2
Commit: dd9d06fc09615788c284e6a682cb9d67b28d745b
Parents: 37d9add
Author: Guanghao Zhang 
Authored: Sat Dec 23 21:04:27 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../src/main/protobuf/LockService.proto  |  1 +
 .../src/test/ruby/shell/list_locks_test.rb   | 19 +++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd9d06fc/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index b8d180c..0675070 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -77,6 +77,7 @@ enum LockedResourceType {
   NAMESPACE = 2;
   TABLE = 3;
   REGION = 4;
+  PEER = 5;
 }
 
 message LockedResource {

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd9d06fc/hbase-shell/src/test/ruby/shell/list_locks_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb 
b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
index f465a6b..ef1c0ce 100644
--- a/hbase-shell/src/test/ruby/shell/list_locks_test.rb
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -67,6 +67,25 @@ module Hbase
 proc_id)
 end
 
+define_test 'list peer locks' do
+  lock = create_exclusive_lock(0)
+  peer_id = '1'
+
+  @scheduler.waitPeerExclusiveLock(lock, peer_id)
+  output = capture_stdout { @list_locks.command }
+  @scheduler.wakePeerExclusiveLock(lock, peer_id)
+
+  assert_equal(
+"PEER(1)\n" \
+"Lock type: EXCLUSIVE, procedure: {" \
+  
"\"className\"=>\"org.apache.hadoop.hbase.master.locking.LockProcedure\", " \
+  "\"procId\"=>\"0\", \"submittedTime\"=>\"0\", 
\"state\"=>\"RUNNABLE\", " \
+  "\"lastUpdate\"=>\"0\", " \
+  "\"stateMessage\"=>[{\"lockType\"=>\"EXCLUSIVE\", 
\"description\"=>\"description\"}]" \
+"}\n\n",
+output)
+end
+
 define_test 'list server locks' do
   lock = create_exclusive_lock(0)
 



[22/45] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface

2018-03-08 Thread zhangduo
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f546d914
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f546d914
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f546d914

Branch: refs/heads/HBASE-19397-branch-2
Commit: f546d9145ea8fb549755afcf8eedeed34404280b
Parents: dd9d06f
Author: Guanghao Zhang 
Authored: Tue Dec 26 11:39:34 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/VerifyReplication.java  |   5 -
 .../hbase/replication/ReplicationPeer.java  |  42 ++--
 .../hbase/replication/ReplicationPeerImpl.java  | 169 ++
 .../replication/ReplicationPeerZKImpl.java  | 233 ---
 .../hbase/replication/ReplicationPeers.java |   4 +-
 .../replication/ReplicationPeersZKImpl.java |  23 +-
 .../replication/TestReplicationStateBasic.java  |   7 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  29 +--
 8 files changed, 216 insertions(+), 296 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f546d914/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 9065f4e..09d4b4b 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -333,7 +332,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   private static Pair 
getPeerQuorumConfig(
   final Configuration conf, String peerId) throws IOException {
 ZKWatcher localZKW = null;
-ReplicationPeerZKImpl peer = null;
 try {
   localZKW = new ZKWatcher(conf, "VerifyReplication",
   new Abortable() {
@@ -354,9 +352,6 @@ public class VerifyReplication extends Configured 
implements Tool {
   throw new IOException(
   "An error occurred while trying to connect to the remove peer 
cluster", e);
 } finally {
-  if (peer != null) {
-peer.close();
-  }
   if (localZKW != null) {
 localZKW.close();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f546d914/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b66d76d..4846018 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
-
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
  */
@@ -49,65 +48,52 @@ public interface ReplicationPeer {
   String getId();
 
   /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig();
-
-  /**
-   * Get the peer config object. if loadFromBackingStore is true, it will load 
from backing store
-   * directly and update its load peer config. otherwise, just return the 
local cached peer config.
-   * @return the ReplicationPeerConfig for this peer
-   */
-  public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore)
-  throws ReplicationException;
-
-  /**
* Returns the state of the peer by reading local cache.
* @return the enabled state
*/
   PeerState getPeerState();
 
   /**
-   * Returns the state of peer, if loadFromBackingStore is true, it will load 
from backing 

[39/45] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/5cfb0071/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 5b7bab8..ce9882a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -160,7 +160,6 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 this.clusterId = clusterId;
 this.walFileLengthProvider = walFileLengthProvider;
 this.replicationTracker.registerListener(this);
-this.replicationPeers.getAllPeerIds();
 // It's preferable to failover 1 RS at a time, but with good zk servers
 // more could be processed at the same time.
 int nbWorkers = conf.getInt("replication.executor.workers", 1);
@@ -260,8 +259,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 }
 List otherRegionServers = 
replicationTracker.getListOfRegionServers().stream()
 .map(ServerName::valueOf).collect(Collectors.toList());
-LOG.info(
-  "Current list of replicators: " + currentReplicators + " other RSs: " + 
otherRegionServers);
+LOG.info("Current list of replicators: " + currentReplicators + " other 
RSs: "
++ otherRegionServers);
 
 // Look if there's anything to process after a restart
 for (ServerName rs : currentReplicators) {
@@ -278,7 +277,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* The returned future is for adoptAbandonedQueues task.
*/
   Future init() throws IOException, ReplicationException {
-for (String id : this.replicationPeers.getConnectedPeerIds()) {
+for (String id : this.replicationPeers.getAllPeerIds()) {
   addSource(id);
   if (replicationForBulkLoadDataEnabled) {
 // Check if peer exists in hfile-refs queue, if not add it. This can 
happen in the case
@@ -297,8 +296,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
*/
   @VisibleForTesting
   ReplicationSourceInterface addSource(String id) throws IOException, 
ReplicationException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(id);
-ReplicationPeer peer = replicationPeers.getConnectedPeer(id);
+ReplicationPeerConfig peerConfig = replicationPeers.getPeerConfig(id);
+ReplicationPeer peer = replicationPeers.getPeer(id);
 ReplicationSourceInterface src = getReplicationSource(id, peerConfig, 
peer);
 synchronized (this.walsById) {
   this.sources.add(src);
@@ -344,7 +343,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   public void deleteSource(String peerId, boolean closeConnection) {
 abortWhenFail(() -> this.queueStorage.removeQueue(server.getServerName(), 
peerId));
 if (closeConnection) {
-  this.replicationPeers.peerDisconnected(peerId);
+  this.replicationPeers.removePeer(peerId);
 }
   }
 
@@ -437,12 +436,12 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 // update replication queues on ZK
 // synchronize on replicationPeers to avoid adding source for the 
to-be-removed peer
 synchronized (replicationPeers) {
-  for (String id : replicationPeers.getConnectedPeerIds()) {
+  for (String id : replicationPeers.getAllPeerIds()) {
 try {
   this.queueStorage.addWAL(server.getServerName(), id, logName);
 } catch (ReplicationException e) {
-  throw new IOException("Cannot add log to replication queue" +
-" when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
+  throw new IOException("Cannot add log to replication queue"
+  + " when creating a new source, queueId=" + id + ", filename=" + 
logName, e);
 }
   }
 }
@@ -587,7 +586,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   public void addPeer(String id) throws ReplicationException, IOException {
 LOG.info("Trying to add peer, peerId: " + id);
-boolean added = this.replicationPeers.peerConnected(id);
+boolean added = this.replicationPeers.addPeer(id);
 if (added) {
   LOG.info("Peer " + id + " connected success, trying to start the 
replication source thread.");
   addSource(id);
@@ -723,16 +722,21 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   // there is not an actual peer defined corresponding to peerId for 
the failover.
   ReplicationQueueInfo replicationQueueInfo = new 
ReplicationQueueInfo(peerId);

[19/45] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/37d9adda/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 6e27a21..d8f9625 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -21,13 +21,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.Set;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -48,17 +48,18 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap;
 
 /**
@@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured 
implements Tool {
   }
 
   public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, 
Set peerIds,
-   boolean hdfs) throws Exception {
-ReplicationQueuesClient queuesClient;
+  boolean hdfs) throws Exception {
+ReplicationQueueStorage queueStorage;
 ReplicationPeers replicationPeers;
 ReplicationQueues replicationQueues;
 ReplicationTracker replicationTracker;
-ReplicationQueuesClientArguments replicationArgs =
-new ReplicationQueuesClientArguments(getConf(), new 
WarnOnlyAbortable(), zkw);
+ReplicationQueuesArguments replicationArgs =
+new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), 
zkw);
 StringBuilder sb = new StringBuilder();
 
-queuesClient = 
ReplicationFactory.getReplicationQueuesClient(replicationArgs);
-queuesClient.init();
+queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, 
getConf());
 replicationQueues = 
ReplicationFactory.getReplicationQueues(replicationArgs);
-replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), 
queuesClient, connection);
+replicationPeers =
+ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, 
connection);
 replicationTracker = ReplicationFactory.getReplicationTracker(zkw, 
replicationPeers, getConf(),
   new WarnOnlyAbortable(), new WarnOnlyStoppable());
-List liveRegionServers = 
replicationTracker.getListOfRegionServers();
+Set liveRegionServers = new 
HashSet<>(replicationTracker.getListOfRegionServers());
 
 // Loops each peer on each RS and dumps the queues
-try {
-  List regionservers = queuesClient.getListOfReplicators();
-  if (regionservers == null || regionservers.isEmpty()) {
-return sb.toString();
+List regionservers = queueStorage.getListOfReplicators();
+if (regionservers == null || regionservers.isEmpty()) {
+  return sb.toString();
+}
+for (ServerName regionserver : regionservers) {
+  List queueIds = queueStorage.getAllQueues(regionserver);
+  replicationQueues.init(regionserver.getServerName());
+  if (!liveRegionServers.contains(regionserver.getServerName())) {
+deadRegionServers.add(regionserver.getServerName());
   }
-  for (String regionserver : regionservers) {
-List queueIds = queuesClient.getAllQueues(regionserver);
-replicationQueues.init(regionserver);
-if (!liveRegionServers.contains(regionserver)) {
-  deadRegionServers.add(regionserver);
-}
-for (String 

[17/45] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/1727abd3/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
deleted file mode 100644
index b6f8784..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Manages and performs all replication admin operations.
- * 
- * Used to add/remove a replication peer.
- */
-@InterfaceAudience.Private
-public class ReplicationManager {
-  private final ReplicationQueuesClient replicationQueuesClient;
-  private final ReplicationPeers replicationPeers;
-
-  public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable 
abortable)
-  throws IOException {
-try {
-  this.replicationQueuesClient = ReplicationFactory
-  .getReplicationQueuesClient(new 
ReplicationQueuesClientArguments(conf, abortable, zkw));
-  this.replicationQueuesClient.init();
-  this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
-this.replicationQueuesClient, abortable);
-  this.replicationPeers.init();
-} catch (Exception e) {
-  throw new IOException("Failed to construct ReplicationManager", e);
-}
-  }
-
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig, boolean enabled)
-  throws ReplicationException {
-checkPeerConfig(peerConfig);
-replicationPeers.registerPeer(peerId, peerConfig, enabled);
-replicationPeers.peerConnected(peerId);
-  }
-
-  public void removeReplicationPeer(String peerId) throws ReplicationException 
{
-replicationPeers.peerDisconnected(peerId);
-replicationPeers.unregisterPeer(peerId);
-  }
-
-  public void enableReplicationPeer(String peerId) throws ReplicationException 
{
-this.replicationPeers.enablePeer(peerId);
-  }
-
-  public void disableReplicationPeer(String peerId) throws 
ReplicationException {
-this.replicationPeers.disablePeer(peerId);
-  }
-
-  public ReplicationPeerConfig getPeerConfig(String peerId)
-  throws ReplicationException, ReplicationPeerNotFoundException {
-ReplicationPeerConfig peerConfig = 
replicationPeers.getReplicationPeerConfig(peerId);
-if (peerConfig == null) {
-  throw new ReplicationPeerNotFoundException(peerId);
-}
-return peerConfig;
-  }
-
-  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
-  throws ReplicationException, IOException {
-checkPeerConfig(peerConfig);
-this.replicationPeers.updatePeerConfig(peerId, peerConfig);
-  }
-
-  public List listReplicationPeers(Pattern pattern)
-  throws ReplicationException {
-List peers = new ArrayList<>();
-List peerIds = replicationPeers.getAllPeerIds();
-for (String peerId : 

[44/45] hbase git commit: HBASE-19634 Add permission check for executeProcedures in AccessController

2018-03-08 Thread zhangduo
HBASE-19634 Add permission check for executeProcedures in AccessController


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03d4fdda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03d4fdda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03d4fdda

Branch: refs/heads/HBASE-19397-branch-2
Commit: 03d4fddac2e3c7bc8261dcefcba0c86c5ffa2569
Parents: ca942ea
Author: zhangduo 
Authored: Thu Jan 4 16:18:21 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/coprocessor/RegionServerObserver.java | 14 ++
 .../hbase/regionserver/RSRpcServices.java   | 52 +++-
 .../RegionServerCoprocessorHost.java| 18 +++
 .../hbase/security/access/AccessController.java | 30 ++-
 .../hadoop/hbase/TestJMXConnectorServer.java|  7 +++
 .../security/access/TestAccessController.java   | 18 +--
 6 files changed, 100 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/03d4fdda/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
index c1af3fb..5b751df 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
@@ -126,4 +126,18 @@ public interface RegionServerObserver {
   default void postClearCompactionQueues(
   final ObserverContext ctx)
   throws IOException {}
+
+  /**
+   * This will be called before executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
preExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
+
+  /**
+   * This will be called after executing procedures
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void 
postExecuteProcedures(ObserverContext ctx)
+  throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/03d4fdda/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 5b0ce4e..cceae28 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -3508,36 +3508,40 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   }
 
   @Override
+  @QosPriority(priority = HConstants.ADMIN_QOS)
   public ExecuteProceduresResponse executeProcedures(RpcController controller,
   ExecuteProceduresRequest request) throws ServiceException {
-if (request.getOpenRegionCount() > 0) {
-  for (OpenRegionRequest req : request.getOpenRegionList()) {
-openRegion(controller, req);
+try {
+  checkOpen();
+  regionServer.getRegionServerCoprocessorHost().preExecuteProcedures();
+  if (request.getOpenRegionCount() > 0) {
+for (OpenRegionRequest req : request.getOpenRegionList()) {
+  openRegion(controller, req);
+}
   }
-}
-if (request.getCloseRegionCount() > 0) {
-  for (CloseRegionRequest req : request.getCloseRegionList()) {
-closeRegion(controller, req);
+  if (request.getCloseRegionCount() > 0) {
+for (CloseRegionRequest req : request.getCloseRegionList()) {
+  closeRegion(controller, req);
+}
   }
-}
-if (request.getProcCount() > 0) {
-  for (RemoteProcedureRequest req : request.getProcList()) {
-RSProcedureCallable callable;
-try {
-  callable =
-
Class.forName(req.getProcClass()).asSubclass(RSProcedureCallable.class).newInstance();
-} catch (Exception e) {
-  // here we just ignore the error as this should not happen and we do 
not provide a general
-  // way to report errors for all types of remote procedure. The 
procedure will hang at
-  // master side but after you solve the problem and restart master it 
will be executed
-  // again and pass.
-  LOG.warn("create procedure of type " + req.getProcClass() + " 
failed, give up", e);
-  continue;
+  if (request.getProcCount() > 0) {
+for 

[42/45] hbase git commit: HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

2018-03-08 Thread zhangduo
HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0bc41dd0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0bc41dd0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0bc41dd0

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0bc41dd012ba94eaab94385546042b4beb262ff1
Parents: 77674d5
Author: Guanghao Zhang 
Authored: Fri Jan 5 15:39:06 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java | 23 
 1 file changed, 19 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0bc41dd0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index b18dd6c..eab06a2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -290,7 +290,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   @Override
   public void completionCleanup(final Procedure proc) {
 if (proc instanceof TableProcedureInterface) {
-  TableProcedureInterface iProcTable = (TableProcedureInterface)proc;
+  TableProcedureInterface iProcTable = (TableProcedureInterface) proc;
   boolean tableDeleted;
   if (proc.hasException()) {
 Exception procEx = proc.getException().unwrapRemoteException();
@@ -311,9 +311,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
   }
 } else if (proc instanceof PeerProcedureInterface) {
   PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc;
-  if (iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) {
-removePeerQueue(iProcPeer.getPeerId());
-  }
+  tryCleanupPeerQueue(iProcPeer.getPeerId(), proc);
 } else {
   // No cleanup for ServerProcedureInterface types, yet.
   return;
@@ -402,6 +400,23 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 locking.removePeerLock(peerId);
   }
 
+  private void tryCleanupPeerQueue(String peerId, Procedure procedure) {
+schedLock();
+try {
+  PeerQueue queue = AvlTree.get(peerMap, peerId, 
PEER_QUEUE_KEY_COMPARATOR);
+  if (queue == null) {
+return;
+  }
+
+  final LockAndQueue lock = locking.getPeerLock(peerId);
+  if (queue.isEmpty() && lock.tryExclusiveLock(procedure)) {
+removeFromRunQueue(peerRunQueue, queue);
+removePeerQueue(peerId);
+  }
+} finally {
+  schedUnlock();
+}
+  }
 
   private static boolean isPeerProcedure(Procedure proc) {
 return proc instanceof PeerProcedureInterface;



[23/45] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer

2018-03-08 Thread zhangduo
HBASE-19630 Add peer cluster key check when add new replication peer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/68a24e7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/68a24e7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/68a24e7a

Branch: refs/heads/HBASE-19397-branch-2
Commit: 68a24e7a36d052c5b3cf8956f80001380391969d
Parents: f546d91
Author: Guanghao Zhang 
Authored: Tue Dec 26 21:10:00 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 54 
 .../replication/TestReplicationAdmin.java   | 22 
 2 files changed, 54 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/68a24e7a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 84abfeb..b78cbce 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -42,6 +43,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -216,36 +218,36 @@ public final class ReplicationPeerManager {
 return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty();
   }
 
-  /**
-   * If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
-   * Then allow config exclude namespaces or exclude table-cfs which can't be 
replicated to peer
-   * cluster.
-   * 
-   * If replicate_all flag is false, it means all user tables can't be 
replicated to peer cluster.
-   * Then allow to config namespaces or table-cfs which will be replicated to 
peer cluster.
-   */
-  private static void checkPeerConfig(ReplicationPeerConfig peerConfig)
-  throws DoNotRetryIOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws 
DoNotRetryIOException {
+checkClusterKey(peerConfig.getClusterKey());
+
 if (peerConfig.replicateAllUserTables()) {
-  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty()) ||
-(peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
-throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly " +
-  "when you want replicate all cluster");
+  // If replicate_all flag is true, it means all user tables will be 
replicated to peer cluster.
+  // Then allow config exclude namespaces or exclude table-cfs which can't 
be replicated to peer
+  // cluster.
+  if ((peerConfig.getNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
+  || (peerConfig.getTableCFsMap() != null && 
!peerConfig.getTableCFsMap().isEmpty())) {
+throw new DoNotRetryIOException("Need clean namespaces or table-cfs 
config firstly "
++ "when you want replicate all cluster");
   }
   
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 peerConfig.getExcludeTableCFsMap());
 } else {
-  if ((peerConfig.getExcludeNamespaces() != null &&
-!peerConfig.getExcludeNamespaces().isEmpty()) ||
-(peerConfig.getExcludeTableCFsMap() != null &&
-  !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+  // If replicate_all flag is false, it means all user tables can't be 
replicated to peer
+  // cluster. Then allow to config namespaces or table-cfs which will be 
replicated to peer
+  // cluster.
+  if ((peerConfig.getExcludeNamespaces() != null
+  && !peerConfig.getExcludeNamespaces().isEmpty())
+  || (peerConfig.getExcludeTableCFsMap() != null
+  && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
 throw new DoNotRetryIOException(
-"Need 

[36/45] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure

2018-03-08 Thread zhangduo
HBASE-19697 Remove TestReplicationAdminUsingProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca942ea4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca942ea4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca942ea4

Branch: refs/heads/HBASE-19397-branch-2
Commit: ca942ea42aaa54f1336a46bbe5db28ac3c2aa9a0
Parents: 111c687
Author: zhangduo 
Authored: Wed Jan 3 21:13:57 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../TestReplicationAdminUsingProcedure.java | 225 ---
 1 file changed, 225 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca942ea4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
deleted file mode 100644
index 1300376..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client.replication;
-
-import java.io.IOException;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.TestReplicationBase;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-
-@Category({ MediumTests.class, ClientTests.class })
-public class TestReplicationAdminUsingProcedure extends TestReplicationBase {
-
-  private static final String PEER_ID = "2";
-  private static final Logger LOG = 
Logger.getLogger(TestReplicationAdminUsingProcedure.class);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-conf1.setInt("hbase.multihconnection.threads.max", 10);
-
-// Start the master & slave mini cluster.
-TestReplicationBase.setUpBeforeClass();
-
-// Remove the replication peer
-hbaseAdmin.removeReplicationPeer(PEER_ID);
-  }
-
-  private void loadData(int startRowKey, int endRowKey) throws IOException {
-for (int i = startRowKey; i < endRowKey; i++) {
-  byte[] rowKey = Bytes.add(row, Bytes.toBytes(i));
-  Put put = new Put(rowKey);
-  put.addColumn(famName, null, Bytes.toBytes(i));
-  htable1.put(put);
-}
-  }
-
-  private void waitForReplication(int expectedRows, int retries)
-  throws IOException, InterruptedException {
-Scan scan;
-for (int i = 0; i < retries; i++) {
-  scan = new Scan();
-  if (i == retries - 1) {
-throw new IOException("Waited too much time for normal batch 
replication");
-  }
-  try (ResultScanner scanner = htable2.getScanner(scan)) {
-int count = 0;
-for (Result res : scanner) {
-  count++;
-}
-if (count != expectedRows) {
-  LOG.info("Only got " + count + " rows,  expected rows: " + 
expectedRows);
-  Thread.sleep(SLEEP_TIME);
-} else {
-  return;
-}
-  }
-}
-  }
-
-  @Before
-  public void setUp() throws IOException {
-

[11/45] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

2018-03-08 Thread zhangduo
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ee18a00
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ee18a00
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ee18a00

Branch: refs/heads/HBASE-19397-branch-2
Commit: 3ee18a00c708278687ba1321f387682411a2d97f
Parents: 25efd37
Author: zhangduo 
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/procedure2/LockedResourceType.java|   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 ++-
 .../src/main/protobuf/Admin.proto   |   9 +-
 .../src/main/protobuf/MasterProcedure.proto |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  30 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java | 113 ++-
 .../procedure/PeerProcedureInterface.java   |  34 
 .../hbase/master/procedure/PeerQueue.java   |  54 +
 .../master/procedure/RSProcedureDispatcher.java | 101 ++
 .../hbase/master/procedure/SchemaLocking.java   |   5 +
 .../master/replication/ModifyPeerProcedure.java | 127 
 .../master/replication/RefreshPeerCallable.java |  67 +++
 .../replication/RefreshPeerProcedure.java   | 197 +++
 .../hbase/procedure2/RSProcedureCallable.java   |  43 
 .../hbase/regionserver/HRegionServer.java   |  61 ++
 .../hbase/regionserver/RSRpcServices.java   |  58 --
 .../handler/RSProcedureHandler.java |  51 +
 .../assignment/TestAssignmentManager.java   |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 
 .../TestDummyModifyPeerProcedure.java   |  80 
 .../security/access/TestAccessController.java   |   1 +
 26 files changed, 1109 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ee18a00/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ee18a00/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 6238e10..7e3dde6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher {
+/**
+ * For building the remote operation.
+ */
 RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation 
response);
+
+/**
+ * Called when the executeProcedure call is failed.
+ */
 void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+/**
+ * Called when RS tells the remote procedure is succeeded through the
+ * {@code reportProcedureDone} method.
+ */
+void remoteOperationCompleted(TEnv env);
+
+/**
+ * Called when RS tells the remote procedure is failed through the 

[31/45] hbase git commit: HBASE-19873 addendum add missing rule for new tests

2018-03-08 Thread zhangduo
HBASE-19873 addendum add missing rule for new tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5535fdca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5535fdca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5535fdca

Branch: refs/heads/HBASE-19397-branch-2
Commit: 5535fdca2ac0e076c5066fef678de42c61553575
Parents: dd77e12
Author: zhangduo 
Authored: Tue Jan 30 09:40:23 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/TestZKReplicationPeerStorage.java  | 7 ++-
 .../hbase/replication/TestZKReplicationQueueStorage.java | 8 +++-
 .../hbase/replication/TestReplicationProcedureRetry.java | 7 ++-
 .../apache/hadoop/hbase/util/TestHBaseFsckReplication.java   | 6 ++
 4 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5535fdca/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
index 3eb11da..3290fb0 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java
@@ -33,19 +33,24 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.stream.Stream;
-
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseZKTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ ReplicationTests.class, MediumTests.class })
 public class TestZKReplicationPeerStorage {
 
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class);
+
   private static final HBaseZKTestingUtility UTIL = new 
HBaseZKTestingUtility();
 
   private static ZKReplicationPeerStorage STORAGE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5535fdca/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
index 786730f..2c01a26 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
@@ -27,8 +27,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 import java.util.SortedSet;
-
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseZKTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -38,11 +38,17 @@ import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category({ ReplicationTests.class, MediumTests.class })
 public class TestZKReplicationQueueStorage {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestZKReplicationQueueStorage.class);
+
   private static final HBaseZKTestingUtility UTIL = new 
HBaseZKTestingUtility();
 
   private static ZKReplicationQueueStorage STORAGE;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5535fdca/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
index ab35b46..a2ae0b4 100644
--- 

[30/45] hbase git commit: HBASE-19923 Reset peer state and config when refresh replication source failed

2018-03-08 Thread zhangduo
HBASE-19923 Reset peer state and config when refresh replication source failed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f29bb71
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f29bb71
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f29bb71

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0f29bb719e95bf1e1bd9d761d99e12d513460da2
Parents: 2022874
Author: Guanghao Zhang 
Authored: Tue Feb 6 14:58:39 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationPeerImpl.java  |  4 ++--
 .../regionserver/PeerProcedureHandlerImpl.java  | 24 
 2 files changed, 22 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f29bb71/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index 604e0bb..d656466 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -54,11 +54,11 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
 this.peerConfigListeners = new ArrayList<>();
   }
 
-  void setPeerState(boolean enabled) {
+  public void setPeerState(boolean enabled) {
 this.peerState = enabled ? PeerState.ENABLED : PeerState.DISABLED;
   }
 
-  void setPeerConfig(ReplicationPeerConfig peerConfig) {
+  public void setPeerConfig(ReplicationPeerConfig peerConfig) {
 this.peerConfig = peerConfig;
 peerConfigListeners.forEach(listener -> 
listener.peerConfigUpdated(peerConfig));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f29bb71/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index ce8fdae..a02d181 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -62,18 +62,26 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   private void refreshPeerState(String peerId) throws ReplicationException, 
IOException {
 PeerState newState;
 Lock peerLock = peersLock.acquireLock(peerId);
+ReplicationPeerImpl peer = null;
+PeerState oldState = null;
+boolean success = false;
 try {
-  ReplicationPeerImpl peer = 
replicationSourceManager.getReplicationPeers().getPeer(peerId);
+  peer = replicationSourceManager.getReplicationPeers().getPeer(peerId);
   if (peer == null) {
 throw new ReplicationException("Peer with id=" + peerId + " is not 
cached.");
   }
-  PeerState oldState = peer.getPeerState();
+  oldState = peer.getPeerState();
   newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
   // RS need to start work with the new replication state change
   if (oldState.equals(PeerState.ENABLED) && 
newState.equals(PeerState.DISABLED)) {
 replicationSourceManager.refreshSources(peerId);
   }
+  success = true;
 } finally {
+  if (!success && peer != null) {
+// Reset peer state if refresh source failed
+peer.setPeerState(oldState.equals(PeerState.ENABLED));
+  }
   peerLock.unlock();
 }
   }
@@ -91,19 +99,27 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   @Override
   public void updatePeerConfig(String peerId) throws ReplicationException, 
IOException {
 Lock peerLock = peersLock.acquireLock(peerId);
+ReplicationPeerImpl peer = null;
+ReplicationPeerConfig oldConfig = null;
+boolean success = false;
 try {
-  ReplicationPeerImpl peer = 
replicationSourceManager.getReplicationPeers().getPeer(peerId);
+  peer = replicationSourceManager.getReplicationPeers().getPeer(peerId);
   if (peer == null) {
 throw new ReplicationException("Peer with id=" + peerId + " is not 
cached.");
   }
-  ReplicationPeerConfig oldConfig = peer.getPeerConfig();
+  oldConfig = peer.getPeerConfig();
   

[14/45] hbase git commit: HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer Procedure classes

2018-03-08 Thread zhangduo
HBASE-19580 Use slf4j instead of commons-logging in new, just-added Peer 
Procedure classes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4d0745d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4d0745d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4d0745d

Branch: refs/heads/HBASE-19397-branch-2
Commit: e4d0745d2e0a92e687c8e7371c00fb8b3809b168
Parents: 89ed3b0
Author: zhangduo 
Authored: Thu Dec 21 21:59:46 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hadoop/hbase/master/replication/AddPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/DisablePeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/EnablePeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/ModifyPeerProcedure.java   | 6 +++---
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 6 +++---
 .../hadoop/hbase/master/replication/RemovePeerProcedure.java   | 6 +++---
 .../hbase/master/replication/UpdatePeerConfigProcedure.java| 6 +++---
 7 files changed, 21 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4d0745d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
index c3862d8..066c3e7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -28,6 +26,8 @@ import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
 
@@ -37,7 +37,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.A
 @InterfaceAudience.Private
 public class AddPeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(AddPeerProcedure.class);
 
   private ReplicationPeerConfig peerConfig;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e4d0745d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
index 0b32db9..9a28de6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The procedure for disabling a replication peer.
@@ -31,7 +31,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public class DisablePeerProcedure extends ModifyPeerProcedure {
 
-  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+  private static final Logger LOG = 
LoggerFactory.getLogger(DisablePeerProcedure.class);
 
   public DisablePeerProcedure() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e4d0745d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java

[26/45] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/d956a285/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index b8d80d2..5b7bab8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -33,17 +33,20 @@ import java.util.TreeSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -56,7 +59,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
@@ -91,7 +94,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
  private final List<ReplicationSourceInterface> sources;
  // List of all the sources we got from died RSs
  private final List<ReplicationSourceInterface> oldsources;
-  private final ReplicationQueues replicationQueues;
+  private final ReplicationQueueStorage queueStorage;
   private final ReplicationTracker replicationTracker;
   private final ReplicationPeers replicationPeers;
   // UUID for this cluster
@@ -124,7 +127,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 
   /**
* Creates a replication manager and sets the watch on all the other 
registered region servers
-   * @param replicationQueues the interface for manipulating replication queues
+   * @param queueStorage the interface for manipulating replication queues
* @param replicationPeers
* @param replicationTracker
* @param conf the configuration to use
@@ -134,14 +137,14 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* @param oldLogDir the directory where old logs are archived
* @param clusterId
*/
-  public ReplicationSourceManager(ReplicationQueues replicationQueues,
+  public ReplicationSourceManager(ReplicationQueueStorage queueStorage,
   ReplicationPeers replicationPeers, ReplicationTracker 
replicationTracker, Configuration conf,
   Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID 
clusterId,
   WALFileLengthProvider walFileLengthProvider) throws IOException {
 //CopyOnWriteArrayList is thread-safe.
 //Generally, reading is more than modifying.
 this.sources = new CopyOnWriteArrayList<>();
-this.replicationQueues = replicationQueues;
+this.queueStorage = queueStorage;
 this.replicationPeers = replicationPeers;
 this.replicationTracker = replicationTracker;
 this.server = server;
@@ -174,6 +177,19 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
   }
 
+  @FunctionalInterface
+  private interface ReplicationQueueOperation {
+void exec() throws ReplicationException;
+  }
+
+  private void abortWhenFail(ReplicationQueueOperation op) {
+try {
+  op.exec();
+} catch (ReplicationException e) {
+  server.abort("Failed to operate on replication queue", e);
+}
+  }
+
   /**
* Provide the id of the peer and a log key and this method will figure which
* wal it belongs to and will log, for this region server, the current
@@ -185,12 +201,13 @@ public class ReplicationSourceManager implements 
ReplicationListener {
* @param queueRecovered indicates if this queue comes from another region 
server
* @param holdLogInZK if true then the log is retained in 

[43/45] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure

2018-03-08 Thread zhangduo
HBASE-19592 Add UTs to test retry on update zk failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5b880f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5b880f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5b880f9

Branch: refs/heads/HBASE-19397-branch-2
Commit: d5b880f9448103512f3a37938bf3bd67128e2878
Parents: 68a24e7
Author: zhangduo 
Authored: Tue Dec 26 20:39:00 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 .../TestReplicationProcedureRetry.java  | 200 +++
 2 files changed, 202 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5b880f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index b78cbce..f4ccce8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
-public final class ReplicationPeerManager {
+public class ReplicationPeerManager {
 
   private final ReplicationPeerStorage peerStorage;
 
@@ -61,8 +61,7 @@ public final class ReplicationPeerManager {
 
  private final ConcurrentMap<String, ReplicationPeerDescription> peers;
 
-  private ReplicationPeerManager(ReplicationPeerStorage peerStorage,
-  ReplicationQueueStorage queueStorage,
  ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
  ConcurrentMap<String, ReplicationPeerDescription> peers) {
 this.peerStorage = peerStorage;
 this.queueStorage = queueStorage;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5b880f9/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
new file mode 100644
index 0000000..ab35b46
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+

[02/45] hbase git commit: Revert "HBASE-20137 TestRSGroups is flakey"

2018-03-08 Thread zhangduo
Revert "HBASE-20137 TestRSGroups is flakey"

Revert. Fix is not right.

This reverts commit 6d1740d498d3f0f301a87a0a0cd598827790efa5.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96a42b73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96a42b73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96a42b73

Branch: refs/heads/HBASE-19397-branch-2
Commit: 96a42b7359674e787bd4d2e48e4622e9f5861645
Parents: f5c8713
Author: Michael Stack 
Authored: Wed Mar 7 09:24:09 2018 -0800
Committer: Michael Stack 
Committed: Wed Mar 7 09:25:02 2018 -0800

--
 .../hadoop/hbase/master/ServerManager.java  | 17 +++
 .../FailedRemoteDispatchException.java  |  9 +---
 .../assignment/RegionTransitionProcedure.java   | 15 +++
 .../master/assignment/UnassignProcedure.java| 47 
 .../assignment/TestAssignmentManager.java   | 10 +
 5 files changed, 34 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96a42b73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index e2f0b6b..06d6c8b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -555,17 +555,15 @@ public class ServerManager {
   }
 
   /*
-   * Expire the passed server. Add it to list of dead servers and queue a 
shutdown processing.
-   * @return True if we expired passed serverName else false if 
we failed to schedule
-   * an expire (and attendant ServerCrashProcedure -- some clients are 
dependent on
-   * server crash procedure being queued and need to know if has not been 
queued).
+   * Expire the passed server.  Add it to list of dead servers and queue a
+   * shutdown processing.
*/
-  public synchronized boolean expireServer(final ServerName serverName) {
+  public synchronized void expireServer(final ServerName serverName) {
 if (serverName.equals(master.getServerName())) {
   if (!(master.isAborted() || master.isStopped())) {
 master.stop("We lost our znode?");
   }
-  return false;
+  return;
 }
 if (!master.isServerCrashProcessingEnabled()) {
   LOG.info("Master doesn't enable ServerShutdownHandler during 
initialization, "
@@ -575,13 +573,13 @@ public class ServerManager {
   // the SCP is not enable yet and Meta's RIT may be suspend forever. See 
HBase-19287
   master.getAssignmentManager().handleMetaRITOnCrashedServer(serverName);
   this.queuedDeadServers.add(serverName);
-  return false;
+  return;
 }
 if (this.deadservers.isDeadServer(serverName)) {
   // TODO: Can this happen?  It shouldn't be online in this case?
   LOG.warn("Expiration of " + serverName +
   " but server shutdown already in progress");
-  return false;
+  return;
 }
 moveFromOnlineToDeadServers(serverName);
 
@@ -593,7 +591,7 @@ public class ServerManager {
   if (this.onlineServers.isEmpty()) {
 master.stop("Cluster shutdown set; onlineServer=0");
   }
-  return false;
+  return;
 }
 LOG.info("Processing expiration of " + serverName + " on " + 
this.master.getServerName());
 master.getAssignmentManager().submitServerCrash(serverName, true);
@@ -604,7 +602,6 @@ public class ServerManager {
 listener.serverRemoved(serverName);
   }
 }
-return true;
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hbase/blob/96a42b73/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
index 7e98675..b459cfe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
@@ -21,17 +21,12 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * Used internally signaling failed queue of a remote procedure operation.
- * Usually happens because no such remote server; it is being processed as 
crashed so it is not
- * online at time of RPC. 

[40/45] hbase git commit: HBASE-19622 Reimplement ReplicationPeers with the new replication storage interface

2018-03-08 Thread zhangduo
HBASE-19622 Reimplement ReplicationPeers with the new replication storage 
interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cfb0071
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cfb0071
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cfb0071

Branch: refs/heads/HBASE-19397-branch-2
Commit: 5cfb00719a7744f1d57390b0c2248044fcb0894a
Parents: e898d25
Author: huzheng 
Authored: Tue Dec 26 16:46:10 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  10 +-
 .../replication/VerifyReplication.java  |   9 +-
 .../hbase/replication/ReplicationFactory.java   |  10 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  60 +-
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../hbase/replication/ReplicationPeers.java | 238 
 .../replication/ReplicationPeersZKImpl.java | 552 ---
 .../replication/ZKReplicationPeerStorage.java   |  12 +-
 .../replication/ZKReplicationStorageBase.java   |   3 +-
 .../replication/TestReplicationStateBasic.java  | 125 ++---
 .../replication/TestReplicationStateZKImpl.java |   2 +-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../cleaner/ReplicationZKNodeCleaner.java   |  57 +-
 .../replication/ReplicationPeerManager.java |   6 +-
 .../regionserver/DumpReplicationQueues.java |   2 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  49 +-
 .../replication/regionserver/Replication.java   |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  44 +-
 .../cleaner/TestReplicationHFileCleaner.java|   7 +-
 .../replication/TestMultiSlaveReplication.java  |   2 -
 .../TestReplicationTrackerZKImpl.java   |  26 +-
 .../TestReplicationSourceManager.java   |  17 +-
 .../hadoop/hbase/HBaseZKTestingUtility.java |   3 +-
 24 files changed, 307 insertions(+), 951 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5cfb0071/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 022bf64..a234a9b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -247,22 +247,22 @@ public final class ReplicationPeerConfigUtil {
   public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
   throws DeserializationException {
 if (ProtobufUtil.isPBMagicPrefix(bytes)) {
-  int pblen = ProtobufUtil.lengthOfPBMagic();
+  int pbLen = ProtobufUtil.lengthOfPBMagic();
   ReplicationProtos.ReplicationPeer.Builder builder =
   ReplicationProtos.ReplicationPeer.newBuilder();
   ReplicationProtos.ReplicationPeer peer;
   try {
-ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+ProtobufUtil.mergeFrom(builder, bytes, pbLen, bytes.length - pbLen);
 peer = builder.build();
   } catch (IOException e) {
 throw new DeserializationException(e);
   }
   return convert(peer);
 } else {
-  if (bytes.length > 0) {
-return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
+  if (bytes == null || bytes.length <= 0) {
+throw new DeserializationException("Bytes to deserialize should not be 
empty.");
   }
-  return ReplicationPeerConfig.newBuilder().setClusterKey("").build();
+  return 
ReplicationPeerConfig.newBuilder().setClusterKey(Bytes.toString(bytes)).build();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5cfb0071/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 09d4b4b..f0070f0 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -339,15 +339,10 @@ public class VerifyReplication extends Configured 
implements 

[35/45] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase

2018-03-08 Thread zhangduo
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/111c6876
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/111c6876
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/111c6876

Branch: refs/heads/HBASE-19397-branch-2
Commit: 111c6876146ec475c4a2817a3ef8fd26c971ca80
Parents: af05d04
Author: huzheng 
Authored: Fri Dec 29 15:55:28 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationFactory.java   |   5 +-
 .../replication/ReplicationStateZKBase.java | 159 ---
 .../replication/ReplicationTrackerZKImpl.java   |  21 +--
 .../replication/ZKReplicationPeerStorage.java   |  24 ++-
 .../replication/ZKReplicationStorageBase.java   |  13 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../master/ReplicationPeerConfigUpgrader.java   | 128 +++
 .../regionserver/DumpReplicationQueues.java |  18 +--
 .../replication/regionserver/Replication.java   |   3 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   3 +-
 .../TestReplicationTrackerZKImpl.java   |   3 +-
 .../replication/master/TestTableCFsUpdater.java |  41 ++---
 .../TestReplicationSourceManager.java   |   6 +-
 13 files changed, 136 insertions(+), 292 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/111c6876/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 6c66aff..2a970ba 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -33,9 +33,8 @@ public class ReplicationFactory {
 return new ReplicationPeers(zk, conf);
   }
 
-  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,
-  final ReplicationPeers replicationPeers, Configuration conf, Abortable 
abortable,
+  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
Abortable abortable,
   Stoppable stopper) {
-return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, 
abortable, stopper);
+return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/111c6876/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
deleted file mode 100644
index a48683e..0000000
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import 

[10/45] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer

2018-03-08 Thread zhangduo
HBASE-19633 Clean up the replication queues in the postPeerModification stage 
when removing a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff026fdd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff026fdd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff026fdd

Branch: refs/heads/HBASE-19397-branch-2
Commit: ff026fdd3c5855601e8926f13d3b1d9d66107b7f
Parents: 5cfb007
Author: zhangduo 
Authored: Tue Jan 2 09:57:23 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  2 +-
 .../replication/VerifyReplication.java  | 34 ++
 .../hbase/replication/ReplicationPeers.java | 32 +++--
 .../replication/ZKReplicationQueueStorage.java  |  3 +-
 .../replication/ZKReplicationStorageBase.java   |  4 +--
 .../replication/TestReplicationStateBasic.java  | 10 ++
 .../master/replication/AddPeerProcedure.java|  5 ++-
 .../replication/DisablePeerProcedure.java   |  3 +-
 .../master/replication/EnablePeerProcedure.java |  3 +-
 .../master/replication/ModifyPeerProcedure.java | 34 ++
 .../replication/RefreshPeerProcedure.java   | 17 -
 .../master/replication/RemovePeerProcedure.java |  7 ++--
 .../replication/ReplicationPeerManager.java | 31 +++-
 .../replication/UpdatePeerConfigProcedure.java  |  3 +-
 .../RemoteProcedureResultReporter.java  |  3 +-
 .../regionserver/RefreshPeerCallable.java   |  5 +--
 .../regionserver/ReplicationSourceManager.java  | 38 +++-
 .../TestReplicationAdminUsingProcedure.java |  7 ++--
 18 files changed, 124 insertions(+), 117 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff026fdd/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index b80ee16..fdae288 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -27,8 +27,8 @@ import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A configuration for the replication peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/ff026fdd/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index f0070f0..fe45762 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.mapreduce.replication;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,13 +44,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
+import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -66,6 +66,7 @@ import org.apache.hadoop.util.Tool;
 

[38/45] hbase git commit: HBASE-19783 Change replication peer cluster key/endpoint from a not-null value to null is not allowed

2018-03-08 Thread zhangduo
HBASE-19783 Change replication peer cluster key/endpoint from a not-null value 
to null is not allowed

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd77e12b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd77e12b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd77e12b

Branch: refs/heads/HBASE-19397-branch-2
Commit: dd77e12be1e07c0a632c248f1d3d78e0d5bb61cf
Parents: 80fdf87
Author: Guanghao Zhang 
Authored: Fri Jan 12 22:04:38 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 28 +---
 1 file changed, 19 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd77e12b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 696b2d7..19fc7f4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -132,20 +132,19 @@ public class ReplicationPeerManager {
 checkPeerConfig(peerConfig);
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig();
-if (!StringUtils.isBlank(peerConfig.getClusterKey()) &&
-  !peerConfig.getClusterKey().equals(oldPeerConfig.getClusterKey())) {
+if (!isStringEquals(peerConfig.getClusterKey(), 
oldPeerConfig.getClusterKey())) {
   throw new DoNotRetryIOException(
   "Changing the cluster key on an existing peer is not allowed. 
Existing key '" +
-oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does 
not match new key '" +
-peerConfig.getClusterKey() + "'");
+  oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does 
not match new key '" +
+  peerConfig.getClusterKey() + "'");
 }
 
-if (!StringUtils.isBlank(peerConfig.getReplicationEndpointImpl()) &&
-  
!peerConfig.getReplicationEndpointImpl().equals(oldPeerConfig.getReplicationEndpointImpl()))
 {
+if (!isStringEquals(peerConfig.getReplicationEndpointImpl(),
+  oldPeerConfig.getReplicationEndpointImpl())) {
   throw new DoNotRetryIOException("Changing the replication endpoint 
implementation class " +
-"on an existing peer is not allowed. Existing class '" +
-oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
-" does not match new class '" + 
peerConfig.getReplicationEndpointImpl() + "'");
+  "on an existing peer is not allowed. Existing class '" +
+  oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId +
+  " does not match new class '" + 
peerConfig.getReplicationEndpointImpl() + "'");
 }
   }
 
@@ -341,4 +340,15 @@ public class ReplicationPeerManager {
 return new ReplicationPeerManager(peerStorage,
 ReplicationStorageFactory.getReplicationQueueStorage(zk, conf), peers);
   }
+
+  /**
+   * For replication peer cluster key or endpoint class, null and empty string 
is same. So here
+   * don't use {@link StringUtils#equals(CharSequence, CharSequence)} directly.
+   */
+  private boolean isStringEquals(String s1, String s2) {
+if (StringUtils.isBlank(s1)) {
+  return StringUtils.isBlank(s2);
+}
+return s1.equals(s2);
+  }
 }



[34/45] hbase git commit: HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl

2018-03-08 Thread zhangduo
HBASE-19686 Use KeyLocker instead of ReentrantLock in PeerProcedureHandlerImpl


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/34c56c73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/34c56c73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/34c56c73

Branch: refs/heads/HBASE-19397-branch-2
Commit: 34c56c734bb2e0500e4a516a1aef5bd392715a53
Parents: 2aa4c79
Author: zhangduo 
Authored: Tue Jan 2 16:13:55 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../regionserver/PeerProcedureHandlerImpl.java  | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/34c56c73/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
index 1efe180..c09c6a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/PeerProcedureHandlerImpl.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hbase.replication.regionserver;
 
 import java.io.IOException;
-import java.util.concurrent.locks.ReentrantLock;
-
+import java.util.concurrent.locks.Lock;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -32,7 +32,7 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
   private static final Logger LOG = 
LoggerFactory.getLogger(PeerProcedureHandlerImpl.class);
 
   private final ReplicationSourceManager replicationSourceManager;
-  private final ReentrantLock peersLock = new ReentrantLock();
+  private final KeyLocker<String> peersLock = new KeyLocker<>();
 
   public PeerProcedureHandlerImpl(ReplicationSourceManager 
replicationSourceManager) {
 this.replicationSourceManager = replicationSourceManager;
@@ -40,40 +40,57 @@ public class PeerProcedureHandlerImpl implements 
PeerProcedureHandler {
 
   @Override
   public void addPeer(String peerId) throws ReplicationException, IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   replicationSourceManager.addPeer(peerId);
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void removePeer(String peerId) throws ReplicationException, 
IOException {
-peersLock.lock();
+Lock peerLock = peersLock.acquireLock(peerId);
 try {
   if (replicationSourceManager.getReplicationPeers().getPeer(peerId) != 
null) {
 replicationSourceManager.removePeer(peerId);
   }
 } finally {
-  peersLock.unlock();
+  peerLock.unlock();
 }
   }
 
   @Override
   public void disablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("disable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("disable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void enablePeer(String peerId) throws ReplicationException, 
IOException {
-PeerState newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
-LOG.info("enable replication peer, id: " + peerId + ", new state: " + 
newState);
+PeerState newState;
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  newState = 
replicationSourceManager.getReplicationPeers().refreshPeerState(peerId);
+} finally {
+  peerLock.unlock();
+}
+LOG.info("enable replication peer, id: {}, new state: {}", peerId, 
newState);
   }
 
   @Override
   public void updatePeerConfig(String peerId) throws ReplicationException, 
IOException {
-replicationSourceManager.getReplicationPeers().refreshPeerConfig(peerId);
+Lock peerLock = peersLock.acquireLock(peerId);
+try {
+  

[07/45] hbase git commit: HBASE-19719 Fix checkstyle issues

2018-03-08 Thread zhangduo
HBASE-19719 Fix checkstyle issues


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/17f903e6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/17f903e6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/17f903e6

Branch: refs/heads/HBASE-19397-branch-2
Commit: 17f903e6f29cc57d0a3bfb74ebbf76960d4a2c85
Parents: 0bc41dd
Author: zhangduo 
Authored: Sat Jan 6 08:30:55 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationStorageFactory.java   |  2 +-
 .../master/assignment/RegionTransitionProcedure.java   |  4 ++--
 .../hbase/master/procedure/RSProcedureDispatcher.java  | 13 ++---
 .../master/ReplicationPeerConfigUpgrader.java  |  8 
 4 files changed, 13 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/17f903e6/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
index 60d0749..462cfed 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  * For now we only have zk based implementation.
  */
 @InterfaceAudience.Private
-public class ReplicationStorageFactory {
+public final class ReplicationStorageFactory {
 
   private ReplicationStorageFactory() {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/17f903e6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index a0e58f3..7ce7454 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -36,11 +36,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
-import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
 /**
  * Base class for the Assign and Unassign Procedure.
  *

http://git-wip-us.apache.org/repos/asf/hbase/blob/17f903e6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 57a4535..6c78914 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
@@ -36,6 +35,12 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
@@ -47,12 +52,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
 

[06/45] hbase git commit: HBASE-19936 Introduce a new base class for replication peer procedure

2018-03-08 Thread zhangduo
HBASE-19936 Introduce a new base class for replication peer procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/20228744
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/20228744
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/20228744

Branch: refs/heads/HBASE-19397-branch-2
Commit: 20228744a8f7b64b5cd92cfc8d099dffeb6469c9
Parents: 5535fdc
Author: zhangduo 
Authored: Mon Feb 5 16:14:25 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  2 +-
 .../replication/AbstractPeerProcedure.java  | 97 
 .../master/replication/ModifyPeerProcedure.java | 67 +-
 3 files changed, 102 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/20228744/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index a9521ad..9377988 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -401,7 +401,7 @@ message RefreshPeerParameter {
   required ServerName target_server = 3;
 }
 
-message ModifyPeerStateData {
+message PeerProcedureStateData {
   required string peer_id = 1;
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/20228744/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
new file mode 100644
index 000..0ad8a63
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerProcedureStateData;
+
+/**
+ * The base class for all replication peer related procedure.
+ */
+@InterfaceAudience.Private
+public abstract class AbstractPeerProcedure<TState>
+    extends StateMachineProcedure<MasterProcedureEnv, TState> implements 
PeerProcedureInterface {
+
+  protected String peerId;
+
+  private volatile boolean locked;
+
+  // used to keep compatible with old client where we can only returns after 
updateStorage.
+  protected ProcedurePrepareLatch latch;
+
+  protected AbstractPeerProcedure() {
+  }
+
+  protected AbstractPeerProcedure(String peerId) {
+this.peerId = peerId;
+this.latch = ProcedurePrepareLatch.createLatch(2, 0);
+  }
+
+  public ProcedurePrepareLatch getLatch() {
+return latch;
+  }
+
+  @Override
+  public String getPeerId() {
+return peerId;
+  }
+
+  @Override
+  protected LockState acquireLock(MasterProcedureEnv env) {
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
+  }
+
+  @Override
+  protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
+env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
+  }
+
+  @Override
+  protected boolean 

[28/45] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/b1d7ec39/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
index 4c1b94f..6d2b578 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
@@ -28,12 +28,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
@@ -41,6 +40,8 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
@@ -71,9 +72,6 @@ public class TestNamespaceReplication extends 
TestReplicationBase {
 
   private static final byte[] val = Bytes.toBytes("myval");
 
-  private static HTableDescriptor tabA;
-  private static HTableDescriptor tabB;
-
   private static Connection connection1;
   private static Connection connection2;
   private static Admin admin1;
@@ -93,23 +91,21 @@ public class TestNamespaceReplication extends 
TestReplicationBase {
 admin2.createNamespace(NamespaceDescriptor.create(ns1).build());
 admin2.createNamespace(NamespaceDescriptor.create(ns2).build());
 
-tabA = new HTableDescriptor(tabAName);
-HColumnDescriptor fam = new HColumnDescriptor(f1Name);
-fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-tabA.addFamily(fam);
-fam = new HColumnDescriptor(f2Name);
-fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-tabA.addFamily(fam);
+TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tabAName);
+builder.addColumnFamily(ColumnFamilyDescriptorBuilder
+  
.newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
+builder.addColumnFamily(ColumnFamilyDescriptorBuilder
+  
.newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
+TableDescriptor tabA = builder.build();
 admin1.createTable(tabA);
 admin2.createTable(tabA);
 
-tabB = new HTableDescriptor(tabBName);
-fam = new HColumnDescriptor(f1Name);
-fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-tabB.addFamily(fam);
-fam = new HColumnDescriptor(f2Name);
-fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
-tabB.addFamily(fam);
+builder = TableDescriptorBuilder.newBuilder(tabBName);
+builder.addColumnFamily(ColumnFamilyDescriptorBuilder
+  
.newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
+builder.addColumnFamily(ColumnFamilyDescriptorBuilder
+  
.newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
+TableDescriptor tabB = builder.build();
 admin1.createTable(tabB);
 admin2.createTable(tabB);
   }
@@ -137,22 +133,24 @@ public class TestNamespaceReplication extends 
TestReplicationBase {
 
   @Test
   public void testNamespaceReplication() throws Exception {
+String peerId = "2";
+
 Table htab1A = connection1.getTable(tabAName);
 Table htab2A = connection2.getTable(tabAName);
 
 Table htab1B = connection1.getTable(tabBName);
 Table htab2B = connection2.getTable(tabBName);
 
-ReplicationPeerConfig rpc = admin.getPeerConfig("2");
-rpc.setReplicateAllUserTables(false);
-admin.updatePeerConfig("2", rpc);
+ReplicationPeerConfig rpc = admin1.getReplicationPeerConfig(peerId);
+admin1.updateReplicationPeerConfig(peerId,
+  
ReplicationPeerConfig.newBuilder(rpc).setReplicateAllUserTables(false).build());
 
 // add ns1 to peer config which replicate to cluster2
-rpc = admin.getPeerConfig("2");
+rpc = admin1.getReplicationPeerConfig(peerId);
 Set<String> namespaces = new HashSet<>();
 namespaces.add(ns1);
-rpc.setNamespaces(namespaces);
-admin.updatePeerConfig("2", rpc);
+admin1.updateReplicationPeerConfig(peerId,
+  

[18/45] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code

2018-03-08 Thread zhangduo
HBASE-19543 Abstract a replication storage interface to extract the zk specific 
code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1727abd3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1727abd3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1727abd3

Branch: refs/heads/HBASE-19397-branch-2
Commit: 1727abd3aa754d071144dd73b6325d2495680736
Parents: 0432b86
Author: zhangduo 
Authored: Fri Dec 22 14:37:28 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hadoop/hbase/util/CollectionUtils.java  |   3 +
 hbase-replication/pom.xml   |  12 +
 .../replication/ReplicationPeerStorage.java |  74 
 .../replication/ReplicationQueueStorage.java| 164 +++
 .../replication/ReplicationStateZKBase.java |   1 -
 .../replication/ReplicationStorageFactory.java  |  49 +++
 .../replication/ZKReplicationPeerStorage.java   | 164 +++
 .../replication/ZKReplicationQueueStorage.java  | 425 +++
 .../replication/ZKReplicationStorageBase.java   |  75 
 .../TestZKReplicationPeerStorage.java   | 171 
 .../TestZKReplicationQueueStorage.java  | 171 
 .../org/apache/hadoop/hbase/master/HMaster.java |  34 +-
 .../hadoop/hbase/master/MasterServices.java |   6 +-
 .../master/procedure/MasterProcedureEnv.java|  24 +-
 .../master/replication/AddPeerProcedure.java|   6 +-
 .../replication/DisablePeerProcedure.java   |   7 +-
 .../master/replication/EnablePeerProcedure.java |   6 +-
 .../master/replication/ModifyPeerProcedure.java |  41 +-
 .../master/replication/RemovePeerProcedure.java |   6 +-
 .../master/replication/ReplicationManager.java  | 199 -
 .../replication/ReplicationPeerManager.java | 331 +++
 .../replication/UpdatePeerConfigProcedure.java  |   7 +-
 .../replication/TestReplicationAdmin.java   |  64 ++-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../hbase/master/TestMasterNoCluster.java   |   3 +-
 .../TestReplicationDisableInactivePeer.java |   6 +-
 26 files changed, 1749 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1727abd3/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
index 875b124..8bbb6f1 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java
@@ -107,6 +107,9 @@ public class CollectionUtils {
 return list.get(list.size() - 1);
   }
 
+  public static <T> List<T> nullToEmpty(List<T> list) {
+return list != null ? list : Collections.emptyList();
+  }
   /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than 
computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee 
that the supplier will

http://git-wip-us.apache.org/repos/asf/hbase/blob/1727abd3/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index 136e832..282e9ca 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -97,6 +97,18 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-zookeeper
+  test-jar
+  test
+
 
 
   org.apache.commons

http://git-wip-us.apache.org/repos/asf/hbase/blob/1727abd3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
new file mode 100644
index 000..e00cd0d
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may 

[25/45] hbase git commit: HBASE-19642 Fix locking for peer modification procedure

2018-03-08 Thread zhangduo
HBASE-19642 Fix locking for peer modification procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e50d809
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e50d809
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e50d809

Branch: refs/heads/HBASE-19397-branch-2
Commit: 1e50d80927dbd4df62f16967c36bd28b0e22685b
Parents: d5b880f
Author: zhangduo 
Authored: Wed Dec 27 18:27:13 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../master/replication/ModifyPeerProcedure.java | 21 +---
 1 file changed, 18 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e50d809/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 279fbc7..a682606 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure
 
   protected String peerId;
 
+  private volatile boolean locked;
+
   // used to keep compatible with old client where we can only returns after 
updateStorage.
   protected ProcedurePrepareLatch latch;
 
@@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure
 
   @Override
   protected LockState acquireLock(MasterProcedureEnv env) {
-return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)
-  ? LockState.LOCK_EVENT_WAIT
-  : LockState.LOCK_ACQUIRED;
+if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
+  return  LockState.LOCK_EVENT_WAIT;
+}
+locked = true;
+return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(MasterProcedureEnv env) {
+locked = false;
 env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
   }
 
   @Override
+  protected boolean holdLock(MasterProcedureEnv env) {
+return true;
+  }
+
+  @Override
+  protected boolean hasLock(MasterProcedureEnv env) {
+return locked;
+  }
+
+  @Override
   protected void rollbackState(MasterProcedureEnv env, PeerModificationState 
state)
   throws IOException, InterruptedException {
 if (state == PeerModificationState.PRE_PEER_MODIFICATION) {



[13/45] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

2018-03-08 Thread zhangduo
HBASE-19536 Client side changes for moving peer modification from zk watcher to 
procedure

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d87d6e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d87d6e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d87d6e7

Branch: refs/heads/HBASE-19397-branch-2
Commit: 5d87d6e7e29d0bcea924ccde2f5ba53b61da881a
Parents: bddaf62
Author: Guanghao Zhang 
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +-
 3 files changed, 238 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d87d6e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 40dac2f..b8546fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2473,7 +2473,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @throws IOException if a remote or network exception occurs
*/
   default void addReplicationPeer(String peerId, ReplicationPeerConfig 
peerConfig)
@@ -2484,7 +2484,7 @@ public interface Admin extends Abortable, Closeable {
   /**
* Add a new replication peer for replicating data to slave cluster.
* @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
* @param enabled peer state, true if ENABLED and false if DISABLED
* @throws IOException if a remote or network exception occurs
*/
@@ -2492,6 +2492,37 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  default Future<Void> addReplicationPeerAsync(String peerId, 
ReplicationPeerConfig peerConfig)
+  throws IOException {
+return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to 
complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException IOException if a remote or network exception occurs
+   */
+  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig 
peerConfig,
+  boolean enabled) throws IOException;
+
+  /**
* Remove a peer and stop the replication.
* @param peerId a short name that identifies the peer
* @throws IOException if a remote or network exception occurs
@@ -2499,6 +2530,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but does not block and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * ExecutionException if there was an error while executing the operation or 
TimeoutException in
+   * case the wait timeout was not long enough to 

[09/45] hbase git commit: HBASE-19520 Add UTs for the new lock type PEER

2018-03-08 Thread zhangduo
HBASE-19520 Add UTs for the new lock type PEER

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89ed3b07
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89ed3b07
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89ed3b07

Branch: refs/heads/HBASE-19397-branch-2
Commit: 89ed3b0787d7864d85e752fcafb1534e6c8dc85b
Parents: a6415f3
Author: Guanghao Zhang 
Authored: Wed Dec 20 16:43:38 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../procedure/MasterProcedureScheduler.java |   9 +-
 .../procedure/TestMasterProcedureScheduler.java |  65 -
 ...TestMasterProcedureSchedulerConcurrency.java | 135 +++
 3 files changed, 201 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/89ed3b07/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 58f9ccd..b18dd6c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -277,6 +277,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 while (tableIter.hasNext()) {
   count += tableIter.next().size();
 }
+
+// Peer queues
+final AvlTreeIterator peerIter = new AvlTreeIterator<>(peerMap);
+while (peerIter.hasNext()) {
+  count += peerIter.next().size();
+}
+
 return count;
   }
 
@@ -807,7 +814,7 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
-   * @return true if the procedure has to wait for the per to be available
+   * @return true if the procedure has to wait for the peer to be available
*/
   public boolean waitPeerExclusiveLock(Procedure procedure, String peerId) {
 schedLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/89ed3b07/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 160f4d2..65757db 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -908,6 +908,27 @@ public class TestMasterProcedureScheduler {
 }
   }
 
+  public static class TestPeerProcedure extends TestProcedure implements 
PeerProcedureInterface {
+private final String peerId;
+private final PeerOperationType opType;
+
+public TestPeerProcedure(long procId, String peerId, PeerOperationType 
opType) {
+  super(procId);
+  this.peerId = peerId;
+  this.opType = opType;
+}
+
+@Override
+public String getPeerId() {
+  return peerId;
+}
+
+@Override
+public PeerOperationType getPeerOperationType() {
+  return opType;
+}
+  }
+
   private static LockProcedure createLockProcedure(LockType lockType, long 
procId) throws Exception {
 LockProcedure procedure = new LockProcedure();
 
@@ -930,22 +951,19 @@ public class TestMasterProcedureScheduler {
 return createLockProcedure(LockType.SHARED, procId);
   }
 
-  private static void assertLockResource(LockedResource resource,
-  LockedResourceType resourceType, String resourceName)
-  {
+  private static void assertLockResource(LockedResource resource, 
LockedResourceType resourceType,
+  String resourceName) {
 assertEquals(resourceType, resource.getResourceType());
 assertEquals(resourceName, resource.getResourceName());
   }
 
-  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure)
-  {
+  private static void assertExclusiveLock(LockedResource resource, 
Procedure procedure) {
 assertEquals(LockType.EXCLUSIVE, resource.getLockType());
 assertEquals(procedure, resource.getExclusiveLockOwnerProcedure());
 assertEquals(0, resource.getSharedLockCount());
   }
 
-  private static void 

[37/45] hbase git commit: HBASE-19635 Introduce a thread at RS side to call reportProcedureDone

2018-03-08 Thread zhangduo
HBASE-19635 Introduce a thread at RS side to call reportProcedureDone


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e898d258
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e898d258
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e898d258

Branch: refs/heads/HBASE-19397-branch-2
Commit: e898d258be947c1649bcb314bf56eed76512bd9c
Parents: d956a28
Author: zhangduo 
Authored: Wed Dec 27 20:13:42 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../src/main/protobuf/RegionServerStatus.proto  |   5 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  15 ++-
 .../hbase/regionserver/HRegionServer.java   |  72 
 .../RemoteProcedureResultReporter.java  | 111 +++
 .../handler/RSProcedureHandler.java |   2 +-
 5 files changed, 149 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e898d258/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 4f75941..3f836cd 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -146,7 +146,7 @@ message RegionSpaceUseReportRequest {
 message RegionSpaceUseReportResponse {
 }
 
-message ReportProcedureDoneRequest {
+message RemoteProcedureResult {
   required uint64 proc_id = 1;
   enum Status {
 SUCCESS = 1;
@@ -155,6 +155,9 @@ message ReportProcedureDoneRequest {
   required Status status = 2;
   optional ForeignExceptionMessage error = 3;
 }
+message ReportProcedureDoneRequest {
+  repeated RemoteProcedureResult result = 1;
+}
 
 message ReportProcedureDoneResponse {
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e898d258/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6ce0d39..8f92041 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -268,6 +268,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RemoteProcedureResult;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -2236,12 +2237,14 @@ public class MasterRpcServices extends RSRpcServices
   @Override
   public ReportProcedureDoneResponse reportProcedureDone(RpcController 
controller,
   ReportProcedureDoneRequest request) throws ServiceException {
-if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
-  master.remoteProcedureCompleted(request.getProcId());
-} else {
-  master.remoteProcedureFailed(request.getProcId(),
-RemoteProcedureException.fromProto(request.getError()));
-}
+request.getResultList().forEach(result -> {
+  if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) {
+master.remoteProcedureCompleted(result.getProcId());
+  } else {
+master.remoteProcedureFailed(result.getProcId(),
+  RemoteProcedureException.fromProto(result.getError()));
+  }
+});
 return ReportProcedureDoneResponse.getDefaultInstance();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e898d258/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index c95ac37..81febc0 100644
--- 

[45/45] hbase git commit: HBASE-20082 Fix findbugs errors only on master which are introduced by HBASE-19397

2018-03-08 Thread zhangduo
HBASE-20082 Fix findbugs errors only on master which are introduced by 
HBASE-19397


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c9bcc0b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c9bcc0b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c9bcc0b9

Branch: refs/heads/HBASE-19397-branch-2
Commit: c9bcc0b9b1735f00ab58d1b207368b8c4e22c6c2
Parents: 0f29bb7
Author: zhangduo 
Authored: Mon Feb 26 10:22:09 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hadoop/hbase/master/replication/RefreshPeerProcedure.java  | 2 ++
 .../hbase/replication/regionserver/ReplicationSourceManager.java   | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c9bcc0b9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
index 1253ef9..ba9bcdc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -50,6 +50,8 @@ public class RefreshPeerProcedure extends 
Procedure
 
   private PeerOperationType type;
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 
"IS2_INCONSISTENT_SYNC",
+  justification = "Will never change after construction")
   private ServerName targetServer;
 
   private boolean dispatched;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c9bcc0b9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 6965f55..d11dc8e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -402,7 +402,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 toStartup.add(replicationSource);
   }
 }
-for (ReplicationSourceInterface replicationSource : oldsources) {
+for (ReplicationSourceInterface replicationSource : toStartup) {
   replicationSource.startup();
 }
   }



[15/45] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations

2018-03-08 Thread zhangduo
HBASE-19564 Procedure id is missing in the response of peer related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a6415f39
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a6415f39
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a6415f39

Branch: refs/heads/HBASE-19397-branch-2
Commit: a6415f39a613325edcd8ac043d275fa1f18e64b6
Parents: 5d87d6e
Author: zhangduo 
Authored: Wed Dec 20 20:57:37 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hadoop/hbase/master/MasterRpcServices.java  | 24 ++--
 .../master/replication/ModifyPeerProcedure.java |  4 +---
 2 files changed, 13 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a6415f39/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 7bd355a..6ce0d39 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1868,10 +1868,10 @@ public class MasterRpcServices extends RSRpcServices
   public AddReplicationPeerResponse addReplicationPeer(RpcController 
controller,
   AddReplicationPeerRequest request) throws ServiceException {
 try {
-  master.addReplicationPeer(request.getPeerId(),
-ReplicationPeerConfigUtil.convert(request.getPeerConfig()), 
request.getPeerState()
-.getState().equals(ReplicationState.State.ENABLED));
-  return AddReplicationPeerResponse.newBuilder().build();
+  long procId = master.addReplicationPeer(request.getPeerId(),
+ReplicationPeerConfigUtil.convert(request.getPeerConfig()),
+
request.getPeerState().getState().equals(ReplicationState.State.ENABLED));
+  return AddReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1881,8 +1881,8 @@ public class MasterRpcServices extends RSRpcServices
   public RemoveReplicationPeerResponse removeReplicationPeer(RpcController 
controller,
   RemoveReplicationPeerRequest request) throws ServiceException {
 try {
-  master.removeReplicationPeer(request.getPeerId());
-  return RemoveReplicationPeerResponse.newBuilder().build();
+  long procId = master.removeReplicationPeer(request.getPeerId());
+  return 
RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1892,8 +1892,8 @@ public class MasterRpcServices extends RSRpcServices
   public EnableReplicationPeerResponse enableReplicationPeer(RpcController 
controller,
   EnableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.enableReplicationPeer(request.getPeerId());
-  return EnableReplicationPeerResponse.newBuilder().build();
+  long procId = master.enableReplicationPeer(request.getPeerId());
+  return 
EnableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1903,8 +1903,8 @@ public class MasterRpcServices extends RSRpcServices
   public DisableReplicationPeerResponse disableReplicationPeer(RpcController 
controller,
   DisableReplicationPeerRequest request) throws ServiceException {
 try {
-  master.disableReplicationPeer(request.getPeerId());
-  return DisableReplicationPeerResponse.newBuilder().build();
+  long procId = master.disableReplicationPeer(request.getPeerId());
+  return 
DisableReplicationPeerResponse.newBuilder().setProcId(procId).build();
 } catch (ReplicationException | IOException e) {
   throw new ServiceException(e);
 }
@@ -1930,9 +1930,9 @@ public class MasterRpcServices extends RSRpcServices
   public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller,
   UpdateReplicationPeerConfigRequest request) throws ServiceException {
 try {
-  master.updateReplicationPeerConfig(request.getPeerId(),
+  long procId = master.updateReplicationPeerConfig(request.getPeerId(),
 ReplicationPeerConfigUtil.convert(request.getPeerConfig()));
-  return UpdateReplicationPeerConfigResponse.newBuilder().build();
+  return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build();
 } catch 

[41/45] hbase git commit: HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs

2018-03-08 Thread zhangduo
HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint 
UT hangs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80fdf87e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80fdf87e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80fdf87e

Branch: refs/heads/HBASE-19397-branch-2
Commit: 80fdf87e1cacfbf2b6699a76d0f5ca0477c64174
Parents: 17f903e
Author: huzheng 
Authored: Wed Jan 10 15:00:30 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/master/HMaster.java   | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80fdf87e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 71af62b..cd967ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -40,7 +40,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Objects;
-import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
@@ -71,6 +70,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -3407,13 +3407,12 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   cpHost.preGetReplicationPeerConfig(peerId);
 }
 LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + 
peerId);
-Optional peerConfig =
-  this.replicationPeerManager.getPeerConfig(peerId);
-
+ReplicationPeerConfig peerConfig = 
this.replicationPeerManager.getPeerConfig(peerId)
+.orElseThrow(() -> new ReplicationPeerNotFoundException(peerId));
 if (cpHost != null) {
   cpHost.postGetReplicationPeerConfig(peerId);
 }
-return peerConfig.orElse(null);
+return peerConfig;
   }
 
   @Override



[04/45] hbase git commit: HBASE-19987 error-prone 2.2.0

2018-03-08 Thread zhangduo
HBASE-19987 error-prone 2.2.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af9a108a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af9a108a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af9a108a

Branch: refs/heads/HBASE-19397-branch-2
Commit: af9a108a93a19d6eaadf3eec1a8d482728a3ac0b
Parents: ac18ae2
Author: Mike Drob 
Authored: Mon Feb 12 17:13:32 2018 -0600
Committer: Mike Drob 
Committed: Wed Mar 7 21:21:05 2018 -0600

--
 hbase-build-support/hbase-error-prone/pom.xml  | 6 ++
 .../java/org/apache/hadoop/hbase/errorprone/AlwaysPasses.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/ServerLoad.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java | 2 +-
 .../org/apache/hadoop/hbase/filter/BigDecimalComparator.java   | 1 +
 .../java/org/apache/hadoop/hbase/filter/BinaryComparator.java  | 1 +
 .../org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java | 1 +
 .../java/org/apache/hadoop/hbase/filter/BitComparator.java | 1 +
 .../java/org/apache/hadoop/hbase/filter/LongComparator.java| 1 +
 .../java/org/apache/hadoop/hbase/filter/NullComparator.java| 1 +
 .../org/apache/hadoop/hbase/filter/RegexStringComparator.java  | 1 +
 .../org/apache/hadoop/hbase/filter/SubstringComparator.java| 1 +
 .../org/apache/hadoop/hbase/filter/ByteArrayComparable.java| 1 +
 pom.xml| 2 +-
 14 files changed, 19 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af9a108a/hbase-build-support/hbase-error-prone/pom.xml
--
diff --git a/hbase-build-support/hbase-error-prone/pom.xml 
b/hbase-build-support/hbase-error-prone/pom.xml
index 6037780..7675323 100644
--- a/hbase-build-support/hbase-error-prone/pom.xml
+++ b/hbase-build-support/hbase-error-prone/pom.xml
@@ -37,6 +37,12 @@
   error_prone_annotation
   ${error-prone.version}
   provided
+  
+
+  com.google.guava
+  guava
+
+  
 
 
   

[12/45] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

2018-03-08 Thread zhangduo
HBASE-19524 Master side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bddaf62a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bddaf62a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bddaf62a

Branch: refs/heads/HBASE-19397-branch-2
Commit: bddaf62acc78f2aec159a4a1fb6edb5dc8f82be5
Parents: 3ee18a0
Author: zhangduo 
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../procedure2/RemoteProcedureDispatcher.java   |  3 +-
 .../src/main/protobuf/MasterProcedure.proto | 21 -
 .../src/main/protobuf/RegionServerStatus.proto  |  3 +-
 .../src/main/protobuf/Replication.proto |  5 +
 .../replication/ReplicationPeersZKImpl.java |  4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 93 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |  4 +-
 .../hadoop/hbase/master/MasterServices.java | 26 +++---
 .../assignment/RegionTransitionProcedure.java   | 13 +--
 .../master/procedure/MasterProcedureEnv.java|  5 +
 .../master/procedure/ProcedurePrepareLatch.java |  2 +-
 .../master/replication/AddPeerProcedure.java| 97 
 .../replication/DisablePeerProcedure.java   | 70 ++
 .../master/replication/EnablePeerProcedure.java | 69 ++
 .../master/replication/ModifyPeerProcedure.java | 97 +---
 .../master/replication/RefreshPeerCallable.java | 67 --
 .../replication/RefreshPeerProcedure.java   | 28 --
 .../master/replication/RemovePeerProcedure.java | 69 ++
 .../master/replication/ReplicationManager.java  | 76 ---
 .../replication/UpdatePeerConfigProcedure.java  | 92 +++
 .../hbase/regionserver/HRegionServer.java   |  5 +-
 .../regionserver/RefreshPeerCallable.java   | 70 ++
 .../hbase/master/MockNoopMasterServices.java| 23 +++--
 .../replication/DummyModifyPeerProcedure.java   | 13 ++-
 24 files changed, 733 insertions(+), 222 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bddaf62a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 7e3dde6..fb852c3 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -247,9 +247,8 @@ public abstract class RemoteProcedureDispatcher

[32/45] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source

2018-03-08 Thread zhangduo
HBASE-19623 Create replication endpoint asynchronously when adding a 
replication source


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2aa4c797
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2aa4c797
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2aa4c797

Branch: refs/heads/HBASE-19397-branch-2
Commit: 2aa4c7979fb2f424f3348d084a841e2c14298682
Parents: ff026fd
Author: zhangduo 
Authored: Tue Jan 2 13:25:58 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   8 ++
 .../hbase/replication/ReplicationPeers.java |  18 +--
 .../replication/ZKReplicationPeerStorage.java   |   7 +-
 .../replication/TestReplicationStateBasic.java  |  20 +---
 .../TestZKReplicationPeerStorage.java   |  14 +--
 .../HBaseInterClusterReplicationEndpoint.java   |  17 ++-
 .../RecoveredReplicationSource.java |  13 +--
 .../regionserver/ReplicationSource.java | 110 +++
 .../ReplicationSourceInterface.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  47 +---
 .../client/TestAsyncReplicationAdminApi.java|   2 -
 .../replication/TestReplicationAdmin.java   |   2 -
 .../replication/ReplicationSourceDummy.java |   7 +-
 .../replication/TestReplicationSource.java  |   5 +-
 .../TestReplicationSourceManager.java   |   8 +-
 15 files changed, 116 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2aa4c797/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 4846018..2da3cce 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,14 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Test whether the peer is enabled.
+   * @return {@code true} if enabled, otherwise {@code false}.
+   */
+  default boolean isPeerEnabled() {
+return getPeerState() == PeerState.ENABLED;
+  }
+
+  /**
* Get the peer config object
* @return the ReplicationPeerConfig for this peer
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/2aa4c797/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 422801b..45940a5 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -86,21 +87,6 @@ public class ReplicationPeers {
   }
 
   /**
-   * Get the peer state for the specified connected remote slave cluster. The 
value might be read
-   * from cache, so it is recommended to use {@link #peerStorage } to read 
storage directly if
-   * reading the state after enabling or disabling it.
-   * @param peerId a short that identifies the cluster
-   * @return true if replication is enabled, false otherwise.
-   */
-  public boolean isPeerEnabled(String peerId) {
-ReplicationPeer replicationPeer = this.peerCache.get(peerId);
-if (replicationPeer == null) {
-  throw new IllegalArgumentException("Peer with id= " + peerId + " is not 
cached");
-}
-return replicationPeer.getPeerState() == PeerState.ENABLED;
-  }
-
-  /**
* Returns the ReplicationPeerImpl for the specified cached peer. This 
ReplicationPeer will
* continue to track changes to the Peer's state and config. This method 
returns null if no peer
* has been cached with the given peerId.
@@ -117,7 +103,7 @@ public class ReplicationPeers {
* @return a Set of Strings for peerIds
*/
   public Set getAllPeerIds() {
-return peerCache.keySet();
+return Collections.unmodifiableSet(peerCache.keySet());
   }
 
   public static Configuration 
getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,


[03/45] hbase git commit: HBASE-20108 Remove jline exclusion from ZooKeeper

2018-03-08 Thread zhangduo
HBASE-20108 Remove jline exclusion from ZooKeeper

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac18ae27
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac18ae27
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac18ae27

Branch: refs/heads/HBASE-19397-branch-2
Commit: ac18ae272217903976fe75a6d343a427e583d59d
Parents: 96a42b7
Author: Josh Elser 
Authored: Wed Feb 28 17:11:02 2018 -0500
Committer: Josh Elser 
Committed: Wed Mar 7 14:36:46 2018 -0500

--
 bin/hbase  | 3 +++
 bin/hbase.cmd  | 1 +
 hbase-assembly/pom.xml | 4 
 hbase-assembly/src/main/assembly/hadoop-two-compat.xml | 7 +++
 pom.xml| 7 +++
 5 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac18ae27/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index f2d4251..70cae9c 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -361,6 +361,9 @@ elif [ "$COMMAND" = "hfile" ] ; then
   CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
 elif [ "$COMMAND" = "zkcli" ] ; then
   CLASS="org.apache.hadoop.hbase.zookeeper.ZKMainServer"
+  for f in $HBASE_HOME/lib/zkcli/*.jar; do
+CLASSPATH="${CLASSPATH}:$f";
+  done
 elif [ "$COMMAND" = "upgrade" ] ; then
   echo "This command was used to upgrade to HBase 0.96, it was removed in 
HBase 2.0.0."
   echo "Please follow the documentation at 
http://hbase.apache.org/book.html#upgrading."

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac18ae27/bin/hbase.cmd
--
diff --git a/bin/hbase.cmd b/bin/hbase.cmd
index 8e4a59f..fbeb1f8 100644
--- a/bin/hbase.cmd
+++ b/bin/hbase.cmd
@@ -425,6 +425,7 @@ goto :eof
 
 :zkcli
   set CLASS=org.apache.hadoop.hbase.zookeeper.ZKMainServer
+  set CLASSPATH=!CLASSPATH!;%HBASE_HOME%\lib\zkcli\*
   goto :eof
 
 :mapredcp

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac18ae27/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 2ce7224..eda6491 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -271,6 +271,10 @@
   org.apache.hbase
   hbase-zookeeper
 
+
+  jline
+  jline
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac18ae27/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
--
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml 
b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 3579476..3c940e9 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -68,6 +68,7 @@
   org.jruby:jruby-complete
   com.sun.jersey:*
   com.sun.jersey.contribs:*
+  jline:jline
 
   
 
@@ -105,6 +106,12 @@
 org.jruby:jruby-complete
   
 
+
+  lib/zkcli
+  
+jline:jline
+  
+
   
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac18ae27/pom.xml
--
diff --git a/pom.xml b/pom.xml
index e2d37f8..30649c7 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1341,6 +1341,8 @@
 thrift
 0.9.3
 3.4.10
+
+0.9.94
 1.7.25
 4.0.3
 2.4.1
@@ -1788,6 +1790,11 @@
 
   
   
+jline
+jline
+${jline.version}
+  
+  
 org.apache.thrift
 libthrift
 ${thrift.version}



[24/45] hbase git commit: HBASE-19707 Race in start and terminate of a replication source after we async start replication endpoint

2018-03-08 Thread zhangduo
HBASE-19707 Race in start and terminate of a replication source after we async 
start replication endpoint


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/77674d56
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/77674d56
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/77674d56

Branch: refs/heads/HBASE-19397-branch-2
Commit: 77674d56059f443e2412b1c076d1d268a350f9cc
Parents: b1d7ec3
Author: zhangduo 
Authored: Fri Jan 5 18:28:44 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../RecoveredReplicationSource.java |  16 +-
 .../regionserver/ReplicationSource.java | 203 ++-
 2 files changed, 116 insertions(+), 103 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/77674d56/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index 1be9a88..3cae0f2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -68,7 +68,7 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   LOG.debug("Someone has beat us to start a worker thread for wal group " 
+ walGroupId);
 } else {
   LOG.debug("Starting up worker for wal group " + walGroupId);
-  worker.startup(getUncaughtExceptionHandler());
+  worker.startup(this::uncaughtException);
   worker.setWALReader(
 startNewWALReader(worker.getName(), walGroupId, queue, 
worker.getStartPosition()));
   workerThreads.put(walGroupId, worker);
@@ -76,13 +76,13 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   @Override
-  protected ReplicationSourceWALReader startNewWALReader(String threadName,
-  String walGroupId, PriorityBlockingQueue queue, long 
startPosition) {
-ReplicationSourceWALReader walReader = new 
RecoveredReplicationSourceWALReader(fs,
-conf, queue, startPosition, walEntryFilter, this);
-Threads.setDaemonThreadRunning(walReader, threadName
-+ ".replicationSource.replicationWALReaderThread." + walGroupId + "," 
+ queueId,
-  getUncaughtExceptionHandler());
+  protected ReplicationSourceWALReader startNewWALReader(String threadName, 
String walGroupId,
+  PriorityBlockingQueue queue, long startPosition) {
+ReplicationSourceWALReader walReader =
+  new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, 
walEntryFilter, this);
+Threads.setDaemonThreadRunning(walReader,
+  threadName + ".replicationSource.replicationWALReaderThread." + 
walGroupId + "," + queueId,
+  this::uncaughtException);
 return walReader;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/77674d56/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 0092251..09b6cc1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -75,7 +75,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * 
  */
 @InterfaceAudience.Private
-public class ReplicationSource extends Thread implements 
ReplicationSourceInterface {
+public class ReplicationSource implements ReplicationSourceInterface {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationSource.class);
   // Queues of logs to process, entry in format of walGroupId->queue,
@@ -114,10 +114,8 @@ public class ReplicationSource extends Thread implements 
ReplicationSourceInterf
   private MetricsSource metrics;
   // WARN threshold for the number of queued logs, defaults to 2
   private int logQueueWarnThreshold;
-  // whether the replication endpoint has been initialized
-  private volatile boolean endpointInitialized = false;
   // ReplicationEndpoint which will handle the actual replication
-  private ReplicationEndpoint replicationEndpoint;
+  private volatile ReplicationEndpoint 

[16/45] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure

2018-03-08 Thread zhangduo
HBASE-19525 RS side changes for moving peer modification from zk watcher to 
procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0432b864
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0432b864
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0432b864

Branch: refs/heads/HBASE-19397-branch-2
Commit: 0432b864da8e4ca06a66a45980fe3a34e94653b0
Parents: e4d0745
Author: huzheng 
Authored: Wed Dec 20 10:47:18 2017 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:46:33 2018 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  13 +-
 .../hbase/replication/ReplicationListener.java  |  14 --
 .../hbase/replication/ReplicationPeer.java  |  28 ++-
 .../replication/ReplicationPeerZKImpl.java  | 186 ---
 .../replication/ReplicationPeersZKImpl.java |  19 +-
 .../replication/ReplicationTrackerZKImpl.java   |  73 +-
 .../regionserver/ReplicationSourceService.java  |   6 +
 .../handler/RSProcedureHandler.java |   3 +
 .../replication/BaseReplicationEndpoint.java|   2 +-
 .../regionserver/PeerProcedureHandler.java  |  38 
 .../regionserver/PeerProcedureHandlerImpl.java  |  81 +++
 .../regionserver/RefreshPeerCallable.java   |  39 +++-
 .../replication/regionserver/Replication.java   |   9 +
 .../regionserver/ReplicationSource.java |   8 +-
 .../regionserver/ReplicationSourceManager.java  |  37 ++-
 .../TestReplicationAdminUsingProcedure.java | 226 +++
 .../replication/DummyModifyPeerProcedure.java   |  48 
 .../TestDummyModifyPeerProcedure.java   |  80 ---
 .../TestReplicationTrackerZKImpl.java   |  61 -
 .../TestReplicationSourceManager.java   |  32 ++-
 21 files changed, 532 insertions(+), 482 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0432b864/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 3033da7..f11fcf6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -191,7 +193,7 @@ public final class ProtobufUtil {
* byte array that is bytes.length plus {@link 
ProtobufMagic#PB_MAGIC}.length.
*/
   public static byte [] prependPBMagic(final byte [] bytes) {
-return Bytes.add(ProtobufMagic.PB_MAGIC, bytes);
+return Bytes.add(PB_MAGIC, bytes);
   }
 
   /**
@@ -216,10 +218,11 @@ public final class ProtobufUtil {
* @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
-  public static void expectPBMagicPrefix(final byte [] bytes) throws 
DeserializationException {
+  public static void expectPBMagicPrefix(final byte[] bytes) throws 
DeserializationException {
 if (!isPBMagicPrefix(bytes)) {
-  throw new DeserializationException("Missing pb magic " +
-  Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix");
+  String bytesPrefix = bytes == null ? "null" : 
Bytes.toStringBinary(bytes, 0, PB_MAGIC.length);
+  throw new DeserializationException(
+  "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " 
+ bytesPrefix);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0432b864/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 4472846..520a4cd 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.shaded.protobuf;
 
+import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -277,7 +279,7 @@ public final class ProtobufUtil {
* byte array that 

[01/45] hbase git commit: HBASE-20134 dev-support scripts should use mktemp instead of hard-coding /tmp. [Forced Update!]

2018-03-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397-branch-2 f4df3fb3d -> c9bcc0b9b (forced update)


HBASE-20134 dev-support scripts should use mktemp instead of hard-coding /tmp.

Signed-off-by: Mike Drob 
Signed-off-by: Umesh Agashe 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5c8713b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5c8713b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5c8713b

Branch: refs/heads/HBASE-19397-branch-2
Commit: f5c8713bdd142b7cc6555ad696453504a1dc753c
Parents: cbbefe7
Author: Sean Busbey 
Authored: Mon Mar 5 23:40:25 2018 -0600
Committer: Sean Busbey 
Committed: Wed Mar 7 08:39:19 2018 -0600

--
 dev-support/hbase_nightly_source-artifact.sh  | 11 +--
 dev-support/jenkins-scripts/generate-hbase-website.sh | 13 ++---
 2 files changed, 11 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f5c8713b/dev-support/hbase_nightly_source-artifact.sh
--
diff --git a/dev-support/hbase_nightly_source-artifact.sh 
b/dev-support/hbase_nightly_source-artifact.sh
index 375d121..56a3d46 100755
--- a/dev-support/hbase_nightly_source-artifact.sh
+++ b/dev-support/hbase_nightly_source-artifact.sh
@@ -21,7 +21,7 @@ function usage {
   echo "Usage: ${0} [options] /path/to/component/checkout"
   echo ""
   echo "--intermediate-file-dir /path/to/use  Path for writing listings 
and diffs. must exist."
-  echo "  defaults to making a 
directory in /tmp."
+  echo "  defaults to making a 
directory via mktemp."
   echo "--unpack-temp-dir /path/to/usePath for unpacking tarball. 
default to"
   echo "  'unpacked_src_tarball' in 
intermediate directory."
   echo "--maven-m2-initial /path/to/use   Path for maven artifacts 
while building in"
@@ -67,11 +67,10 @@ fi
 component_dir="$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
 
 if [ -z "${working_dir}" ]; then
-  working_dir=/tmp
-  while [[ -e ${working_dir} ]]; do
-working_dir=/tmp/hbase-srctarball-test-${RANDOM}.${RANDOM}
-  done
-  mkdir "${working_dir}"
+  if ! working_dir="$(mktemp -d -t hbase-srctarball-test)" ; then
+echo "Failed to create temporary working directory. Please specify via 
--unpack-temp-dir"
+exit 1
+  fi
 else
   # absolutes please
   working_dir="$(cd "$(dirname "${working_dir}")"; pwd)/$(basename 
"${working_dir}")"

http://git-wip-us.apache.org/repos/asf/hbase/blob/f5c8713b/dev-support/jenkins-scripts/generate-hbase-website.sh
--
diff --git a/dev-support/jenkins-scripts/generate-hbase-website.sh 
b/dev-support/jenkins-scripts/generate-hbase-website.sh
index c25c895..b6277d0 100644
--- a/dev-support/jenkins-scripts/generate-hbase-website.sh
+++ b/dev-support/jenkins-scripts/generate-hbase-website.sh
@@ -40,7 +40,7 @@ function usage {
   echo ""
   echo "--working-dir /path/to/use  Path for writing logs and a local 
checkout of hbase-site repo."
   echo "if given must exist."
-  echo "defaults to making a directory in 
/tmp."
+  echo "defaults to making a directory via 
mktemp."
   echo "--local-repo /path/for/maven/.m2  Path for putting local maven 
repo."
   echo "if given must exist."
   echo "defaults to making a clean directory 
in --working-dir."
@@ -77,12 +77,11 @@ fi
 component_dir="$(cd "$(dirname "$1")"; pwd)/$(basename "$1")"
 
 if [ -z "${working_dir}" ]; then
-  echo "[DEBUG] defaulting to creating a directory in /tmp"
-  working_dir=/tmp
-  while [[ -e ${working_dir} ]]; do
-working_dir=/tmp/hbase-generate-website-${RANDOM}.${RANDOM}
-  done
-  mkdir "${working_dir}"
+  echo "[DEBUG] defaulting to creating a directory via mktemp"
+  if ! working_dir="$(mktemp -d -t hbase-generate-website)" ; then
+echo "Failed to create temporary working directory. Please specify via 
--working-dir"
+exit 1
+  fi
 else
   # absolutes please
   working_dir="$(cd "$(dirname "${working_dir}")"; pwd)/$(basename 
"${working_dir}")"



[10/18] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-03-08 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e7ac767
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e7ac767
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e7ac767

Branch: refs/heads/HBASE-19064
Commit: 1e7ac767aa1cbc0616794ea5977d01b32d03b8cf
Parents: 9c8b3c3
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   3 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 153 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e7ac767/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ce8dafa..4816d77 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -430,6 +430,13 @@ public abstract class AbstractFSWAL 
implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the 
first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e7ac767/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 0bee9d6..17133ed 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
 waitOnShutdownInSeconds = 
conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e7ac767/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- 

[05/18] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

2018-03-08 Thread zhangduo
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6044a880
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6044a880
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6044a880

Branch: refs/heads/HBASE-19064
Commit: 6044a88030a17ee46d71462c5733b621eafcece9
Parents: df9eeae
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 ++---
 .../hbase/replication/SyncReplicationState.java | 17 +
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 ++
 .../replication/ZKReplicationPeerStorage.java   | 25 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 ---
 ...ransitPeerSyncReplicationStateProcedure.java |  9 ---
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6044a880/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 86b49ea..5096824 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -398,7 +398,7 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationState.State.ENABLED == 
desc.getState().getState();
 ReplicationPeerConfig config = convert(desc.getConfig());
 return new ReplicationPeerDescription(desc.getId(), enabled, config,
-
SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+  toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -406,17 +406,33 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationPeerDescription.Builder builder =
 ReplicationProtos.ReplicationPeerDescription.newBuilder();
 builder.setId(desc.getPeerId());
+
 ReplicationProtos.ReplicationState.Builder stateBuilder =
 ReplicationProtos.ReplicationState.newBuilder();
 stateBuilder.setState(desc.isEnabled() ? 
ReplicationProtos.ReplicationState.State.ENABLED :
 ReplicationProtos.ReplicationState.State.DISABLED);
 builder.setState(stateBuilder.build());
+
 builder.setConfig(convert(desc.getPeerConfig()));
-builder.setSyncReplicationState(
-  
ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+
builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
 return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+  toSyncReplicationState(SyncReplicationState state) {
+ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder 
=
+ReplicationProtos.SyncReplicationState.newBuilder();
+syncReplicationStateBuilder
+
.setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+  toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
   Map tableCfs, ReplicationPeerConfig peerConfig) 
{
 ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(peerConfig);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6044a880/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java

[06/18] hbase git commit: HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster

2018-03-08 Thread zhangduo
HBASE-19973 Implement a procedure to replay sync replication wal for standby 
cluster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/606314a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/606314a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/606314a2

Branch: refs/heads/HBASE-19064
Commit: 606314a2b35e48e2aa2e723d9f2e6f1390ce880d
Parents: 669e80d
Author: Guanghao Zhang 
Authored: Fri Mar 2 18:43:25 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  22 +++
 .../apache/hadoop/hbase/executor/EventType.java |   9 +-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +
 .../hadoop/hbase/master/MasterServices.java |   6 +
 .../procedure/PeerProcedureInterface.java   |   3 +-
 .../hbase/master/procedure/PeerQueue.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java| 114 +++
 .../ReplaySyncReplicationWALManager.java| 139 +
 .../ReplaySyncReplicationWALProcedure.java  | 193 +++
 .../hbase/regionserver/HRegionServer.java   |   9 +-
 .../ReplaySyncReplicationWALCallable.java   | 149 ++
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   5 +
 .../hbase/master/MockNoopMasterServices.java|   6 +
 .../master/TestRecoverStandbyProcedure.java | 186 ++
 16 files changed, 853 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/606314a2/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index fe08be5..2bb912b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -436,3 +436,25 @@ message TransitPeerSyncReplicationStateStateData {
   optional SyncReplicationState fromState = 1;
   required SyncReplicationState toState = 2;
 }
+
+enum RecoverStandbyState {
+  RENAME_SYNC_REPLICATION_WALS_DIR = 1;
+  INIT_WORKERS = 2;
+  DISPATCH_TASKS = 3;
+  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+}
+
+message RecoverStandbyStateData {
+  required string peer_id = 1;
+}
+
+message ReplaySyncReplicationWALStateData {
+  required string peer_id = 1;
+  required string wal = 2;
+  optional ServerName target_server = 3;
+}
+
+message ReplaySyncReplicationWALParameter {
+  required string peer_id = 1;
+  required string wal = 2;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/606314a2/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 922deb8..ad38d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -281,7 +281,14 @@ public enum EventType {
*
* RS_REFRESH_PEER
*/
-  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
+  RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER),
+
+  /**
+   * RS replay sync replication wal.
+   *
+   * RS_REPLAY_SYNC_REPLICATION_WAL
+   */
+  RS_REPLAY_SYNC_REPLICATION_WAL(85, 
ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/606314a2/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 7f130d1..ea97354 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -47,7 +47,8 @@ public enum ExecutorType {
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
   RS_OPEN_PRIORITY_REGION(30),
-  RS_REFRESH_PEER   (31);
+  RS_REFRESH_PEER(31),
+  RS_REPLAY_SYNC_REPLICATION_WAL(32);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/606314a2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

[17/18] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-03-08 Thread zhangduo
HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48e705ae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48e705ae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48e705ae

Branch: refs/heads/HBASE-19064
Commit: 48e705aeeb8e08747199598de17127f5700b4144
Parents: 074891f
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 ++-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  65 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  15 +-
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java | 107 +++-
 ...ransitPeerSyncReplicationStateProcedure.java | 175 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  55 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 752 insertions(+), 325 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/48e705ae/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 69565a7..79b3a1d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/48e705ae/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * 
+ * To developer, here we do not store the new sync replication state since it 
is just an
+ * intermediate state and this class is 

[16/18] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-03-08 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/48e705ae/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index ba8bb9e..0ad821e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationListener;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -131,6 +132,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   // For recovered source, the queue id's format is peer_id-servername-*
   private final ConcurrentMap> 
walsByIdRecoveredQueues;
 
+  private final SyncReplicationPeerMappingManager 
syncReplicationPeerMappingManager;
+
   private final Configuration conf;
   private final FileSystem fs;
   // The paths to the latest log of each wal group, for new coming peers
@@ -165,9 +168,8 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   public ReplicationSourceManager(ReplicationQueueStorage queueStorage,
   ReplicationPeers replicationPeers, ReplicationTracker 
replicationTracker, Configuration conf,
   Server server, FileSystem fs, Path logDir, Path oldLogDir, UUID 
clusterId,
-  WALFileLengthProvider walFileLengthProvider) throws IOException {
-// CopyOnWriteArrayList is thread-safe.
-// Generally, reading is more than modifying.
+  WALFileLengthProvider walFileLengthProvider,
+  SyncReplicationPeerMappingManager syncReplicationPeerMappingManager) 
throws IOException {
 this.sources = new ConcurrentHashMap<>();
 this.queueStorage = queueStorage;
 this.replicationPeers = replicationPeers;
@@ -180,10 +182,11 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 this.fs = fs;
 this.logDir = logDir;
 this.oldLogDir = oldLogDir;
-this.sleepBeforeFailover = 
conf.getLong("replication.sleep.before.failover", 3); // 30
-   
  // seconds
+// 30 seconds
+this.sleepBeforeFailover = 
conf.getLong("replication.sleep.before.failover", 3);
 this.clusterId = clusterId;
 this.walFileLengthProvider = walFileLengthProvider;
+this.syncReplicationPeerMappingManager = syncReplicationPeerMappingManager;
 this.replicationTracker.registerListener(this);
 // It's preferable to failover 1 RS at a time, but with good zk servers
 // more could be processed at the same time.
@@ -244,8 +247,11 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   }
 
   /**
-   * 1. Add peer to replicationPeers 2. Add the normal source and related 
replication queue 3. Add
-   * HFile Refs
+   * 
+   * Add peer to replicationPeers
+   * Add the normal source and related replication queue
+   * Add HFile Refs
+   * 
* @param peerId the id of replication peer
*/
   public void addPeer(String peerId) throws IOException {
@@ -264,13 +270,16 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   }
 
   /**
-   * 1. Remove peer for replicationPeers 2. Remove all the recovered sources 
for the specified id
-   * and related replication queues 3. Remove the normal source and related 
replication queue 4.
-   * Remove HFile Refs
+   * 
+   * Remove peer for replicationPeers
+   * Remove all the recovered sources for the specified id and related 
replication queues
+   * Remove the normal source and related replication queue
+   * Remove HFile Refs
+   * 
* @param peerId the id of the replication peer
*/
   public void removePeer(String peerId) {
-replicationPeers.removePeer(peerId);
+ReplicationPeer peer = replicationPeers.removePeer(peerId);
 String terminateMessage = "Replication stream was removed by a user";
 List oldSourcesToDelete = new ArrayList<>();
 // synchronized on oldsources to avoid adding recovered source for the 
to-be-removed peer
@@ -301,7 +310,10 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   deleteQueue(peerId);
   

[02/18] hbase git commit: HBASE-19987 error-prone 2.2.0

2018-03-08 Thread zhangduo
HBASE-19987 error-prone 2.2.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/641e870e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/641e870e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/641e870e

Branch: refs/heads/HBASE-19064
Commit: 641e870e1159fc67d7416ce3cab344869c304c1a
Parents: 37d91cd
Author: Mike Drob 
Authored: Mon Feb 12 17:13:32 2018 -0600
Committer: Mike Drob 
Committed: Wed Mar 7 13:30:51 2018 -0600

--
 hbase-build-support/hbase-error-prone/pom.xml  | 6 ++
 .../java/org/apache/hadoop/hbase/errorprone/AlwaysPasses.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/ServerLoad.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java | 2 +-
 .../org/apache/hadoop/hbase/filter/BigDecimalComparator.java   | 1 +
 .../java/org/apache/hadoop/hbase/filter/BinaryComparator.java  | 1 +
 .../org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java | 1 +
 .../java/org/apache/hadoop/hbase/filter/BitComparator.java | 1 +
 .../java/org/apache/hadoop/hbase/filter/LongComparator.java| 1 +
 .../java/org/apache/hadoop/hbase/filter/NullComparator.java| 1 +
 .../org/apache/hadoop/hbase/filter/RegexStringComparator.java  | 1 +
 .../org/apache/hadoop/hbase/filter/SubstringComparator.java| 1 +
 .../org/apache/hadoop/hbase/filter/ByteArrayComparable.java| 1 +
 pom.xml| 2 +-
 14 files changed, 19 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/641e870e/hbase-build-support/hbase-error-prone/pom.xml
--
diff --git a/hbase-build-support/hbase-error-prone/pom.xml 
b/hbase-build-support/hbase-error-prone/pom.xml
index 343bb3e..161ed24 100644
--- a/hbase-build-support/hbase-error-prone/pom.xml
+++ b/hbase-build-support/hbase-error-prone/pom.xml
@@ -37,6 +37,12 @@
   error_prone_annotation
   ${error-prone.version}
   provided
+  
+
+  com.google.guava
+  guava
+
+  
 
 
   

[09/18] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

2018-03-08 Thread zhangduo
HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df9eeae0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df9eeae0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df9eeae0

Branch: refs/heads/HBASE-19064
Commit: df9eeae02c3108962c8560ed99581d81deb49cf0
Parents: b4d1a35
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/df9eeae0/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * 
+   * If the peer is not a synchronous replication peer, a {@link 
SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/df9eeae0/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
   private final List peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, 
ReplicationPeerConfig peerConfig,
+  boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
-  

[18/18] hbase git commit: HBASE-19943 Only allow removing sync replication peer which is in DA state

2018-03-08 Thread zhangduo
HBASE-19943 Only allow removing sync replication peer which is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/669e80d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/669e80d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/669e80d8

Branch: refs/heads/HBASE-19064
Commit: 669e80d830be8e97c88cb58465773b528bc6905f
Parents: 38a072b
Author: huzheng 
Authored: Thu Mar 1 18:34:02 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 11 
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/TestReplicationAdmin.java   | 63 
 3 files changed, 75 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/669e80d8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index d80e9a4..6698aa5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -120,8 +120,19 @@ public class ReplicationPeerManager {
 return desc;
   }
 
+  private void checkPeerInDAStateIfSyncReplication(String peerId) throws 
DoNotRetryIOException {
+ReplicationPeerDescription desc = peers.get(peerId);
+if (desc != null && desc.getPeerConfig().isSyncReplication()
+&& 
!SyncReplicationState.DOWNGRADE_ACTIVE.equals(desc.getSyncReplicationState())) {
+  throw new DoNotRetryIOException("Couldn't remove synchronous replication 
peer with state="
+  + desc.getSyncReplicationState()
+  + "; please transit the synchronous replication state to 
DOWNGRADE_ACTIVE first.");
+}
+  }
+
   public void preRemovePeer(String peerId) throws DoNotRetryIOException {
 checkPeerExists(peerId);
+checkPeerInDAStateIfSyncReplication(peerId);
   }
 
   public void preEnablePeer(String peerId) throws DoNotRetryIOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/669e80d8/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index ac4b4cd..282aa21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -142,7 +142,7 @@ public class SyncReplicationWALProvider implements 
WALProvider, PeerActionListen
   @Override
   public WAL getWAL(RegionInfo region) throws IOException {
 if (region == null) {
-  return provider.getWAL(region);
+  return provider.getWAL(null);
 }
 Optional> peerIdAndRemoteWALDir =
   peerInfoProvider.getPeerIdAndRemoteWALDir(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/669e80d8/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 0ad476f..486ab51 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -254,6 +254,62 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testRemovePeerWithNonDAState() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
+ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+
+String rootDir = "hdfs://srv1:/hbase";
+builder.setClusterKey(KEY_ONE);
+builder.setRemoteWALDir(rootDir);
+builder.setReplicateAllUserTables(false);
+Map tableCfs = new HashMap<>();
+tableCfs.put(tableName, new ArrayList<>());
+builder.setTableCFsMap(tableCfs);
+hbaseAdmin.addReplicationPeer(ID_ONE, builder.build());
+

[07/18] hbase git commit: HBASE-19935 Only allow table replication for sync replication for now

2018-03-08 Thread zhangduo
HBASE-19935 Only allow table replication for sync replication for now


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/074891f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/074891f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/074891f1

Branch: refs/heads/HBASE-19064
Commit: 074891f1f8ba4be38c0ac6b2b4ecb7541eed44af
Parents: 6044a88
Author: Guanghao Zhang 
Authored: Tue Feb 6 16:00:59 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  9 +++
 .../replication/ReplicationPeerManager.java | 34 -
 .../replication/TestReplicationAdmin.java   | 73 ++--
 .../wal/TestCombinedAsyncWriter.java|  6 ++
 .../wal/TestSyncReplicationWALProvider.java |  6 ++
 5 files changed, 102 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/074891f1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 4c10c46..69565a7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -217,6 +219,13 @@ public class ReplicationPeerConfig {
 return this.remoteWALDir;
   }
 
+  /**
+   * Use the remote wal dir to decide whether a peer is a sync replication peer
+   */
+  public boolean isSyncReplication() {
+return !StringUtils.isBlank(this.remoteWALDir);
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/074891f1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 9336fbd..6bfd9c9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -167,7 +167,7 @@ public class ReplicationPeerManager {
   " does not match new remote wal dir '" + 
peerConfig.getRemoteWALDir() + "'");
 }
 
-if (oldPeerConfig.getRemoteWALDir() != null) {
+if (oldPeerConfig.isSyncReplication()) {
   if (!ReplicationUtils.isKeyConfigEqual(oldPeerConfig, peerConfig)) {
 throw new DoNotRetryIOException(
 "Changing the replicated namespace/table config on a synchronous 
replication "
@@ -195,8 +195,8 @@ public class ReplicationPeerManager {
 }
 ReplicationPeerConfig copiedPeerConfig = 
ReplicationPeerConfig.newBuilder(peerConfig).build();
 SyncReplicationState syncReplicationState =
-StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? 
SyncReplicationState.NONE
-: SyncReplicationState.DOWNGRADE_ACTIVE;
+copiedPeerConfig.isSyncReplication() ? 
SyncReplicationState.DOWNGRADE_ACTIVE
+: SyncReplicationState.NONE;
 peerStorage.addPeer(peerId, copiedPeerConfig, enabled, 
syncReplicationState);
 peers.put(peerId,
   new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, 
syncReplicationState));
@@ -316,9 +316,37 @@ public class ReplicationPeerManager {
 peerConfig.getTableCFsMap());
 }
 
+if (peerConfig.isSyncReplication()) {
+  checkPeerConfigForSyncReplication(peerConfig);
+}
+
 checkConfiguredWALEntryFilters(peerConfig);
   }
 
+  private void checkPeerConfigForSyncReplication(ReplicationPeerConfig 
peerConfig)
+  throws DoNotRetryIOException {
+// This is used to reduce the difficulty of implementing the sync 
replication state transition,
+// as we need to reopen all the related regions.
+// TODO: Add namespace, replicate_all flag back
+if (peerConfig.replicateAllUserTables()) {
+  throw new DoNotRetryIOException(
+  "Only 

[15/18] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

2018-03-08 Thread zhangduo
HBASE-19078 Add a remote peer cluster wal directory config for synchronous 
replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c8b3c33
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c8b3c33
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c8b3c33

Branch: refs/heads/HBASE-19064
Commit: 9c8b3c3331727adb5de3352a25c7a8bf12cf24e7
Parents: ea6d939
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 22:44:41 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 21 +-
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 17 +++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 188 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8b3c33/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index a234a9b..642149b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -315,6 +315,9 @@ public final class ReplicationPeerConfigUtil {
 
excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
 }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -371,6 +374,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8b3c33/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index bf8d030..4c10c46 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -46,6 +46,8 @@ public class ReplicationPeerConfig {
   private Map excludeTableCFsMap = 
null;
   private Set excludeNamespaces = null;
   private long bandwidth = 0;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -64,6 +66,7 @@ public class ReplicationPeerConfig {
 builder.excludeNamespaces != null ? 
Collections.unmodifiableSet(builder.excludeNamespaces)
 : null;
 this.bandwidth = builder.bandwidth;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map
@@ -210,6 +213,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -223,7 +230,8 @@ public class ReplicationPeerConfig {
 .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
 .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
 .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-.setBandwidth(peerConfig.getBandwidth());
+.setBandwidth(peerConfig.getBandwidth())
+.setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -250,6 +258,8 @@ public class ReplicationPeerConfig {
 
 private long bandwidth = 0;
 
+private String remoteWALDir = null;
+
 @Override
 public 

[04/18] hbase git commit: HBASE-20144 The shutdown of master will hang if there are no live region server

2018-03-08 Thread zhangduo
HBASE-20144 The shutdown of master will hang if there are no live region server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a03d09ab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a03d09ab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a03d09ab

Branch: refs/heads/HBASE-19064
Commit: a03d09abd72789bbf9364d8a9b2c54d0e9351af9
Parents: 8e0674a
Author: zhangduo 
Authored: Wed Mar 7 20:32:35 2018 +0800
Committer: zhangduo 
Committed: Thu Mar 8 15:05:57 2018 +0800

--
 .../hadoop/hbase/master/ServerManager.java  |  4 ++
 .../master/TestAssignmentManagerMetrics.java| 42 +-
 .../master/TestShutdownWithNoRegionServer.java  | 58 
 3 files changed, 75 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a03d09ab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 06d6c8b..a65d95f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -957,6 +957,10 @@ public class ServerManager {
 String statusStr = "Cluster shutdown requested of master=" + 
this.master.getServerName();
 LOG.info(statusStr);
 this.clusterShutdown.set(true);
+if (onlineServers.isEmpty()) {
+  // we do not synchronize here so this may cause a double stop, but not a 
big deal
+  master.stop("OnlineServer=0 right after cluster shutdown set");
+}
   }
 
   boolean isClusterShutdown() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a03d09ab/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
index aa3a20c..287fc70 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerMetrics.java
@@ -24,20 +24,21 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -45,10 +46,7 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.fail;
-
-@Ignore // Disabled temporarily; reenable 
-@Category(MediumTests.class)
+@Category({ MasterTests.class, MediumTests.class })
 public class TestAssignmentManagerMetrics {
 
   @ClassRule
@@ -61,7 +59,7 @@ public class TestAssignmentManagerMetrics {
 
   private static MiniHBaseCluster cluster;
   private static HMaster master;
-  private static HBaseTestingUtility TEST_UTIL;
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static Configuration conf;
   private static final int msgInterval = 1000;
 
@@ -71,7 +69,6 @@ public class TestAssignmentManagerMetrics {
   @BeforeClass
   public static void startCluster() throws Exception {
 LOG.info("Starting cluster");
-TEST_UTIL = new HBaseTestingUtility();
 conf = TEST_UTIL.getConfiguration();
 
 // Disable sanity check for coprocessor
@@ -98,20 +95,14 @@ public class TestAssignmentManagerMetrics {
 
   @AfterClass
   public static void after() throws Exception {
-  

  1   2   >