[hbase] branch branch-2.0 updated: HBASE-22128 Move namespace region then master crashed make deadlock

2019-04-08 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new 8208030  HBASE-22128 Move namespace region then master crashed make 
deadlock
8208030 is described below

commit 8208030b8a0bcf02ba26af9c1c76cf229dedfa95
Author: Bing Xiao 
AuthorDate: Mon Apr 8 21:27:07 2019 +0800

HBASE-22128 Move namespace region then master crashed make deadlock

Signed-off-by: Allan Yang 
---
 .../master/assignment/MoveRegionProcedure.java |  18 +++
 .../TestMoveSystemTableWithStopMaster.java | 178 +
 2 files changed, 196 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index 968f5f1..21e7490 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -191,4 +191,22 @@ public class MoveRegionProcedure extends 
AbstractStateMachineRegionProcedure
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import static 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState.MOVE_REGION_ASSIGN;
+
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+
+@Category({LargeTests.class})
+public class TestMoveSystemTableWithStopMaster {
+
+  private static final Logger LOG =
+LoggerFactory.getLogger(TestMoveSystemTableWithStopMaster.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+HBaseClassTestRule.forClass(TestMoveSystemTableWithStopMaster.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+UTIL.startMiniCluster(1, 2);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMoveMetaRegionWithStopMaster() throws Exception {
+ClusterConnection conn = (ClusterConnection) UTIL.getConnection();
+MiniHBaseCluster miniHBaseCluster = UTIL.getHBaseCluster();
+
+List namespaceRegionLocations = 
conn.locateRegions(TableName.META_TABLE_NAME);
+
+RegionInfo regionInfo = namespaceRegionLocations.get(0).getRegion();
+ServerName source = namespaceRegionLocations.get(0).getServerName();
+ServerName dstServerName = UTIL.getOtherRegionServer(
+  miniHBaseCluster.getRegionServer(source)).getServerName();
+
+RegionPlan rp = new RegionPlan(regionInfo, source, dstServerName);
+
+HMaster master = UTIL.getHBaseCluster().getMaster();
+
+CountDownLatch moveRegionAssignLatch = new CountDownLatch(1);
+CountDownLatch masterAbortLatch = new CountDownLatch(1);
+
+MoveRegionProcedureHoldBeforeAssign proc = new 
MoveRegionProcedureHoldBeforeAssign(
+  master.getMasterProcedureExecutor().getEnvironment(), rp, true);
+
+proc.moveRegionAssignLatch = moveRegionAssignLatch;
+proc.masterStoppedLatch = masterAbortLatch;
+
+ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), 
proc);
+
+moveRegionAssignLatch.await();
+master.abort("for test");
+// may not be closed, and rs still

[hbase] branch branch-2.1 updated: HBASE-22128 Move namespace region then master crashed make deadlock

2019-04-08 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 83668c7  HBASE-22128 Move namespace region then master crashed make 
deadlock
83668c7 is described below

commit 83668c78e30e5909681309b7b2c94bb2e91709f0
Author: Bing Xiao 
AuthorDate: Mon Apr 8 21:05:54 2019 +0800

HBASE-22128 Move namespace region then master crashed make deadlock

Signed-off-by: Allan Yang 
---
 .../master/assignment/MoveRegionProcedure.java |  18 +++
 .../TestMoveSystemTableWithStopMaster.java | 178 +
 2 files changed, 196 insertions(+)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index 968f5f1..21e7490 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -191,4 +191,22 @@ public class MoveRegionProcedure extends 
AbstractStateMachineRegionProcedure
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import static 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState.MOVE_REGION_ASSIGN;
+
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+
+@Category({LargeTests.class})
+public class TestMoveSystemTableWithStopMaster {
+
+  private static final Logger LOG =
+LoggerFactory.getLogger(TestMoveSystemTableWithStopMaster.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+HBaseClassTestRule.forClass(TestMoveSystemTableWithStopMaster.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+UTIL.startMiniCluster(1, 2);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMoveMetaRegionWithStopMaster() throws Exception {
+ClusterConnection conn = (ClusterConnection) UTIL.getConnection();
+MiniHBaseCluster miniHBaseCluster = UTIL.getHBaseCluster();
+
+List namespaceRegionLocations = 
conn.locateRegions(TableName.META_TABLE_NAME);
+
+RegionInfo regionInfo = namespaceRegionLocations.get(0).getRegion();
+ServerName source = namespaceRegionLocations.get(0).getServerName();
+ServerName dstServerName = UTIL.getOtherRegionServer(
+  miniHBaseCluster.getRegionServer(source)).getServerName();
+
+RegionPlan rp = new RegionPlan(regionInfo, source, dstServerName);
+
+HMaster master = UTIL.getHBaseCluster().getMaster();
+
+CountDownLatch moveRegionAssignLatch = new CountDownLatch(1);
+CountDownLatch masterAbortLatch = new CountDownLatch(1);
+
+MoveRegionProcedureHoldBeforeAssign proc = new 
MoveRegionProcedureHoldBeforeAssign(
+  master.getMasterProcedureExecutor().getEnvironment(), rp, true);
+
+proc.moveRegionAssignLatch = moveRegionAssignLatch;
+proc.masterStoppedLatch = masterAbortLatch;
+
+ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), 
proc);
+
+moveRegionAssignLatch.await();
+master.abort("for test");
+// may not be closed, and rs still

[hbase] branch branch-2.2 updated: HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns

2019-03-11 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 660d854  HBASE-22011 ThriftUtilities.getFromThrift should set filter 
when not set columns
660d854 is described below

commit 660d854168f606732413062ab37b60d3ba902e9b
Author: Bing Xiao 
AuthorDate: Mon Mar 11 15:26:08 2019 +0800

HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set 
columns
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java   | 18 --
 .../hadoop/hbase/thrift2/TestThriftConnection.java  | 21 +
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 5c9853f..de56438 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -180,18 +180,16 @@ public class ThriftUtilities {
   out.setCheckExistenceOnly(in.isExistence_only());
 }
 
-
-if (!in.isSetColumns()) {
-  return out;
-}
-
-for (TColumn column : in.getColumns()) {
-  if (column.isSetQualifier()) {
-out.addColumn(column.getFamily(), column.getQualifier());
-  } else {
-out.addFamily(column.getFamily());
+if (in.isSetColumns()) {
+  for (TColumn column : in.getColumns()) {
+if (column.isSetQualifier()) {
+  out.addColumn(column.getFamily(), column.getQualifier());
+} else {
+  out.addFamily(column.getFamily());
+}
   }
 }
+
 if (in.isSetFilterBytes()) {
   out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 2c9bf69..a11f2e8 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.ColumnValueFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
@@ -316,6 +317,26 @@ public class TestThriftConnection {
   }
 
   @Test
+  public void testHBASE22011()throws Exception{
+testHBASE22011(thriftConnection, "testHBASE22011Table");
+testHBASE22011(thriftHttpConnection, "testHBASE22011HttpTable");
+  }
+
+  public void testHBASE22011(Connection connection, String tableName) throws 
IOException {
+createTable(thriftAdmin, tableName);
+try (Table table = connection.getTable(TableName.valueOf(tableName))){
+  Get get = new Get(ROW_2);
+  Result result = table.get(get);
+  assertEquals(2, result.listCells().size());
+
+  ColumnCountGetFilter filter = new ColumnCountGetFilter(1);
+  get.setFilter(filter);
+  result = table.get(get);
+  assertEquals(1, result.listCells().size());
+}
+  }
+
+  @Test
   public void testMultiGet() throws Exception {
 testMultiGet(thriftConnection, "testMultiGetTable");
 testMultiGet(thriftHttpConnection, "testMultiGetHttpTable");



[hbase] branch branch-2 updated: HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns

2019-03-11 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 091844c  HBASE-22011 ThriftUtilities.getFromThrift should set filter 
when not set columns
091844c is described below

commit 091844ce61f16b1f30ecd74240afa0fd5a7f5c4d
Author: Bing Xiao 
AuthorDate: Mon Mar 11 15:24:12 2019 +0800

HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set 
columns
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java   | 18 --
 .../hadoop/hbase/thrift2/TestThriftConnection.java  | 21 +
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 5c9853f..de56438 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -180,18 +180,16 @@ public class ThriftUtilities {
   out.setCheckExistenceOnly(in.isExistence_only());
 }
 
-
-if (!in.isSetColumns()) {
-  return out;
-}
-
-for (TColumn column : in.getColumns()) {
-  if (column.isSetQualifier()) {
-out.addColumn(column.getFamily(), column.getQualifier());
-  } else {
-out.addFamily(column.getFamily());
+if (in.isSetColumns()) {
+  for (TColumn column : in.getColumns()) {
+if (column.isSetQualifier()) {
+  out.addColumn(column.getFamily(), column.getQualifier());
+} else {
+  out.addFamily(column.getFamily());
+}
   }
 }
+
 if (in.isSetFilterBytes()) {
   out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 2c9bf69..a11f2e8 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.ColumnValueFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
@@ -316,6 +317,26 @@ public class TestThriftConnection {
   }
 
   @Test
+  public void testHBASE22011()throws Exception{
+testHBASE22011(thriftConnection, "testHBASE22011Table");
+testHBASE22011(thriftHttpConnection, "testHBASE22011HttpTable");
+  }
+
+  public void testHBASE22011(Connection connection, String tableName) throws 
IOException {
+createTable(thriftAdmin, tableName);
+try (Table table = connection.getTable(TableName.valueOf(tableName))){
+  Get get = new Get(ROW_2);
+  Result result = table.get(get);
+  assertEquals(2, result.listCells().size());
+
+  ColumnCountGetFilter filter = new ColumnCountGetFilter(1);
+  get.setFilter(filter);
+  result = table.get(get);
+  assertEquals(1, result.listCells().size());
+}
+  }
+
+  @Test
   public void testMultiGet() throws Exception {
 testMultiGet(thriftConnection, "testMultiGetTable");
 testMultiGet(thriftHttpConnection, "testMultiGetHttpTable");



[hbase] branch master updated: HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set columns

2019-03-11 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 8a2ee83  HBASE-22011 ThriftUtilities.getFromThrift should set filter 
when not set columns
8a2ee83 is described below

commit 8a2ee8339a96cb98dca906153f5eac0db507e849
Author: Bing Xiao 
AuthorDate: Mon Mar 11 15:16:15 2019 +0800

HBASE-22011 ThriftUtilities.getFromThrift should set filter when not set 
columns
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java   | 18 --
 .../hadoop/hbase/thrift2/TestThriftConnection.java  | 21 +
 2 files changed, 29 insertions(+), 10 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 204d20d..1fc85e5 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -180,18 +180,16 @@ public final class ThriftUtilities {
   out.setCheckExistenceOnly(in.isExistence_only());
 }
 
-
-if (!in.isSetColumns()) {
-  return out;
-}
-
-for (TColumn column : in.getColumns()) {
-  if (column.isSetQualifier()) {
-out.addColumn(column.getFamily(), column.getQualifier());
-  } else {
-out.addFamily(column.getFamily());
+if (in.isSetColumns()) {
+  for (TColumn column : in.getColumns()) {
+if (column.isSetQualifier()) {
+  out.addColumn(column.getFamily(), column.getQualifier());
+} else {
+  out.addFamily(column.getFamily());
+}
   }
 }
+
 if (in.isSetFilterBytes()) {
   out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 2c9bf69..a11f2e8 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.ColumnValueFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
@@ -316,6 +317,26 @@ public class TestThriftConnection {
   }
 
   @Test
+  public void testHBASE22011()throws Exception{
+testHBASE22011(thriftConnection, "testHBASE22011Table");
+testHBASE22011(thriftHttpConnection, "testHBASE22011HttpTable");
+  }
+
+  public void testHBASE22011(Connection connection, String tableName) throws 
IOException {
+createTable(thriftAdmin, tableName);
+try (Table table = connection.getTable(TableName.valueOf(tableName))){
+  Get get = new Get(ROW_2);
+  Result result = table.get(get);
+  assertEquals(2, result.listCells().size());
+
+  ColumnCountGetFilter filter = new ColumnCountGetFilter(1);
+  get.setFilter(filter);
+  result = table.get(get);
+  assertEquals(1, result.listCells().size());
+}
+  }
+
+  @Test
   public void testMultiGet() throws Exception {
 testMultiGet(thriftConnection, "testMultiGetTable");
 testMultiGet(thriftHttpConnection, "testMultiGetHttpTable");



[hbase] branch branch-2.2 updated: HBASE-21962 Filters do not work in ThriftTable

2019-02-27 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new ab9be43  HBASE-21962 Filters do not work in ThriftTable
ab9be43 is described below

commit ab9be433fa4668c0d5c88878b81856216f17109f
Author: Allan Yang 
AuthorDate: Wed Feb 27 16:03:14 2019 +0800

HBASE-21962 Filters do not work in ThriftTable
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java  | 33 +++---
 .../hadoop/hbase/thrift2/TestThriftConnection.java |  5 ++--
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index ba85dc7..5c9853f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.ParseFilter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -106,6 +105,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+
 @InterfaceAudience.Private
 public class ThriftUtilities {
 
@@ -191,14 +193,8 @@ public class ThriftUtilities {
   }
 }
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
-
 return out;
   }
 
@@ -594,17 +590,22 @@ public class ThriftUtilities {
 }
 
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
 
 return out;
   }
 
+  public static byte[] filterFromHBase(Filter filter) throws IOException {
+FilterProtos.Filter filterPB = ProtobufUtil.toFilter(filter);
+return filterPB.toByteArray();
+  }
+
+  public static Filter filterFromThrift(byte[] filterBytes) throws IOException 
{
+FilterProtos.Filter filterPB  = FilterProtos.Filter.parseFrom(filterBytes);
+return ProtobufUtil.toFilter(filterPB);
+  }
+
   public static TScan scanFromHBase(Scan in) throws IOException {
 TScan out = new TScan();
 out.setStartRow(in.getStartRow());
@@ -662,7 +663,7 @@ public class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
@@ -1227,7 +1228,7 @@ public class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 1583619..2c9bf69 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -697,8 +697,8 @@ public class TestThriftConnection {
 
   @Test
   public void testScanWithFilters() throws Exception {
-testIteratorScanner(thriftConnection, "testScanWithFiltersTable");
-testIteratorScanner(thriftHttpConnection, "testScanWithFiltersHttpTable");
+testScanWithFilters(thriftConnection, "testScanWithFiltersTable");
+testScanWithFilters(thriftHttpConnection, "testScanWithFiltersHttpTable");
   }
 
   private void testScanWithFilters(Connection connection, String tableName) 
throws IOException {
@@ -712,6 +712,7 @@ public class TestThriftConnection {
   filterList.addFilter(columnValueFilter);
   Scan scan = new Scan();
   scan.

[hbase] branch branch-2 updated: HBASE-21962 Filters do not work in ThriftTable

2019-02-27 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 4d9ce77  HBASE-21962 Filters do not work in ThriftTable
4d9ce77 is described below

commit 4d9ce7706bb9ca17463ae66bba3d105f1331de51
Author: Allan Yang 
AuthorDate: Wed Feb 27 16:00:04 2019 +0800

HBASE-21962 Filters do not work in ThriftTable
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java  | 33 +++---
 .../hadoop/hbase/thrift2/TestThriftConnection.java |  5 ++--
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index ba85dc7..5c9853f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.ParseFilter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -106,6 +105,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+
 @InterfaceAudience.Private
 public class ThriftUtilities {
 
@@ -191,14 +193,8 @@ public class ThriftUtilities {
   }
 }
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
-
 return out;
   }
 
@@ -594,17 +590,22 @@ public class ThriftUtilities {
 }
 
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
 
 return out;
   }
 
+  public static byte[] filterFromHBase(Filter filter) throws IOException {
+FilterProtos.Filter filterPB = ProtobufUtil.toFilter(filter);
+return filterPB.toByteArray();
+  }
+
+  public static Filter filterFromThrift(byte[] filterBytes) throws IOException 
{
+FilterProtos.Filter filterPB  = FilterProtos.Filter.parseFrom(filterBytes);
+return ProtobufUtil.toFilter(filterPB);
+  }
+
   public static TScan scanFromHBase(Scan in) throws IOException {
 TScan out = new TScan();
 out.setStartRow(in.getStartRow());
@@ -662,7 +663,7 @@ public class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
@@ -1227,7 +1228,7 @@ public class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 1583619..2c9bf69 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -697,8 +697,8 @@ public class TestThriftConnection {
 
   @Test
   public void testScanWithFilters() throws Exception {
-testIteratorScanner(thriftConnection, "testScanWithFiltersTable");
-testIteratorScanner(thriftHttpConnection, "testScanWithFiltersHttpTable");
+testScanWithFilters(thriftConnection, "testScanWithFiltersTable");
+testScanWithFilters(thriftHttpConnection, "testScanWithFiltersHttpTable");
   }
 
   private void testScanWithFilters(Connection connection, String tableName) 
throws IOException {
@@ -712,6 +712,7 @@ public class TestThriftConnection {
   filterList.addFilter(columnValueFilter);
   Scan scan = new Scan();
   scan.setMaxVersi

[hbase] branch master updated: HBASE-21962 Filters do not work in ThriftTable

2019-02-26 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 8408e26  HBASE-21962 Filters do not work in ThriftTable
8408e26 is described below

commit 8408e26d26e88462dc2f6f61efb6e5ecaa98063d
Author: Allan Yang 
AuthorDate: Wed Feb 27 15:56:25 2019 +0800

HBASE-21962 Filters do not work in ThriftTable
---
 .../hadoop/hbase/thrift2/ThriftUtilities.java  | 33 +++---
 .../hadoop/hbase/thrift2/TestThriftConnection.java |  5 ++--
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 0ffedf6..204d20d 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.ParseFilter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -106,6 +105,9 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+
 @InterfaceAudience.Private
 public final class ThriftUtilities {
 
@@ -191,14 +193,8 @@ public final class ThriftUtilities {
   }
 }
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
-
 return out;
   }
 
@@ -599,17 +595,22 @@ public final class ThriftUtilities {
 }
 
 if (in.isSetFilterBytes()) {
-  try {
-Filter filter = FilterBase.parseFrom(in.getFilterBytes());
-out.setFilter(filter);
-  } catch (DeserializationException e) {
-throw new RuntimeException(e);
-  }
+  out.setFilter(filterFromThrift(in.getFilterBytes()));
 }
 
 return out;
   }
 
+  public static byte[] filterFromHBase(Filter filter) throws IOException {
+FilterProtos.Filter filterPB = ProtobufUtil.toFilter(filter);
+return filterPB.toByteArray();
+  }
+
+  public static Filter filterFromThrift(byte[] filterBytes) throws IOException 
{
+FilterProtos.Filter filterPB  = FilterProtos.Filter.parseFrom(filterBytes);
+return ProtobufUtil.toFilter(filterPB);
+  }
+
   public static TScan scanFromHBase(Scan in) throws IOException {
 TScan out = new TScan();
 out.setStartRow(in.getStartRow());
@@ -667,7 +668,7 @@ public final class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
@@ -1232,7 +1233,7 @@ public final class ThriftUtilities {
 }
 if (in.getFilter() != null) {
   try {
-out.setFilterBytes(in.getFilter().toByteArray());
+out.setFilterBytes(filterFromHBase(in.getFilter()));
   } catch (IOException ioE) {
 throw new RuntimeException(ioE);
   }
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 1583619..2c9bf69 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -697,8 +697,8 @@ public class TestThriftConnection {
 
   @Test
   public void testScanWithFilters() throws Exception {
-testIteratorScanner(thriftConnection, "testScanWithFiltersTable");
-testIteratorScanner(thriftHttpConnection, "testScanWithFiltersHttpTable");
+testScanWithFilters(thriftConnection, "testScanWithFiltersTable");
+testScanWithFilters(thriftHttpConnection, "testScanWithFiltersHttpTable");
   }
 
   private void testScanWithFilters(Connection connection, String tableName) 
throws IOException {
@@ -712,6 +712,7 @@ public class TestThriftConnection {
   filterList.addFilter(columnValueFilter);
   Scan scan = 

[hbase] branch branch-2 updated: HBASE-21809 Add retry thrift client for ThriftTable/Admin

2019-01-31 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 331a7f2  HBASE-21809 Add retry thrift client for ThriftTable/Admin
331a7f2 is described below

commit 331a7f2bda5256b83375a7ce6746e7b7b6baa321
Author: Allan Yang 
AuthorDate: Thu Jan 31 16:46:02 2019 +0800

HBASE-21809 Add retry thrift client for ThriftTable/Admin
---
 .../hbase/thrift2/client/ThriftConnection.java | 47 +-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index cc186aa..36e513c 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -22,11 +22,16 @@ import static 
org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_SOCKET_TIMEOUT_CONNE
 import static org.apache.hadoop.hbase.ipc.RpcClient.SOCKET_TIMEOUT_CONNECT;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
+import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
 
+import javax.net.ssl.SSLException;
+
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -35,6 +40,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableBuilder;
@@ -42,10 +48,13 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.thrift.Constants;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.http.HttpRequest;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.utils.HttpClientUtils;
+import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.protocol.HttpContext;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TCompactProtocol;
 import org.apache.thrift.protocol.TProtocol;
@@ -227,14 +236,50 @@ public class ThriftConnection implements Connection {
 return new ThriftAdmin(client.getFirst(), client.getSecond(), conf);
   }
 
+  public static class DelayRetryHandler extends DefaultHttpRequestRetryHandler 
{
+private long pause;
+
+public DelayRetryHandler(int retryCount, long pause) {
+  super(retryCount, true, Arrays.asList(
+  InterruptedIOException.class,
+  UnknownHostException.class,
+  SSLException.class));
+  this.pause = pause;
+}
+
+@Override
+public boolean retryRequest(IOException exception, int executionCount, 
HttpContext context) {
+  // Don't sleep for retrying the first time
+  if (executionCount > 1 && pause > 0) {
+try {
+  long sleepTime = ConnectionUtils.getPauseTime(pause, executionCount 
- 1);
+  Thread.sleep(sleepTime);
+} catch (InterruptedException ie) {
+  //reset interrupt marker
+  Thread.currentThread().interrupt();
+}
+  }
+  return super.retryRequest(exception, executionCount, context);
+}
+
+@Override
+protected boolean handleAsIdempotent(HttpRequest request) {
+  return true;
+}
+  }
+
   public synchronized HttpClient getHttpClient() {
 if (httpClient != null) {
   return httpClient;
 }
+int retry = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, 5);
 HttpClientBuilder builder = HttpClientBuilder.create();
 RequestConfig.Builder requestBuilder = RequestConfig.custom();
 requestBuilder = requestBuilder.setConnectTimeout(getConnectTimeout());
-requestBuilder = 
requestBuilder.setConnectionRequestTimeout(getOperationTimeout());
+requestBuilder = requestBuilder.setSocketTimeout(getOperationTimeout());
+builder.setRetryHandler(new DelayRetryHandler(retry, pause));
 builder.setDefaultRequestConfig(requestBuilder.build());
 httpClient = builder.build();
 httpClientCreated = true;



[hbase] branch branch-2.2 updated: HBASE-21809 Add retry thrift client for ThriftTable/Admin

2019-01-31 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.2 by this push:
 new 24ab16f  HBASE-21809 Add retry thrift client for ThriftTable/Admin
24ab16f is described below

commit 24ab16f3e89d85f344c7058a283a4f7467b06e72
Author: Allan Yang 
AuthorDate: Thu Jan 31 17:18:49 2019 +0800

HBASE-21809 Add retry thrift client for ThriftTable/Admin
---
 .../hbase/thrift2/client/ThriftConnection.java | 47 +-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index cc186aa..36e513c 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -22,11 +22,16 @@ import static 
org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_SOCKET_TIMEOUT_CONNE
 import static org.apache.hadoop.hbase.ipc.RpcClient.SOCKET_TIMEOUT_CONNECT;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
+import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
 
+import javax.net.ssl.SSLException;
+
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -35,6 +40,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableBuilder;
@@ -42,10 +48,13 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.thrift.Constants;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.http.HttpRequest;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.utils.HttpClientUtils;
+import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.protocol.HttpContext;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TCompactProtocol;
 import org.apache.thrift.protocol.TProtocol;
@@ -227,14 +236,50 @@ public class ThriftConnection implements Connection {
 return new ThriftAdmin(client.getFirst(), client.getSecond(), conf);
   }
 
+  public static class DelayRetryHandler extends DefaultHttpRequestRetryHandler 
{
+private long pause;
+
+public DelayRetryHandler(int retryCount, long pause) {
+  super(retryCount, true, Arrays.asList(
+  InterruptedIOException.class,
+  UnknownHostException.class,
+  SSLException.class));
+  this.pause = pause;
+}
+
+@Override
+public boolean retryRequest(IOException exception, int executionCount, 
HttpContext context) {
+  // Don't sleep for retrying the first time
+  if (executionCount > 1 && pause > 0) {
+try {
+  long sleepTime = ConnectionUtils.getPauseTime(pause, executionCount 
- 1);
+  Thread.sleep(sleepTime);
+} catch (InterruptedException ie) {
+  //reset interrupt marker
+  Thread.currentThread().interrupt();
+}
+  }
+  return super.retryRequest(exception, executionCount, context);
+}
+
+@Override
+protected boolean handleAsIdempotent(HttpRequest request) {
+  return true;
+}
+  }
+
   public synchronized HttpClient getHttpClient() {
 if (httpClient != null) {
   return httpClient;
 }
+int retry = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, 5);
 HttpClientBuilder builder = HttpClientBuilder.create();
 RequestConfig.Builder requestBuilder = RequestConfig.custom();
 requestBuilder = requestBuilder.setConnectTimeout(getConnectTimeout());
-requestBuilder = 
requestBuilder.setConnectionRequestTimeout(getOperationTimeout());
+requestBuilder = requestBuilder.setSocketTimeout(getOperationTimeout());
+builder.setRetryHandler(new DelayRetryHandler(retry, pause));
 builder.setDefaultRequestConfig(requestBuilder.build());
 httpClient = builder.build();
 httpClientCreated = true;



[hbase] branch master updated: HBASE-21809 Add retry thrift client for ThriftTable/Admin

2019-01-31 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 2776bc0  HBASE-21809 Add retry thrift client for ThriftTable/Admin
2776bc0 is described below

commit 2776bc0151051c8d20d9b1c2ac6142ade6a31b62
Author: Allan Yang 
AuthorDate: Thu Jan 31 16:43:09 2019 +0800

HBASE-21809 Add retry thrift client for ThriftTable/Admin
---
 .../hbase/thrift2/client/ThriftConnection.java | 47 +-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index cc186aa..36e513c 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -22,11 +22,16 @@ import static 
org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_SOCKET_TIMEOUT_CONNE
 import static org.apache.hadoop.hbase.ipc.RpcClient.SOCKET_TIMEOUT_CONNECT;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
+import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
 
+import javax.net.ssl.SSLException;
+
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -35,6 +40,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableBuilder;
@@ -42,10 +48,13 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.thrift.Constants;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.http.HttpRequest;
 import org.apache.http.client.HttpClient;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.client.utils.HttpClientUtils;
+import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.protocol.HttpContext;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.apache.thrift.protocol.TCompactProtocol;
 import org.apache.thrift.protocol.TProtocol;
@@ -227,14 +236,50 @@ public class ThriftConnection implements Connection {
 return new ThriftAdmin(client.getFirst(), client.getSecond(), conf);
   }
 
+  public static class DelayRetryHandler extends DefaultHttpRequestRetryHandler 
{
+private long pause;
+
+public DelayRetryHandler(int retryCount, long pause) {
+  super(retryCount, true, Arrays.asList(
+  InterruptedIOException.class,
+  UnknownHostException.class,
+  SSLException.class));
+  this.pause = pause;
+}
+
+@Override
+public boolean retryRequest(IOException exception, int executionCount, 
HttpContext context) {
+  // Don't sleep for retrying the first time
+  if (executionCount > 1 && pause > 0) {
+try {
+  long sleepTime = ConnectionUtils.getPauseTime(pause, executionCount 
- 1);
+  Thread.sleep(sleepTime);
+} catch (InterruptedException ie) {
+  //reset interrupt marker
+  Thread.currentThread().interrupt();
+}
+  }
+  return super.retryRequest(exception, executionCount, context);
+}
+
+@Override
+protected boolean handleAsIdempotent(HttpRequest request) {
+  return true;
+}
+  }
+
   public synchronized HttpClient getHttpClient() {
 if (httpClient != null) {
   return httpClient;
 }
+int retry = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, 5);
 HttpClientBuilder builder = HttpClientBuilder.create();
 RequestConfig.Builder requestBuilder = RequestConfig.custom();
 requestBuilder = requestBuilder.setConnectTimeout(getConnectTimeout());
-requestBuilder = 
requestBuilder.setConnectionRequestTimeout(getOperationTimeout());
+requestBuilder = requestBuilder.setSocketTimeout(getOperationTimeout());
+builder.setRetryHandler(new DelayRetryHandler(retry, pause));
 builder.setDefaultRequestConfig(requestBuilder.build());
 httpClient = builder.build();
 httpClientCreated = true;



[hbase] branch branch-2.0 updated: HBASE-21754 ReportRegionStateTransitionRequest should be executed in priority executor

2019-01-23 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.0 by this push:
 new bdc0c82  HBASE-21754 ReportRegionStateTransitionRequest should be 
executed in priority executor
bdc0c82 is described below

commit bdc0c828376bc2231372b7d3e573053152aae39b
Author: Allan Yang 
AuthorDate: Wed Jan 23 21:41:40 2019 +0800

HBASE-21754 ReportRegionStateTransitionRequest should be executed in 
priority executor
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |   9 +-
 .../hadoop/hbase/ipc/MetricsHBaseServerSource.java |   2 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java   |   2 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java|   2 +
 .../apache/hadoop/hbase/ipc/FifoRpcScheduler.java  |   5 +
 .../hbase/ipc/MetricsHBaseServerWrapperImpl.java   |   9 +-
 .../org/apache/hadoop/hbase/ipc/RpcScheduler.java  |   3 +
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java   |  78 
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |   3 +-
 .../MasterAnnotationReadingPriorityFunction.java   |   9 +-
 .../regionserver/SimpleRpcSchedulerFactory.java|   2 +
 .../hadoop/hbase/ipc/DelegatingRpcScheduler.java   |   5 +
 .../hbase/ipc/MetricsHBaseServerWrapperStub.java   |   6 ++
 .../hadoop/hbase/ipc/TestRpcHandlerException.java  |   2 +-
 .../TestMasterHandlerFullWhenTransitRegion.java| 101 +
 .../hadoop/hbase/master/TestMasterQosFunction.java |   4 +-
 16 files changed, 214 insertions(+), 28 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index cbff7d8..c62bbdf 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1035,6 +1035,11 @@ public final class HConstants {
   public static final String REGION_SERVER_REPLICATION_HANDLER_COUNT =
   "hbase.regionserver.replication.handler.count";
   public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3;
+  // Meta Transition handlers to deal with meta 
ReportRegionStateTransitionRequest. Meta transition
+  // should be dealt with in a separate handler in case blocking other 
region's transition.
+  public static final String MASTER_META_TRANSITION_HANDLER_COUNT =
+  "hbase.master.meta.transition.handler.count";
+  public static final int MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT = 1;
 
   @Deprecated // unused. see HBASE-10569. remove this in 3.0
   public static final String MASTER_HANDLER_COUNT = 
"hbase.master.handler.count";
@@ -1102,7 +1107,7 @@ public final class HConstants {
* by different set of handlers. For example, HIGH_QOS tagged methods are
* handled by high priority handlers.
*/
-  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS
+  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS < meta_QOS
   public static final int PRIORITY_UNSET = -1;
   public static final int NORMAL_QOS = 0;
   public static final int REPLICATION_QOS = 5;
@@ -1111,6 +1116,8 @@ public final class HConstants {
   public static final int ADMIN_QOS = 100;
   public static final int HIGH_QOS = 200;
   public static final int SYSTEMTABLE_QOS = HIGH_QOS;
+  public static final int META_QOS = 300;
+
 
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 0833751..b87f3cc 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,12 @@ public interface MetricsHBaseServerSource extends 
ExceptionTrackingSource {
   String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
 "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
+  String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue waiting to be run";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
+  String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
   String WRITE_Q

[hbase] branch branch-2.1 updated: HBASE-21754 ReportRegionStateTransitionRequest should be executed in priority executor

2019-01-23 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
 new 681864c  HBASE-21754 ReportRegionStateTransitionRequest should be 
executed in priority executor
681864c is described below

commit 681864cff04b1376f25f995134b30297b82a887d
Author: Allan Yang 
AuthorDate: Wed Jan 23 21:34:48 2019 +0800

HBASE-21754 ReportRegionStateTransitionRequest should be executed in 
priority executor
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |   9 +-
 .../hadoop/hbase/ipc/MetricsHBaseServerSource.java |   2 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java   |   2 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java|   2 +
 .../apache/hadoop/hbase/ipc/FifoRpcScheduler.java  |   5 +
 .../hbase/ipc/MetricsHBaseServerWrapperImpl.java   |   9 +-
 .../org/apache/hadoop/hbase/ipc/RpcScheduler.java  |   3 +
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java   |  78 
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |   3 +-
 .../MasterAnnotationReadingPriorityFunction.java   |   9 +-
 .../regionserver/SimpleRpcSchedulerFactory.java|   2 +
 .../hadoop/hbase/ipc/DelegatingRpcScheduler.java   |   5 +
 .../hbase/ipc/MetricsHBaseServerWrapperStub.java   |   6 ++
 .../hadoop/hbase/ipc/TestRpcHandlerException.java  |   2 +-
 .../TestMasterHandlerFullWhenTransitRegion.java| 101 +
 .../hadoop/hbase/master/TestMasterQosFunction.java |   4 +-
 16 files changed, 214 insertions(+), 28 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 6d7dd18..02c540d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1069,6 +1069,11 @@ public final class HConstants {
   public static final String REGION_SERVER_REPLICATION_HANDLER_COUNT =
   "hbase.regionserver.replication.handler.count";
   public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3;
+  // Meta Transition handlers to deal with meta 
ReportRegionStateTransitionRequest. Meta transition
+  // should be dealt with in a separate handler in case blocking other 
region's transition.
+  public static final String MASTER_META_TRANSITION_HANDLER_COUNT =
+  "hbase.master.meta.transition.handler.count";
+  public static final int MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT = 1;
 
   @Deprecated // unused. see HBASE-10569. remove this in 3.0
   public static final String MASTER_HANDLER_COUNT = 
"hbase.master.handler.count";
@@ -1136,7 +1141,7 @@ public final class HConstants {
* by different set of handlers. For example, HIGH_QOS tagged methods are
* handled by high priority handlers.
*/
-  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS
+  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS < meta_QOS
   public static final int PRIORITY_UNSET = -1;
   public static final int NORMAL_QOS = 0;
   public static final int REPLICATION_QOS = 5;
@@ -1145,6 +1150,8 @@ public final class HConstants {
   public static final int ADMIN_QOS = 100;
   public static final int HIGH_QOS = 200;
   public static final int SYSTEMTABLE_QOS = HIGH_QOS;
+  public static final int META_QOS = 300;
+
 
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 0833751..b87f3cc 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,12 @@ public interface MetricsHBaseServerSource extends 
ExceptionTrackingSource {
   String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
 "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
+  String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue waiting to be run";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
+  String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
   String WRITE_Q

[hbase] branch branch-2 updated: HBASE-21754 ReportRegionStateTransitionRequest should be executed in priority executor

2019-01-23 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new d80556e  HBASE-21754 ReportRegionStateTransitionRequest should be 
executed in priority executor
d80556e is described below

commit d80556ec85cd5c2f6d65dae696e5a746ee0beb5d
Author: Allan Yang 
AuthorDate: Wed Jan 23 21:08:13 2019 +0800

HBASE-21754 ReportRegionStateTransitionRequest should be executed in 
priority executor
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |   9 +-
 .../hadoop/hbase/ipc/MetricsHBaseServerSource.java |   2 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java   |   4 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java|   2 +
 .../apache/hadoop/hbase/ipc/FifoRpcScheduler.java  |  10 ++
 .../hbase/ipc/MetricsHBaseServerWrapperImpl.java   |  16 
 .../org/apache/hadoop/hbase/ipc/RpcScheduler.java  |   6 ++
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java   |  82 +
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |   3 +-
 .../MasterAnnotationReadingPriorityFunction.java   |   9 +-
 .../regionserver/SimpleRpcSchedulerFactory.java|   2 +
 .../hadoop/hbase/ipc/DelegatingRpcScheduler.java   |  10 ++
 .../hbase/ipc/MetricsHBaseServerWrapperStub.java   |  10 ++
 .../hadoop/hbase/ipc/TestRpcHandlerException.java  |   2 +-
 .../TestMasterHandlerFullWhenTransitRegion.java| 101 +
 .../hadoop/hbase/master/TestMasterQosFunction.java |   4 +-
 16 files changed, 245 insertions(+), 27 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index c5eaa00..e6de71d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1074,6 +1074,11 @@ public final class HConstants {
   public static final String REGION_SERVER_REPLICATION_HANDLER_COUNT =
   "hbase.regionserver.replication.handler.count";
   public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3;
+  // Meta Transition handlers to deal with meta 
ReportRegionStateTransitionRequest. Meta transition
+  // should be dealt with in a separate handler in case blocking other 
region's transition.
+  public static final String MASTER_META_TRANSITION_HANDLER_COUNT =
+  "hbase.master.meta.transition.handler.count";
+  public static final int MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT = 1;
 
   @Deprecated // unused. see HBASE-10569. remove this in 3.0
   public static final String MASTER_HANDLER_COUNT = 
"hbase.master.handler.count";
@@ -1141,7 +1146,7 @@ public final class HConstants {
* by different set of handlers. For example, HIGH_QOS tagged methods are
* handled by high priority handlers.
*/
-  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS
+  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS < meta_QOS
   public static final int PRIORITY_UNSET = -1;
   public static final int NORMAL_QOS = 0;
   public static final int REPLICATION_QOS = 5;
@@ -1150,6 +1155,8 @@ public final class HConstants {
   public static final int ADMIN_QOS = 100;
   public static final int HIGH_QOS = 200;
   public static final int SYSTEMTABLE_QOS = HIGH_QOS;
+  public static final int META_QOS = 300;
+
 
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 3877aae..69bd040 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,12 @@ public interface MetricsHBaseServerSource extends 
ExceptionTrackingSource {
   String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
 "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
+  String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue waiting to be run";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
+  String METAPRIORITY_QUEUE_DESC = "Number of calls in the meta priority call 
queue waiting to be run";
   S

[hbase] branch master updated: HBASE-21754 ReportRegionStateTransitionRequest should be executed in priority executor

2019-01-23 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new b5619a2  HBASE-21754 ReportRegionStateTransitionRequest should be 
executed in priority executor
b5619a2 is described below

commit b5619a2a26a41c423d6c7af1f1feaa990b63c58c
Author: Allan Yang 
AuthorDate: Wed Jan 23 20:55:00 2019 +0800

HBASE-21754 ReportRegionStateTransitionRequest should be executed in 
priority executor
---
 .../java/org/apache/hadoop/hbase/HConstants.java   |   9 +-
 .../hadoop/hbase/ipc/MetricsHBaseServerSource.java |   2 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java   |   4 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java|   2 +
 .../apache/hadoop/hbase/ipc/FifoRpcScheduler.java  |  10 ++
 .../hbase/ipc/MetricsHBaseServerWrapperImpl.java   |  16 
 .../org/apache/hadoop/hbase/ipc/RpcScheduler.java  |   6 ++
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java   |  81 +
 .../apache/hadoop/hbase/ipc/SimpleRpcServer.java   |   3 +-
 .../MasterAnnotationReadingPriorityFunction.java   |   9 +-
 .../regionserver/SimpleRpcSchedulerFactory.java|   2 +
 .../hadoop/hbase/ipc/DelegatingRpcScheduler.java   |  10 ++
 .../hbase/ipc/MetricsHBaseServerWrapperStub.java   |  10 ++
 .../hadoop/hbase/ipc/TestRpcHandlerException.java  |   2 +-
 .../TestMasterHandlerFullWhenTransitRegion.java| 101 +
 .../hadoop/hbase/master/TestMasterQosFunction.java |   4 +-
 16 files changed, 244 insertions(+), 27 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 8b34bbd..6d57744 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1048,6 +1048,11 @@ public final class HConstants {
   public static final String REGION_SERVER_REPLICATION_HANDLER_COUNT =
   "hbase.regionserver.replication.handler.count";
   public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3;
+  // Meta Transition handlers to deal with meta 
ReportRegionStateTransitionRequest. Meta transition
+  // should be dealt with in a separate handler in case blocking other 
region's transition.
+  public static final String MASTER_META_TRANSITION_HANDLER_COUNT =
+  "hbase.master.meta.transition.handler.count";
+  public static final int MASTER__META_TRANSITION_HANDLER_COUNT_DEFAULT = 1;
 
   /** Conf key for enabling meta replication */
   public static final String USE_META_REPLICAS = "hbase.meta.replicas.use";
@@ -1105,7 +1110,7 @@ public final class HConstants {
* by different set of handlers. For example, HIGH_QOS tagged methods are
* handled by high priority handlers.
*/
-  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS
+  // normal_QOS < replication_QOS < replay_QOS < QOS_threshold < admin_QOS < 
high_QOS < meta_QOS
   public static final int PRIORITY_UNSET = -1;
   public static final int NORMAL_QOS = 0;
   public static final int REPLICATION_QOS = 5;
@@ -1114,6 +1119,8 @@ public final class HConstants {
   public static final int ADMIN_QOS = 100;
   public static final int HIGH_QOS = 200;
   public static final int SYSTEMTABLE_QOS = HIGH_QOS;
+  public static final int META_QOS = 300;
+
 
   /** Directory under /hbase where archived hfiles are stored */
   public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 3877aae..69bd040 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,12 @@ public interface MetricsHBaseServerSource extends 
ExceptionTrackingSource {
   String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
 "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
+  String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue waiting to be run";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue 
waiting to be run";
+  String METAPRIORITY_QUEUE_DESC = "Number of calls in the meta priority call 
queue waiting to be run";
   String WRITE_QUEUE_NAME = "numC

[hbase] branch branch-2 updated: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server(addendum)

2019-01-09 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new f59b99c  HBASE-21652 Refactor ThriftServer making thrift2 server 
inherited from thrift1 server(addendum)
f59b99c is described below

commit f59b99c48a6fad1f20584491fbe223a7799bb3d4
Author: Allan Yang 
AuthorDate: Thu Jan 10 11:09:26 2019 +0800

HBASE-21652 Refactor ThriftServer making thrift2 server inherited from 
thrift1 server(addendum)
---
 .../apache/hadoop/hbase/thrift/ThriftMetrics.java  |  6 +--
 .../apache/hadoop/hbase/thrift/ThriftServer.java   | 44 --
 .../hbase/thrift2/ThriftHBaseServiceHandler.java   |  2 +-
 .../apache/hadoop/hbase/thrift2/ThriftServer.java  |  6 +++
 4 files changed, 43 insertions(+), 15 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
index 1009210..ddd7072 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
@@ -56,8 +56,8 @@ public class ThriftMetrics  {
 this.source = source;
   }
 
-  private MetricsThriftServerSource source;
-  private final long slowResponseTime;
+  protected MetricsThriftServerSource source;
+  protected final long slowResponseTime;
   public static final String SLOW_RESPONSE_NANO_SEC =
 "hbase.thrift.slow.response.nano.second";
   public static final long DEFAULT_SLOW_RESPONSE_NANO_SEC = 10 * 1000 * 1000;
@@ -147,7 +147,7 @@ public class ThriftMetrics  {
 }
   }
 
-  private static Throwable unwrap(Throwable t) {
+  protected static Throwable unwrap(Throwable t) {
 if (t == null) {
   return t;
 }
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index adbed91..1971f44 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -196,6 +196,10 @@ public class ThriftServer  extends Configured implements 
Tool {
 this.conf = HBaseConfiguration.create(conf);
   }
 
+  protected ThriftMetrics createThriftMetrics(Configuration conf) {
+return new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE);
+  }
+
   protected void setupParamters() throws IOException {
 // login the server principal (if using secure Hadoop)
 UserProvider userProvider = UserProvider.instantiate(conf);
@@ -210,7 +214,7 @@ public class ThriftServer  extends Configured implements 
Tool {
 this.serviceUGI = userProvider.getCurrent().getUGI();
 
 this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT);
-this.metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE);
+this.metrics = createThriftMetrics(conf);
 this.pauseMonitor = new JvmPauseMonitor(conf, this.metrics.getSource());
 this.hbaseServiceHandler = createHandler(conf, userProvider);
 this.hbaseServiceHandler.initMetrics(metrics);
@@ -278,11 +282,19 @@ public class ThriftServer  extends Configured implements 
Tool {
 HbaseHandlerMetricsProxy.newInstance((Hbase.Iface) 
hbaseServiceHandler, metrics, conf));
   }
 
+  /**
+   * the thrift server, not null means the server is started, for test only
+   * @return the tServer
+   */
   @VisibleForTesting
   public TServer getTserver() {
 return tserver;
   }
 
+  /**
+   * the Jetty server, not null means the HTTP server is started, for test only
+   * @return the http server
+   */
   @VisibleForTesting
   public Server getHttpServer() {
 return httpServer;
@@ -301,14 +313,24 @@ public class ThriftServer  extends Configured implements 
Tool {
   }
 
   /**
+   * Create a Servlet for the http server
+   * @param protocolFactory protocolFactory
+   * @return the servlet
+   * @throws IOException IOException
+   */
+  protected TServlet createTServlet(TProtocolFactory protocolFactory) throws 
IOException {
+return new ThriftHttpServlet(processor, protocolFactory, serviceUGI,
+conf, hbaseServiceHandler, securityEnabled, doAsEnabled);
+  }
+
+  /**
* Setup a HTTP Server using Jetty to serve calls from THttpClient
*
* @throws IOException IOException
*/
   protected void setupHTTPServer() throws IOException {
 TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
-TServlet thriftHttpServlet = new ThriftHttpServlet(processor, 
protocolFactory, serviceUGI,
-conf, hbaseServiceHandler, securityEnabled, doAsEnabled);
+TServlet thriftHttpServlet = createTServlet(protocolFactory);
 
 // Set the default max thread number to 100 to limit
 // 

[hbase] branch master updated: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server(addendum)

2019-01-09 Thread allan163
This is an automated email from the ASF dual-hosted git repository.

allan163 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
 new 52bc6db  HBASE-21652 Refactor ThriftServer making thrift2 server 
inherited from thrift1 server(addendum)
52bc6db is described below

commit 52bc6db0502cd1e37a55fa97fe5ab32232b787cc
Author: Allan Yang 
AuthorDate: Thu Jan 10 11:03:46 2019 +0800

HBASE-21652 Refactor ThriftServer making thrift2 server inherited from 
thrift1 server(addendum)
---
 .../apache/hadoop/hbase/thrift/ThriftMetrics.java  |  6 +--
 .../apache/hadoop/hbase/thrift/ThriftServer.java   | 44 --
 .../hbase/thrift2/ThriftHBaseServiceHandler.java   |  2 +-
 .../apache/hadoop/hbase/thrift2/ThriftServer.java  |  6 +++
 4 files changed, 43 insertions(+), 15 deletions(-)

diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
index f612eeb..8c4c6f0 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftMetrics.java
@@ -56,8 +56,8 @@ public class ThriftMetrics  {
 this.source = source;
   }
 
-  private MetricsThriftServerSource source;
-  private final long slowResponseTime;
+  protected MetricsThriftServerSource source;
+  protected final long slowResponseTime;
   public static final String SLOW_RESPONSE_NANO_SEC =
 "hbase.thrift.slow.response.nano.second";
   public static final long DEFAULT_SLOW_RESPONSE_NANO_SEC = 10 * 1000 * 1000;
@@ -149,7 +149,7 @@ public class ThriftMetrics  {
 }
   }
 
-  private static Throwable unwrap(Throwable t) {
+  protected static Throwable unwrap(Throwable t) {
 if (t == null) {
   return t;
 }
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index 830ce52..598b306 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -196,6 +196,10 @@ public class ThriftServer  extends Configured implements 
Tool {
 this.conf = HBaseConfiguration.create(conf);
   }
 
+  protected ThriftMetrics createThriftMetrics(Configuration conf) {
+return new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE);
+  }
+
   protected void setupParamters() throws IOException {
 // login the server principal (if using secure Hadoop)
 UserProvider userProvider = UserProvider.instantiate(conf);
@@ -210,7 +214,7 @@ public class ThriftServer  extends Configured implements 
Tool {
 this.serviceUGI = userProvider.getCurrent().getUGI();
 
 this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT);
-this.metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE);
+this.metrics = createThriftMetrics(conf);
 this.pauseMonitor = new JvmPauseMonitor(conf, this.metrics.getSource());
 this.hbaseServiceHandler = createHandler(conf, userProvider);
 this.hbaseServiceHandler.initMetrics(metrics);
@@ -278,11 +282,19 @@ public class ThriftServer  extends Configured implements 
Tool {
 HbaseHandlerMetricsProxy.newInstance((Hbase.Iface) 
hbaseServiceHandler, metrics, conf));
   }
 
+  /**
+   * the thrift server, not null means the server is started, for test only
+   * @return the tServer
+   */
   @VisibleForTesting
   public TServer getTserver() {
 return tserver;
   }
 
+  /**
+   * the Jetty server, not null means the HTTP server is started, for test only
+   * @return the http server
+   */
   @VisibleForTesting
   public Server getHttpServer() {
 return httpServer;
@@ -301,14 +313,24 @@ public class ThriftServer  extends Configured implements 
Tool {
   }
 
   /**
+   * Create a Servlet for the http server
+   * @param protocolFactory protocolFactory
+   * @return the servlet
+   * @throws IOException IOException
+   */
+  protected TServlet createTServlet(TProtocolFactory protocolFactory) throws 
IOException {
+return new ThriftHttpServlet(processor, protocolFactory, serviceUGI,
+conf, hbaseServiceHandler, securityEnabled, doAsEnabled);
+  }
+
+  /**
* Setup a HTTP Server using Jetty to serve calls from THttpClient
*
* @throws IOException IOException
*/
   protected void setupHTTPServer() throws IOException {
 TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
-TServlet thriftHttpServlet = new ThriftHttpServlet(processor, 
protocolFactory, serviceUGI,
-conf, hbaseServiceHandler, securityEnabled, doAsEnabled);
+TServlet thriftHttpServlet = createTServlet(protocolFactory);
 
 // Set the default max thread number to 100 to limit
 // the number of 

[1/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 26e02e1c0 -> 891c620c5


http://git-wip-us.apache.org/repos/asf/hbase/blob/891c620c/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
new file mode 100644
index 000..1583619
--- /dev/null
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -0,0 +1,841 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2;
+
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnValueFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RestTests;
+import org.apache.hadoop.hbase.thrift.Constants;
+import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ RestTests.class, MediumTests.class})
+
+public class TestThriftConnection {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestThriftConnection.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestThriftConnection.class);
+
+  private static final byte[] FAMILYA = Bytes.toBytes("fa");
+  private static final byte[] FAMILYB = Bytes.toBytes("fb");
+  private static final byte[] FAMILYC = Bytes.toBytes("fc");
+  private static final byte[] FAMILYD = Bytes.toBytes("fd");
+
+  private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
+  private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
+  private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
+  private static final byte[] ROW_4 = Bytes.toBytes("testrow4");
+
+  private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
+  private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
+  

[3/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-09 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/891c620c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
new file mode 100644
index 000..f6eb993
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -0,0 +1,1389 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactType;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaRetriever;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ThriftAdmin implements Admin {
+
+  private THBaseService.Client client;
+  private TTransport transport;
+  private int operationTimeout;
+  private Configuration conf;
+
+
+  public ThriftAdmin(THBaseService.Client client, TTransport tTransport, 
Configuration conf) {
+this.client = client;
+this.transport = tTransport;
+this.operationTimeout = 
conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+this.conf = conf;
+  }
+
+  @Override
+  public int getOperationTimeout() {
+return operationTimeout;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+
+  }
+
+  @Override
+  public boolean isAborted() {
+return false;
+  }
+
+  @Override
+  public void close() throws IOException {
+transport.close();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+return conf;
+  }
+
+  @Override
+  public boolean tableExists(TableName tableName) 

[4/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-09 Thread allan163
HBASE-21661 Provide Thrift2 implementation of Table/Admin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/891c620c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/891c620c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/891c620c

Branch: refs/heads/branch-2
Commit: 891c620c5b13aed46f26b562465635d31cbfc655
Parents: 26e02e1
Author: Allan Yang 
Authored: Wed Jan 9 16:01:22 2019 +0800
Committer: Allan Yang 
Committed: Wed Jan 9 16:01:22 2019 +0800

--
 .../apache/hadoop/hbase/thrift/Constants.java   |8 +
 .../hadoop/hbase/thrift/ThriftServer.java   |   11 +
 .../hbase/thrift/generated/AlreadyExists.java   |2 +-
 .../hbase/thrift/generated/BatchMutation.java   |2 +-
 .../thrift/generated/ColumnDescriptor.java  |2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java|2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  |2 +-
 .../hbase/thrift/generated/IllegalArgument.java |2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java |2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  |2 +-
 .../hadoop/hbase/thrift/generated/TCell.java|2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  |2 +-
 .../hbase/thrift/generated/TIncrement.java  |2 +-
 .../hbase/thrift/generated/TRegionInfo.java |2 +-
 .../hbase/thrift/generated/TRowResult.java  |2 +-
 .../hadoop/hbase/thrift/generated/TScan.java|2 +-
 .../thrift2/ThriftHBaseServiceHandler.java  |4 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |  437 ++
 .../hbase/thrift2/client/ThriftAdmin.java   | 1389 ++
 .../thrift2/client/ThriftClientBuilder.java |   37 +
 .../hbase/thrift2/client/ThriftConnection.java  |  322 
 .../hbase/thrift2/client/ThriftTable.java   |  492 +++
 .../hadoop/hbase/thrift2/generated/TAppend.java |2 +-
 .../hbase/thrift2/generated/TAuthorization.java |2 +-
 .../thrift2/generated/TCellVisibility.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java |2 +-
 .../generated/TColumnFamilyDescriptor.java  |2 +-
 .../thrift2/generated/TColumnIncrement.java |2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |2 +-
 .../hadoop/hbase/thrift2/generated/TDelete.java |2 +-
 .../hadoop/hbase/thrift2/generated/TGet.java|  127 +-
 .../hbase/thrift2/generated/THBaseService.java  |  122 +-
 .../hbase/thrift2/generated/THRegionInfo.java   |2 +-
 .../thrift2/generated/THRegionLocation.java |2 +-
 .../hbase/thrift2/generated/TIOError.java   |2 +-
 .../thrift2/generated/TIllegalArgument.java |2 +-
 .../hbase/thrift2/generated/TIncrement.java |2 +-
 .../thrift2/generated/TNamespaceDescriptor.java |2 +-
 .../hadoop/hbase/thrift2/generated/TPut.java|2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |2 +-
 .../hbase/thrift2/generated/TRowMutations.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   |  127 +-
 .../hbase/thrift2/generated/TServerName.java|2 +-
 .../thrift2/generated/TTableDescriptor.java |2 +-
 .../hbase/thrift2/generated/TTableName.java |   50 +-
 .../hbase/thrift2/generated/TTimeRange.java |2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift|   13 +-
 .../hbase/thrift2/TestThriftConnection.java |  841 +++
 48 files changed, 3930 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/891c620c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
index 8e3d004..55f2499 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
@@ -144,8 +144,16 @@ public final class Constants {
   public static final String THRIFT_READONLY_ENABLED = "hbase.thrift.readonly";
   public static final boolean THRIFT_READONLY_ENABLED_DEFAULT = false;
 
+  public static final String HBASE_THRIFT_CLIENT_SCANNER_CACHING =
+  "hbase.thrift.client.scanner.caching";
 
+  public static final int HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT = 20;
 
+  public static final String HBASE_THRIFT_SERVER_NAME = 
"hbase.thrift.server.name";
+  public static final String HBASE_THRIFT_SERVER_PORT = 
"hbase.thrift.server.port";
+
+  public static final String HBASE_THRIFT_CLIENT_BUIDLER_CLASS =
+  "hbase.thrift.client.builder.class";
 
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/891c620c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java

[2/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-09 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/891c620c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
index b38d936..676275a 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * If you specify a time range and a timestamp the range is ignored.
  * Timestamps on TColumns are ignored.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2019-01-03")
 public class TGet implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TGet");
 
@@ -65,6 +65,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final org.apache.thrift.protocol.TField 
STORE_LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("storeLimit", 
org.apache.thrift.protocol.TType.I32, (short)12);
   private static final org.apache.thrift.protocol.TField 
STORE_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("storeOffset", 
org.apache.thrift.protocol.TType.I32, (short)13);
   private static final org.apache.thrift.protocol.TField 
EXISTENCE_ONLY_FIELD_DESC = new 
org.apache.thrift.protocol.TField("existence_only", 
org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField 
FILTER_BYTES_FIELD_DESC = new org.apache.thrift.protocol.TField("filterBytes", 
org.apache.thrift.protocol.TType.STRING, (short)15);
 
   private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -90,6 +91,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   public int storeLimit; // optional
   public int storeOffset; // optional
   public boolean existence_only; // optional
+  public ByteBuffer filterBytes; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -110,7 +112,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 CACHE_BLOCKS((short)11, "cacheBlocks"),
 STORE_LIMIT((short)12, "storeLimit"),
 STORE_OFFSET((short)13, "storeOffset"),
-EXISTENCE_ONLY((short)14, "existence_only");
+EXISTENCE_ONLY((short)14, "existence_only"),
+FILTER_BYTES((short)15, "filterBytes");
 
 private static final Map byName = new HashMap();
 
@@ -153,6 +156,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
   return STORE_OFFSET;
 case 14: // EXISTENCE_ONLY
   return EXISTENCE_ONLY;
+case 15: // FILTER_BYTES
+  return FILTER_BYTES;
 default:
   return null;
   }
@@ -201,7 +206,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final int __STOREOFFSET_ISSET_ID = 5;
   private static final int __EXISTENCE_ONLY_ISSET_ID = 6;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY};
+  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY,_Fields.FILTER_BYTES};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -236,6 +241,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
 tmpMap.put(_Fields.EXISTENCE_ONLY, new 
org.apache.thrift.meta_data.FieldMetaData("existence_only", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+tmpMap.put(_Fields.FILTER_BYTES, new 
org.apache.thrift.meta_data.FieldMetaData("filterBytes", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+

[3/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
new file mode 100644
index 0000000..d45a6db4
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -0,0 +1,1405 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactType;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaRetriever;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ThriftAdmin implements Admin {
+
+  private THBaseService.Client client;
+  private TTransport transport;
+  private int operationTimeout;
+  private Configuration conf;
+
+
+  public ThriftAdmin(THBaseService.Client client, TTransport tTransport, 
Configuration conf) {
+this.client = client;
+this.transport = tTransport;
+this.operationTimeout = 
conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+this.conf = conf;
+  }
+
+  @Override
+  public int getOperationTimeout() {
+return operationTimeout;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+
+  }
+
+  @Override
+  public boolean isAborted() {
+return false;
+  }
+
+  @Override
+  public void close() throws IOException {
+transport.close();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+return conf;
+  

[4/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
HBASE-21661 Provide Thrift2 implementation of Table/Admin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f053003c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f053003c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f053003c

Branch: refs/heads/master
Commit: f053003ce7e8d9c86b2ff762b646d69e5e04cfe2
Parents: 5c902b4
Author: Allan Yang 
Authored: Wed Jan 9 15:38:23 2019 +0800
Committer: Allan Yang 
Committed: Wed Jan 9 15:38:23 2019 +0800

--
 .../apache/hadoop/hbase/thrift/Constants.java   |8 +
 .../hadoop/hbase/thrift/ThriftServer.java   |   11 +
 .../hbase/thrift/generated/AlreadyExists.java   |2 +-
 .../hbase/thrift/generated/BatchMutation.java   |2 +-
 .../thrift/generated/ColumnDescriptor.java  |2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java|2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  |2 +-
 .../hbase/thrift/generated/IllegalArgument.java |2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java |2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  |2 +-
 .../hadoop/hbase/thrift/generated/TCell.java|2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  |2 +-
 .../hbase/thrift/generated/TIncrement.java  |2 +-
 .../hbase/thrift/generated/TRegionInfo.java |2 +-
 .../hbase/thrift/generated/TRowResult.java  |2 +-
 .../hadoop/hbase/thrift/generated/TScan.java|2 +-
 .../thrift2/ThriftHBaseServiceHandler.java  |4 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |  437 ++
 .../hbase/thrift2/client/ThriftAdmin.java   | 1405 ++
 .../thrift2/client/ThriftClientBuilder.java |   37 +
 .../hbase/thrift2/client/ThriftConnection.java  |  322 
 .../hbase/thrift2/client/ThriftTable.java   |  492 ++
 .../hadoop/hbase/thrift2/generated/TAppend.java |2 +-
 .../hbase/thrift2/generated/TAuthorization.java |2 +-
 .../thrift2/generated/TCellVisibility.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java |2 +-
 .../generated/TColumnFamilyDescriptor.java  |2 +-
 .../thrift2/generated/TColumnIncrement.java |2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |2 +-
 .../hadoop/hbase/thrift2/generated/TDelete.java |2 +-
 .../hadoop/hbase/thrift2/generated/TGet.java|  127 +-
 .../hbase/thrift2/generated/THBaseService.java  |  122 +-
 .../hbase/thrift2/generated/THRegionInfo.java   |2 +-
 .../thrift2/generated/THRegionLocation.java |2 +-
 .../hbase/thrift2/generated/TIOError.java   |2 +-
 .../thrift2/generated/TIllegalArgument.java |2 +-
 .../hbase/thrift2/generated/TIncrement.java |2 +-
 .../thrift2/generated/TNamespaceDescriptor.java |2 +-
 .../hadoop/hbase/thrift2/generated/TPut.java|2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |2 +-
 .../hbase/thrift2/generated/TRowMutations.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   |  127 +-
 .../hbase/thrift2/generated/TServerName.java|2 +-
 .../thrift2/generated/TTableDescriptor.java |2 +-
 .../hbase/thrift2/generated/TTableName.java |   50 +-
 .../hbase/thrift2/generated/TTimeRange.java |2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift|   13 +-
 .../hbase/thrift2/TestThriftConnection.java |  841 +++
 48 files changed, 3946 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
index 8e3d004..55f2499 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
@@ -144,8 +144,16 @@ public final class Constants {
   public static final String THRIFT_READONLY_ENABLED = "hbase.thrift.readonly";
   public static final boolean THRIFT_READONLY_ENABLED_DEFAULT = false;
 
+  public static final String HBASE_THRIFT_CLIENT_SCANNER_CACHING =
+  "hbase.thrift.client.scanner.caching";
 
+  public static final int HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT = 20;
 
+  public static final String HBASE_THRIFT_SERVER_NAME = 
"hbase.thrift.server.name";
+  public static final String HBASE_THRIFT_SERVER_PORT = 
"hbase.thrift.server.port";
+
+  public static final String HBASE_THRIFT_CLIENT_BUIDLER_CLASS =
+  "hbase.thrift.client.builder.class";
 
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java

[1/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 5c902b48e -> f053003ce


http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
new file mode 100644
index 0000000..1583619
--- /dev/null
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -0,0 +1,841 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2;
+
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnValueFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RestTests;
+import org.apache.hadoop.hbase.thrift.Constants;
+import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ RestTests.class, MediumTests.class})
+
+public class TestThriftConnection {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestThriftConnection.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestThriftConnection.class);
+
+  private static final byte[] FAMILYA = Bytes.toBytes("fa");
+  private static final byte[] FAMILYB = Bytes.toBytes("fb");
+  private static final byte[] FAMILYC = Bytes.toBytes("fc");
+  private static final byte[] FAMILYD = Bytes.toBytes("fd");
+
+  private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
+  private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
+  private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
+  private static final byte[] ROW_4 = Bytes.toBytes("testrow4");
+
+  private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
+  private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
+  

[2/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
index b38d936..676275a 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * If you specify a time range and a timestamp the range is ignored.
  * Timestamps on TColumns are ignored.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2019-01-03")
 public class TGet implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TGet");
 
@@ -65,6 +65,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final org.apache.thrift.protocol.TField 
STORE_LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("storeLimit", 
org.apache.thrift.protocol.TType.I32, (short)12);
   private static final org.apache.thrift.protocol.TField 
STORE_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("storeOffset", 
org.apache.thrift.protocol.TType.I32, (short)13);
   private static final org.apache.thrift.protocol.TField 
EXISTENCE_ONLY_FIELD_DESC = new 
org.apache.thrift.protocol.TField("existence_only", 
org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField 
FILTER_BYTES_FIELD_DESC = new org.apache.thrift.protocol.TField("filterBytes", 
org.apache.thrift.protocol.TType.STRING, (short)15);
 
   private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -90,6 +91,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   public int storeLimit; // optional
   public int storeOffset; // optional
   public boolean existence_only; // optional
+  public ByteBuffer filterBytes; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -110,7 +112,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 CACHE_BLOCKS((short)11, "cacheBlocks"),
 STORE_LIMIT((short)12, "storeLimit"),
 STORE_OFFSET((short)13, "storeOffset"),
-EXISTENCE_ONLY((short)14, "existence_only");
+EXISTENCE_ONLY((short)14, "existence_only"),
+FILTER_BYTES((short)15, "filterBytes");
 
 private static final Map byName = new HashMap();
 
@@ -153,6 +156,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
   return STORE_OFFSET;
 case 14: // EXISTENCE_ONLY
   return EXISTENCE_ONLY;
+case 15: // FILTER_BYTES
+  return FILTER_BYTES;
 default:
   return null;
   }
@@ -201,7 +206,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final int __STOREOFFSET_ISSET_ID = 5;
   private static final int __EXISTENCE_ONLY_ISSET_ID = 6;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY};
+  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY,_Fields.FILTER_BYTES};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -236,6 +241,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
 tmpMap.put(_Fields.EXISTENCE_ONLY, new 
org.apache.thrift.meta_data.FieldMetaData("existence_only", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+tmpMap.put(_Fields.FILTER_BYTES, new 
org.apache.thrift.meta_data.FieldMetaData("filterBytes", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+

[4/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 
server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d8d74c6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d8d74c6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d8d74c6

Branch: refs/heads/branch-2
Commit: 2d8d74c64db6dd1894fd8f6e409f3e7fae426d11
Parents: 0b08608
Author: Allan Yang 
Authored: Wed Jan 2 17:34:31 2019 +0800
Committer: Allan Yang 
Committed: Wed Jan 2 17:34:31 2019 +0800

--
 .../apache/hadoop/hbase/thrift/Constants.java   |  151 ++
 .../hbase/thrift/HBaseServiceHandler.java   |   90 +
 .../hbase/thrift/HbaseHandlerMetricsProxy.java  |   20 +-
 .../apache/hadoop/hbase/thrift/ImplType.java|  143 ++
 .../hadoop/hbase/thrift/IncrementCoalescer.java |6 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java | 1347 
 .../hadoop/hbase/thrift/ThriftHttpServlet.java  |   12 +-
 .../hadoop/hbase/thrift/ThriftServer.java   |  709 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java | 2026 --
 .../thrift2/ThriftHBaseServiceHandler.java  |   69 +-
 .../hadoop/hbase/thrift2/ThriftServer.java  |  581 +
 .../resources/hbase-webapps/thrift/thrift.jsp   |2 +-
 .../hbase/thrift/TestThriftHttpServer.java  |   28 +-
 .../hadoop/hbase/thrift/TestThriftServer.java   |   58 +-
 .../hbase/thrift/TestThriftServerCmdLine.java   |   48 +-
 .../thrift/TestThriftSpnegoHttpServer.java  |   21 +-
 .../hbase/thrift2/TestThrift2HttpServer.java|   90 +
 .../hbase/thrift2/TestThrift2ServerCmdLine.java |   99 +
 .../thrift2/TestThriftHBaseServiceHandler.java  |   15 +-
 19 files changed, 2717 insertions(+), 2798 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8d74c6/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
new file mode 100644
index 0000000..8e3d004
--- /dev/null
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Thrift related constants
+ */
+@InterfaceAudience.Private
+public final class Constants {
+  private Constants(){}
+
+  public static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+
+  public static final String SERVER_TYPE_CONF_KEY =
+  "hbase.regionserver.thrift.server.type";
+
+  public static final String COMPACT_CONF_KEY = 
"hbase.regionserver.thrift.compact";
+  public static final boolean COMPACT_CONF_DEFAULT = false;
+
+  public static final String FRAMED_CONF_KEY = 
"hbase.regionserver.thrift.framed";
+  public static final boolean FRAMED_CONF_DEFAULT = false;
+
+  public static final String MAX_FRAME_SIZE_CONF_KEY =
+  "hbase.regionserver.thrift.framed.max_frame_size_in_mb";
+  public static final int MAX_FRAME_SIZE_CONF_DEFAULT = 2;
+
+  public static final String COALESCE_INC_KEY = 
"hbase.regionserver.thrift.coalesceIncrement";
+  public static final String USE_HTTP_CONF_KEY = 
"hbase.regionserver.thrift.http";
+
+  public static final String HTTP_MIN_THREADS_KEY = 
"hbase.thrift.http_threads.min";
+  public static final int HTTP_MIN_THREADS_KEY_DEFAULT = 2;
+
+  public static final String HTTP_MAX_THREADS_KEY = 
"hbase.thrift.http_threads.max";
+  public static final int HTTP_MAX_THREADS_KEY_DEFAULT = 100;
+
+  // ssl related configs
+  public static final String THRIFT_SSL_ENABLED_KEY = 
"hbase.thrift.ssl.enabled";
+  public static final String THRIFT_SSL_KEYSTORE_STORE_KEY = 
"hbase.thrift.ssl.keystore.store";
+  public static final String THRIFT_SSL_KEYSTORE_PASSWORD_KEY =
+  "hbase.thrift.ssl.keystore.password";
+  public static final String 

[3/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8d74c6/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index 8dadd49..d5e75b8 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -18,16 +18,132 @@
 
 package org.apache.hadoop.hbase.thrift;
 
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT;
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_BIND_ADDR;
+import static 
org.apache.hadoop.hbase.thrift.Constants.DEFAULT_HTTP_MAX_HEADER_SIZE;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_LISTEN_PORT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.INFOPORT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.KEEP_ALIVE_SEC_OPTION;
+import static 
org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_QUEUE_SIZE_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MIN_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.PORT_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.PORT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.READ_TIMEOUT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.SELECTOR_NUM_OPTION;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_INTERFACE_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_FILTERS;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_QOP_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SELECTOR_NUM;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_ENABLED_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_PROTOCOLS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_PASSWORD_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_STORE_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SUPPORT_PROXYUSER_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.USE_HTTP_CONF_KEY;
+
+import 

[1/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0b086087b -> 2d8d74c64


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8d74c6/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 7a611c9..fa3d39d 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -18,351 +18,86 @@
  */
 package org.apache.hadoop.hbase.thrift2;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.security.PrivilegedAction;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hbase.thrift.Constants.READONLY_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_READONLY_ENABLED;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_READONLY_ENABLED_DEFAULT;
 
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SecurityUtil;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-import org.apache.hadoop.hbase.thrift.ThriftMetrics;
+import org.apache.hadoop.hbase.thrift.HBaseServiceHandler;
+import org.apache.hadoop.hbase.thrift.HbaseHandlerMetricsProxy;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-import org.apache.hadoop.hbase.util.DNS;
-import org.apache.hadoop.hbase.util.JvmPauseMonitor;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.THsHaServer;
-import org.apache.thrift.server.TNonblockingServer;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.server.TThreadedSelectorServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TNonblockingServerSocket;
-import org.apache.thrift.transport.TNonblockingServerTransport;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TServerTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.OptionGroup;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
 
 /**
- * ThriftServer - this class starts up a Thrift server which implements the 
HBase API specified in the
- * HbaseClient.thrift IDL file.
+ * ThriftServer - this class starts up a Thrift server which implements the 
HBase API specified in
+ * the HbaseClient.thrift IDL file.
  */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 

[2/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8d74c6/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
deleted file mode 100644
index b510ff5..000
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ /dev/null
@@ -1,2026 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift;
-
-import static org.apache.hadoop.hbase.util.Bytes.getBytes;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
-
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell.Type;
-import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilderFactory;
-import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.OperationWithAttributes;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.http.HttpServerUtil;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-import org.apache.hadoop.hbase.security.SecurityUtil;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
-import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
-import org.apache.hadoop.hbase.thrift.generated.Hbase;
-import org.apache.hadoop.hbase.thrift.generated.IOError;
-import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
-import org.apache.hadoop.hbase.thrift.generated.Mutation;
-import org.apache.hadoop.hbase.thrift.generated.TAppend;
-import org.apache.hadoop.hbase.thrift.generated.TCell;
-import org.apache.hadoop.hbase.thrift.generated.TIncrement;
-import 

[3/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b6b4af/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index fc00327..6d11ac6 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -18,16 +18,132 @@
 
 package org.apache.hadoop.hbase.thrift;
 
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT;
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_BIND_ADDR;
+import static 
org.apache.hadoop.hbase.thrift.Constants.DEFAULT_HTTP_MAX_HEADER_SIZE;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_LISTEN_PORT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.INFOPORT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.KEEP_ALIVE_SEC_OPTION;
+import static 
org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_QUEUE_SIZE_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MIN_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.PORT_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.PORT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.READ_TIMEOUT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.SELECTOR_NUM_OPTION;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_INTERFACE_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_FILTERS;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_QOP_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SELECTOR_NUM;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_ENABLED_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_PROTOCOLS_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_PASSWORD_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_STORE_KEY;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_SUPPORT_PROXYUSER_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.USE_HTTP_CONF_KEY;
+
+import 

[4/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 
server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e4b6b4af
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e4b6b4af
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e4b6b4af

Branch: refs/heads/master
Commit: e4b6b4afb933a961f543537875f87a2dc62d3757
Parents: f0b50a8
Author: Allan Yang 
Authored: Wed Jan 2 16:13:17 2019 +0800
Committer: Allan Yang 
Committed: Wed Jan 2 16:13:57 2019 +0800

--
 .../apache/hadoop/hbase/thrift/Constants.java   |  151 ++
 .../hbase/thrift/HBaseServiceHandler.java   |   90 +
 .../hbase/thrift/HbaseHandlerMetricsProxy.java  |   20 +-
 .../apache/hadoop/hbase/thrift/ImplType.java|  143 ++
 .../hadoop/hbase/thrift/IncrementCoalescer.java |6 +-
 .../hbase/thrift/ThriftHBaseServiceHandler.java | 1347 
 .../hadoop/hbase/thrift/ThriftHttpServlet.java  |   12 +-
 .../hadoop/hbase/thrift/ThriftServer.java   |  698 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java | 2031 --
 .../thrift2/ThriftHBaseServiceHandler.java  |   69 +-
 .../hadoop/hbase/thrift2/ThriftServer.java  |  594 +
 .../resources/hbase-webapps/thrift/thrift.jsp   |2 +-
 .../hbase/thrift/TestThriftHttpServer.java  |   28 +-
 .../hadoop/hbase/thrift/TestThriftServer.java   |   58 +-
 .../hbase/thrift/TestThriftServerCmdLine.java   |   48 +-
 .../thrift/TestThriftSpnegoHttpServer.java  |   21 +-
 .../hbase/thrift2/TestThrift2HttpServer.java|   90 +
 .../hbase/thrift2/TestThrift2ServerCmdLine.java |   99 +
 .../thrift2/TestThriftHBaseServiceHandler.java  |   15 +-
 19 files changed, 2711 insertions(+), 2811 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b6b4af/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
new file mode 100644
index 000..8e3d004
--- /dev/null
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Thrift related constants
+ */
+@InterfaceAudience.Private
+public final class Constants {
+  private Constants(){}
+
+  public static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+
+  public static final String SERVER_TYPE_CONF_KEY =
+  "hbase.regionserver.thrift.server.type";
+
+  public static final String COMPACT_CONF_KEY = 
"hbase.regionserver.thrift.compact";
+  public static final boolean COMPACT_CONF_DEFAULT = false;
+
+  public static final String FRAMED_CONF_KEY = 
"hbase.regionserver.thrift.framed";
+  public static final boolean FRAMED_CONF_DEFAULT = false;
+
+  public static final String MAX_FRAME_SIZE_CONF_KEY =
+  "hbase.regionserver.thrift.framed.max_frame_size_in_mb";
+  public static final int MAX_FRAME_SIZE_CONF_DEFAULT = 2;
+
+  public static final String COALESCE_INC_KEY = 
"hbase.regionserver.thrift.coalesceIncrement";
+  public static final String USE_HTTP_CONF_KEY = 
"hbase.regionserver.thrift.http";
+
+  public static final String HTTP_MIN_THREADS_KEY = 
"hbase.thrift.http_threads.min";
+  public static final int HTTP_MIN_THREADS_KEY_DEFAULT = 2;
+
+  public static final String HTTP_MAX_THREADS_KEY = 
"hbase.thrift.http_threads.max";
+  public static final int HTTP_MAX_THREADS_KEY_DEFAULT = 100;
+
+  // ssl related configs
+  public static final String THRIFT_SSL_ENABLED_KEY = 
"hbase.thrift.ssl.enabled";
+  public static final String THRIFT_SSL_KEYSTORE_STORE_KEY = 
"hbase.thrift.ssl.keystore.store";
+  public static final String THRIFT_SSL_KEYSTORE_PASSWORD_KEY =
+  "hbase.thrift.ssl.keystore.password";
+  public static final String 

[1/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master f0b50a8f9 -> e4b6b4afb


http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b6b4af/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 5681569..fa3d39d 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -18,355 +18,86 @@
  */
 package org.apache.hadoop.hbase.thrift2;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.security.PrivilegedAction;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hbase.thrift.Constants.READONLY_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_READONLY_ENABLED;
+import static 
org.apache.hadoop.hbase.thrift.Constants.THRIFT_READONLY_ENABLED_DEFAULT;
 
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SecurityUtil;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-import org.apache.hadoop.hbase.thrift.ThriftMetrics;
+import org.apache.hadoop.hbase.thrift.HBaseServiceHandler;
+import org.apache.hadoop.hbase.thrift.HbaseHandlerMetricsProxy;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-import org.apache.hadoop.hbase.util.DNS;
-import org.apache.hadoop.hbase.util.JvmPauseMonitor;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.THsHaServer;
-import org.apache.thrift.server.TNonblockingServer;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.server.TThreadedSelectorServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TNonblockingServerSocket;
-import org.apache.thrift.transport.TNonblockingServerTransport;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TServerTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.OptionGroup;
 import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
 
 /**
  * ThriftServer - this class starts up a Thrift server which implements the 
HBase API specified in
  * the HbaseClient.thrift IDL file.
  */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 
"NM_SAME_SIMPLE_NAME_AS_SUPERCLASS",
+justification = "Change the name will be an incompatible change, will do 
it later")
 

[2/4] hbase git commit: HBASE-21652 Refactor ThriftServer making thrift2 server inherited from thrift1 server

2019-01-02 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/e4b6b4af/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
deleted file mode 100644
index 5e248f1..000
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ /dev/null
@@ -1,2031 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.thrift;
-
-import static org.apache.hadoop.hbase.util.Bytes.getBytes;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
-
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell.Type;
-import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilderFactory;
-import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.OperationWithAttributes;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.http.HttpServerUtil;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-import org.apache.hadoop.hbase.security.SecurityUtil;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
-import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
-import org.apache.hadoop.hbase.thrift.generated.Hbase;
-import org.apache.hadoop.hbase.thrift.generated.IOError;
-import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
-import org.apache.hadoop.hbase.thrift.generated.Mutation;
-import org.apache.hadoop.hbase.thrift.generated.TAppend;
-import org.apache.hadoop.hbase.thrift.generated.TCell;
-import org.apache.hadoop.hbase.thrift.generated.TIncrement;
-import 

[6/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
new file mode 100644
index 000..601d6b4
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
@@ -0,0 +1,69 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TBloomFilterType implements org.apache.thrift.TEnum {
+  /**
+   * Bloomfilters disabled
+   */
+  NONE(0),
+  /**
+   * Bloom enabled with Table row as Key
+   */
+  ROW(1),
+  /**
+   * Bloom enabled with Table row  column (family+qualifier) as Key
+   */
+  ROWCOL(2),
+  /**
+   * Bloom enabled with Table row prefix as Key, specify the length of the 
prefix
+   */
+  ROWPREFIX_FIXED_LENGTH(3),
+  /**
+   * Bloom enabled with Table row prefix as Key, specify the delimiter of the 
prefix
+   */
+  ROWPREFIX_DELIMITED(4);
+
+  private final int value;
+
+  private TBloomFilterType(int value) {
+this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+return value;
+  }
+
+  /**
+   * Find a the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TBloomFilterType findByValue(int value) { 
+switch (value) {
+  case 0:
+return NONE;
+  case 1:
+return ROW;
+  case 2:
+return ROWCOL;
+  case 3:
+return ROWPREFIX_FIXED_LENGTH;
+  case 4:
+return ROWPREFIX_DELIMITED;
+  default:
+return null;
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
index 7da4dda..464ac12 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TCellVisibility implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TCellVisibility");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
index d0d336c..24a7846 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  * in a HBase table by column family and optionally
  * a column qualifier and timestamp
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumn");
 



[1/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8add7915d -> 26700fb2c


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
new file mode 100644
index 000..f2c0743
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
@@ -0,0 +1,512 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+public class TTableName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableName");
+
+  private static final org.apache.thrift.protocol.TField NS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("ns", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC 
= new org.apache.thrift.protocol.TField("qualifier", 
org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new TTableNameStandardSchemeFactory());
+schemes.put(TupleScheme.class, new TTableNameTupleSchemeFactory());
+  }
+
+  public ByteBuffer ns; // required
+  public ByteBuffer qualifier; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+NS((short)1, "ns"),
+QUALIFIER((short)2, "qualifier");
+
+private static final Map byName = new HashMap();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // NS
+  return NS;
+case 2: // QUALIFIER
+  return QUALIFIER;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
+  static {
+Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+tmpMap.put(_Fields.NS, new org.apache.thrift.meta_data.FieldMetaData("ns", 
org.apache.thrift.TFieldRequirementType.REQUIRED, 
+new 

[7/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
HBASE-21650 Add DDL operation and some other miscellaneous to thrift2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e8a8467
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e8a8467
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e8a8467

Branch: refs/heads/branch-2
Commit: 4e8a846794f88cfd273649cdce6125b6bfc21b2d
Parents: 8add791
Author: Allan Yang 
Authored: Sat Dec 29 18:04:57 2018 +0800
Committer: Allan Yang 
Committed: Sat Dec 29 18:04:57 2018 +0800

--
 .../hbase/thrift/generated/AlreadyExists.java   | 2 +-
 .../hbase/thrift/generated/BatchMutation.java   | 2 +-
 .../thrift/generated/ColumnDescriptor.java  | 2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java| 2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  | 2 +-
 .../hbase/thrift/generated/IllegalArgument.java | 2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java | 2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  | 2 +-
 .../hadoop/hbase/thrift/generated/TCell.java| 2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  | 2 +-
 .../hbase/thrift/generated/TIncrement.java  | 2 +-
 .../hbase/thrift/generated/TRegionInfo.java | 2 +-
 .../hbase/thrift/generated/TRowResult.java  | 2 +-
 .../hadoop/hbase/thrift/generated/TScan.java| 2 +-
 .../thrift2/ThriftHBaseServiceHandler.java  |   290 +
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |   411 +-
 .../thrift2/generated/NamespaceDescriptor.java  |   554 +
 .../hadoop/hbase/thrift2/generated/TAppend.java |   114 +-
 .../hbase/thrift2/generated/TAuthorization.java | 2 +-
 .../thrift2/generated/TBloomFilterType.java |69 +
 .../thrift2/generated/TCellVisibility.java  | 2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java | 2 +-
 .../generated/TColumnFamilyDescriptor.java  |  2519 +
 .../thrift2/generated/TColumnIncrement.java | 2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |   110 +-
 .../generated/TCompressionAlgorithm.java|60 +
 .../thrift2/generated/TDataBlockEncoding.java   |57 +
 .../hadoop/hbase/thrift2/generated/TDelete.java | 2 +-
 .../hbase/thrift2/generated/TDurability.java| 3 +
 .../hadoop/hbase/thrift2/generated/TGet.java|   410 +-
 .../hbase/thrift2/generated/THBaseService.java  | 44644 +
 .../hbase/thrift2/generated/THRegionInfo.java   | 2 +-
 .../thrift2/generated/THRegionLocation.java | 2 +-
 .../hbase/thrift2/generated/TIOError.java   | 2 +-
 .../thrift2/generated/TIllegalArgument.java | 2 +-
 .../hbase/thrift2/generated/TIncrement.java |   114 +-
 .../thrift2/generated/TKeepDeletedCells.java|63 +
 .../thrift2/generated/TNamespaceDescriptor.java |   554 +
 .../hadoop/hbase/thrift2/generated/TPut.java| 2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |   112 +-
 .../hbase/thrift2/generated/TRowMutations.java  |38 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   | 2 +-
 .../hbase/thrift2/generated/TServerName.java| 2 +-
 .../thrift2/generated/TTableDescriptor.java |   843 +
 .../hbase/thrift2/generated/TTableName.java |   512 +
 .../hbase/thrift2/generated/TTimeRange.java | 2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift|   229 +-
 .../thrift2/TestThriftHBaseServiceHandler.java  |96 +
 48 files changed, 41553 insertions(+), 10303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
index 68361c1..8ec3e32 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * An AlreadyExists exceptions signals that a table with the specified
  * name already exists
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class AlreadyExists extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlreadyExists");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java

[2/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
index 129ab2e..8450f5b 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("THRegionInfo");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
index 94b25ff..b1146e9 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THRegionLocation implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("THRegionLocation");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
index 2e50d3d..9569c3f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  * to the HBase master or a HBase region server. Also used to return
  * more general HBase error conditions.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TIOError extends TException implements 
org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TIOError");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
index 9387429..6734dec 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * A TIllegalArgument exception indicates an illegal or invalid
  * argument was passed into a procedure.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TIllegalArgument extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TIllegalArgument");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java

[9/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM add some comments)

2018-12-29 Thread allan163
HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM 
add some comments)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26700fb2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26700fb2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26700fb2

Branch: refs/heads/branch-2
Commit: 26700fb2cd0f1afecb62fcdbdd613cac73c87bb1
Parents: 4e8a846
Author: Allan Yang 
Authored: Sat Dec 29 18:06:23 2018 +0800
Committer: Allan Yang 
Committed: Sat Dec 29 18:06:23 2018 +0800

--
 .../hbase/thrift/generated/AlreadyExists.java   |   2 +-
 .../hbase/thrift/generated/BatchMutation.java   |   2 +-
 .../thrift/generated/ColumnDescriptor.java  |   2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java|   2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  |   2 +-
 .../hbase/thrift/generated/IllegalArgument.java |   2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java |   2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  |   2 +-
 .../hadoop/hbase/thrift/generated/TCell.java|   2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  |   2 +-
 .../hbase/thrift/generated/TIncrement.java  |   2 +-
 .../hbase/thrift/generated/TRegionInfo.java |   2 +-
 .../hbase/thrift/generated/TRowResult.java  |   2 +-
 .../hadoop/hbase/thrift/generated/TScan.java|   2 +-
 .../hadoop/hbase/thrift2/generated/TAppend.java |   2 +-
 .../hbase/thrift2/generated/TAuthorization.java |   2 +-
 .../thrift2/generated/TBloomFilterType.java |   4 +
 .../thrift2/generated/TCellVisibility.java  |   2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java |   2 +-
 .../generated/TColumnFamilyDescriptor.java  |   6 +-
 .../thrift2/generated/TColumnIncrement.java |   2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |   2 +-
 .../generated/TCompressionAlgorithm.java|   4 +
 .../thrift2/generated/TDataBlockEncoding.java   |   4 +
 .../hadoop/hbase/thrift2/generated/TDelete.java |   2 +-
 .../hadoop/hbase/thrift2/generated/TGet.java|   2 +-
 .../hbase/thrift2/generated/THBaseService.java  | 571 ++-
 .../hbase/thrift2/generated/THRegionInfo.java   |   2 +-
 .../thrift2/generated/THRegionLocation.java |   2 +-
 .../hbase/thrift2/generated/TIOError.java   |   2 +-
 .../thrift2/generated/TIllegalArgument.java |   2 +-
 .../hbase/thrift2/generated/TIncrement.java |   2 +-
 .../thrift2/generated/TKeepDeletedCells.java|   4 +
 .../thrift2/generated/TNamespaceDescriptor.java |   6 +-
 .../hadoop/hbase/thrift2/generated/TPut.java|   2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |   2 +-
 .../hbase/thrift2/generated/TRowMutations.java  |   2 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   |   2 +-
 .../hbase/thrift2/generated/TServerName.java|   2 +-
 .../thrift2/generated/TTableDescriptor.java |   6 +-
 .../hbase/thrift2/generated/TTableName.java |  30 +-
 .../hbase/thrift2/generated/TTimeRange.java |   2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift| 168 +-
 43 files changed, 828 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
index 8ec3e32..4457b9f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * An AlreadyExists exceptions signals that a table with the specified
  * name already exists
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class AlreadyExists extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlreadyExists");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
index 0872223..f605286 100644
--- 

[3/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
index e8f36a0..7388443 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-07-04")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THBaseService {
 
   public interface Iface {
@@ -282,6 +282,56 @@ public class THBaseService {
  */
 public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer 
family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, 
TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException;
 
+public TTableDescriptor getTableDescriptor(TTableName table) throws 
TIOError, org.apache.thrift.TException;
+
+public List getTableDescriptors(List tables) 
throws TIOError, org.apache.thrift.TException;
+
+public boolean tableExists(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public List getTableDescriptorsByPattern(String regex, 
boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
+
+public List getTableDescriptorsByNamespace(String name) 
throws TIOError, org.apache.thrift.TException;
+
+public List getTableNamesByPattern(String regex, boolean 
includeSysTables) throws TIOError, org.apache.thrift.TException;
+
+public List getTableNamesByNamespace(String name) throws 
TIOError, org.apache.thrift.TException;
+
+public void createTable(TTableDescriptor desc, List splitKeys) 
throws TIOError, org.apache.thrift.TException;
+
+public void deleteTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public void truncateTable(TTableName tableName, boolean preserveSplits) 
throws TIOError, org.apache.thrift.TException;
+
+public void enableTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public void disableTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableEnabled(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableDisabled(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableAvailable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableAvailableWithSplit(TTableName tableName, 
List splitKeys) throws TIOError, org.apache.thrift.TException;
+
+public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor 
column) throws TIOError, org.apache.thrift.TException;
+
+public void deleteColumnFamily(TTableName tableName, ByteBuffer column) 
throws TIOError, org.apache.thrift.TException;
+
+public void modifyColumnFamily(TTableName tableName, 
TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException;
+
+public void modifyTable(TTableDescriptor desc) throws TIOError, 
org.apache.thrift.TException;
+
+public void createNamespace(TNamespaceDescriptor namespaceDesc) throws 
TIOError, org.apache.thrift.TException;
+
+public void modifyNamespace(TNamespaceDescriptor namespaceDesc) throws 
TIOError, org.apache.thrift.TException;
+
+public void deleteNamespace(String name) throws TIOError, 
org.apache.thrift.TException;
+
+public TNamespaceDescriptor getNamespaceDescriptor(String name) throws 
TIOError, org.apache.thrift.TException;
+
+public List listNamespaceDescriptors() throws 
TIOError, org.apache.thrift.TException;
+
   }
 
   public interface AsyncIface {
@@ -326,6 +376,56 @@ public class THBaseService {
 
 public void checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer 
family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, 
TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
 
+public void getTableDescriptor(TTableName table, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void getTableDescriptors(List tables, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void tableExists(TTableName tableName, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void 

[8/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM add some comments)

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
index 89a8a5e..8e53bdf 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
@@ -34,7 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.client.TableDescriptor
+ */
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTableDescriptor implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableDescriptor");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
index f2c0743..cec268a 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
@@ -34,7 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.TableName
+ */
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTableName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableName");
 
@@ -47,12 +51,24 @@ public class TTableName implements 
org.apache.thrift.TBase byName = new HashMap();
@@ -157,6 +173,9 @@ public class TTableName implements 
org.apache.thrift.TBasehttp://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
index 1e1898c..8ab746c 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTimeRange");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/26700fb2/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
--
diff --git 
a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift 
b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
index 6383329..c1b94ef 100644
--- 
a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
+++ 
b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
@@ -315,6 +315,10 @@ enum TCompareOp {
   NO_OP = 6
 }
 
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.regionserver.BloomType
+ */
 enum TBloomFilterType {
 /**
* Bloomfilters disabled
@@ -338,6 +342,10 @@ enum TBloomFilterType {
   ROWPREFIX_DELIMITED = 4
 }
 
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.io.compress.Algorithm
+ */
 enum TCompressionAlgorithm {
   LZO = 0,
   GZ = 1,
@@ -348,6 +356,10 @@ enum TCompressionAlgorithm {
   ZSTD = 6
 }
 
+/**
+ * Thrift wrapper around
+ * 

[5/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
new file mode 100644
index 000..03cb2f6
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
@@ -0,0 +1,2519 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+public class TColumnFamilyDescriptor implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnFamilyDescriptor");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("name", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC 
= new org.apache.thrift.protocol.TField("attributes", 
org.apache.thrift.protocol.TType.MAP, (short)2);
+  private static final org.apache.thrift.protocol.TField 
CONFIGURATION_FIELD_DESC = new 
org.apache.thrift.protocol.TField("configuration", 
org.apache.thrift.protocol.TType.MAP, (short)3);
+  private static final org.apache.thrift.protocol.TField BLOCK_SIZE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("blockSize", 
org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField 
BLOOMN_FILTER_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("bloomnFilterType", 
org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField 
COMPRESSION_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("compressionType", 
org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField 
DFS_REPLICATION_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dfsReplication", 
org.apache.thrift.protocol.TType.I16, (short)7);
+  private static final org.apache.thrift.protocol.TField 
DATA_BLOCK_ENCODING_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dataBlockEncoding", 
org.apache.thrift.protocol.TType.I32, (short)8);
+  private static final org.apache.thrift.protocol.TField 
KEEP_DELETED_CELLS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("keepDeletedCells", 
org.apache.thrift.protocol.TType.I32, (short)9);
+  private static final org.apache.thrift.protocol.TField 
MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", 
org.apache.thrift.protocol.TType.I32, (short)10);
+  private static final org.apache.thrift.protocol.TField 
MIN_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("minVersions", 
org.apache.thrift.protocol.TType.I32, (short)11);
+  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("scope", 
org.apache.thrift.protocol.TType.I32, (short)12);
+  private static final org.apache.thrift.protocol.TField 
TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeToLive", 
org.apache.thrift.protocol.TType.I32, (short)13);
+  private static final org.apache.thrift.protocol.TField 
BLOCK_CACHE_ENABLED_FIELD_DESC = new 
org.apache.thrift.protocol.TField("blockCacheEnabled", 
org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField 
CACHE_BLOOMS_ON_WRITE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("cacheBloomsOnWrite", 

[4/9] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-29 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
index 2fb3f76..0f27519 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
@@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Represents a single cell and the amount to increment it by
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumnIncrement implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnIncrement");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
index 3ceb4c0..6cded1b 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
@@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Represents a single cell and its value.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnValue");
 
@@ -46,6 +46,7 @@ public class TColumnValue implements 
org.apache.thrift.TBase, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -58,6 +59,7 @@ public class TColumnValue implements 
org.apache.thrift.TBase byName = new HashMap();
 
@@ -90,6 +93,8 @@ public class TColumnValue implements 
org.apache.thrift.TBase 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -146,6 +152,8 @@ public class TColumnValue implements 
org.apache.thrift.TBasehttp://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
new file mode 100644
index 000..46799be
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
@@ -0,0 +1,60 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TCompressionAlgorithm implements org.apache.thrift.TEnum {
+  LZO(0),
+  GZ(1),
+  NONE(2),
+  SNAPPY(3),
+  LZ4(4),
+  BZIP2(5),
+  ZSTD(6);
+
+  private final int value;
+
+  private TCompressionAlgorithm(int value) {
+this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+return value;
+  }
+
+  /**
+   * Find a the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TCompressionAlgorithm findByValue(int value) { 
+switch (value) {
+  case 0:
+return LZO;
+  case 1:
+return GZ;
+  case 2:
+return NONE;
+  case 3:
+return SNAPPY;
+  case 4:
+return LZ4;
+  case 5:
+return BZIP2;
+  case 6:
+return ZSTD;
+  default:
+return null;
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a8467/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java

[1/2] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM add some comments)

2018-12-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 7820ba1db -> b620334c2


http://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
index 89a8a5e..8e53bdf 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableDescriptor.java
@@ -34,7 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.client.TableDescriptor
+ */
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTableDescriptor implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableDescriptor");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
index f2c0743..cec268a 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
@@ -34,7 +34,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.TableName
+ */
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTableName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableName");
 
@@ -47,12 +51,24 @@ public class TTableName implements 
org.apache.thrift.TBase byName = new HashMap();
@@ -157,6 +173,9 @@ public class TTableName implements 
org.apache.thrift.TBasehttp://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
index 1e1898c..8ab746c 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTimeRange.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class TTimeRange implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTimeRange");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
--
diff --git 
a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift 
b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
index 6383329..c1b94ef 100644
--- 
a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
+++ 
b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift
@@ -315,6 +315,10 @@ enum TCompareOp {
   NO_OP = 6
 }
 
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.regionserver.BloomType
+ */
 enum TBloomFilterType {
 /**
* Bloomfilters disabled
@@ -338,6 +342,10 @@ enum TBloomFilterType {
   ROWPREFIX_DELIMITED = 4
 }
 
+/**
+ * Thrift wrapper around
+ * org.apache.hadoop.hbase.io.compress.Algorithm
+ */
 enum TCompressionAlgorithm {
   LZO = 0,
   GZ = 1,
@@ -348,6 +356,10 @@ enum 

[2/2] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM add some comments)

2018-12-27 Thread allan163
HBASE-21650 Add DDL operation and some other miscellaneous to thrift2 (ADDENDUM 
add some comments)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b620334c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b620334c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b620334c

Branch: refs/heads/master
Commit: b620334c20e84a4876226b508213ce11b8b187a0
Parents: 7820ba1
Author: Allan Yang 
Authored: Fri Dec 28 15:32:50 2018 +0800
Committer: Allan Yang 
Committed: Fri Dec 28 15:32:50 2018 +0800

--
 .../hbase/thrift/generated/AlreadyExists.java   |   2 +-
 .../hbase/thrift/generated/BatchMutation.java   |   2 +-
 .../thrift/generated/ColumnDescriptor.java  |   2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java|   2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  |   2 +-
 .../hbase/thrift/generated/IllegalArgument.java |   2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java |   2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  |   2 +-
 .../hadoop/hbase/thrift/generated/TCell.java|   2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  |   2 +-
 .../hbase/thrift/generated/TIncrement.java  |   2 +-
 .../hbase/thrift/generated/TRegionInfo.java |   2 +-
 .../hbase/thrift/generated/TRowResult.java  |   2 +-
 .../hadoop/hbase/thrift/generated/TScan.java|   2 +-
 .../hadoop/hbase/thrift2/generated/TAppend.java |   2 +-
 .../hbase/thrift2/generated/TAuthorization.java |   2 +-
 .../thrift2/generated/TBloomFilterType.java |   4 +
 .../thrift2/generated/TCellVisibility.java  |   2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java |   2 +-
 .../generated/TColumnFamilyDescriptor.java  |   6 +-
 .../thrift2/generated/TColumnIncrement.java |   2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |   2 +-
 .../generated/TCompressionAlgorithm.java|   4 +
 .../thrift2/generated/TDataBlockEncoding.java   |   4 +
 .../hadoop/hbase/thrift2/generated/TDelete.java |   2 +-
 .../hadoop/hbase/thrift2/generated/TGet.java|   2 +-
 .../hbase/thrift2/generated/THBaseService.java  | 571 ++-
 .../hbase/thrift2/generated/THRegionInfo.java   |   2 +-
 .../thrift2/generated/THRegionLocation.java |   2 +-
 .../hbase/thrift2/generated/TIOError.java   |   2 +-
 .../thrift2/generated/TIllegalArgument.java |   2 +-
 .../hbase/thrift2/generated/TIncrement.java |   2 +-
 .../thrift2/generated/TKeepDeletedCells.java|   4 +
 .../thrift2/generated/TNamespaceDescriptor.java |   6 +-
 .../hadoop/hbase/thrift2/generated/TPut.java|   2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |   2 +-
 .../hbase/thrift2/generated/TRowMutations.java  |   2 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   |   2 +-
 .../hbase/thrift2/generated/TServerName.java|   2 +-
 .../thrift2/generated/TTableDescriptor.java |   6 +-
 .../hbase/thrift2/generated/TTableName.java |  30 +-
 .../hbase/thrift2/generated/TTimeRange.java |   2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift| 168 +-
 43 files changed, 828 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
index 8ec3e32..4457b9f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * An AlreadyExists exceptions signals that a table with the specified
  * name already exists
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
 public class AlreadyExists extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlreadyExists");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b620334c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java
index 0872223..f605286 100644
--- 

[6/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
new file mode 100644
index 000..601d6b4
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TBloomFilterType.java
@@ -0,0 +1,69 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TBloomFilterType implements org.apache.thrift.TEnum {
+  /**
+   * Bloomfilters disabled
+   */
+  NONE(0),
+  /**
+   * Bloom enabled with Table row as Key
+   */
+  ROW(1),
+  /**
+   * Bloom enabled with Table row  column (family+qualifier) as Key
+   */
+  ROWCOL(2),
+  /**
+   * Bloom enabled with Table row prefix as Key, specify the length of the 
prefix
+   */
+  ROWPREFIX_FIXED_LENGTH(3),
+  /**
+   * Bloom enabled with Table row prefix as Key, specify the delimiter of the 
prefix
+   */
+  ROWPREFIX_DELIMITED(4);
+
+  private final int value;
+
+  private TBloomFilterType(int value) {
+this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+return value;
+  }
+
+  /**
+   * Find a the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TBloomFilterType findByValue(int value) { 
+switch (value) {
+  case 0:
+return NONE;
+  case 1:
+return ROW;
+  case 2:
+return ROWCOL;
+  case 3:
+return ROWPREFIX_FIXED_LENGTH;
+  case 4:
+return ROWPREFIX_DELIMITED;
+  default:
+return null;
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
index 7da4dda..464ac12 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCellVisibility.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TCellVisibility implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TCellVisibility");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
index d0d336c..24a7846 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumn.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  * in a HBase table by column family and optionally
  * a column qualifier and timestamp
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumn");
 



[5/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
new file mode 100644
index 000..03cb2f6
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnFamilyDescriptor.java
@@ -0,0 +1,2519 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+public class TColumnFamilyDescriptor implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnFamilyDescriptor");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("name", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ATTRIBUTES_FIELD_DESC 
= new org.apache.thrift.protocol.TField("attributes", 
org.apache.thrift.protocol.TType.MAP, (short)2);
+  private static final org.apache.thrift.protocol.TField 
CONFIGURATION_FIELD_DESC = new 
org.apache.thrift.protocol.TField("configuration", 
org.apache.thrift.protocol.TType.MAP, (short)3);
+  private static final org.apache.thrift.protocol.TField BLOCK_SIZE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("blockSize", 
org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField 
BLOOMN_FILTER_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("bloomnFilterType", 
org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField 
COMPRESSION_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("compressionType", 
org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField 
DFS_REPLICATION_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dfsReplication", 
org.apache.thrift.protocol.TType.I16, (short)7);
+  private static final org.apache.thrift.protocol.TField 
DATA_BLOCK_ENCODING_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dataBlockEncoding", 
org.apache.thrift.protocol.TType.I32, (short)8);
+  private static final org.apache.thrift.protocol.TField 
KEEP_DELETED_CELLS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("keepDeletedCells", 
org.apache.thrift.protocol.TType.I32, (short)9);
+  private static final org.apache.thrift.protocol.TField 
MAX_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxVersions", 
org.apache.thrift.protocol.TType.I32, (short)10);
+  private static final org.apache.thrift.protocol.TField 
MIN_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("minVersions", 
org.apache.thrift.protocol.TType.I32, (short)11);
+  private static final org.apache.thrift.protocol.TField SCOPE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("scope", 
org.apache.thrift.protocol.TType.I32, (short)12);
+  private static final org.apache.thrift.protocol.TField 
TIME_TO_LIVE_FIELD_DESC = new org.apache.thrift.protocol.TField("timeToLive", 
org.apache.thrift.protocol.TType.I32, (short)13);
+  private static final org.apache.thrift.protocol.TField 
BLOCK_CACHE_ENABLED_FIELD_DESC = new 
org.apache.thrift.protocol.TField("blockCacheEnabled", 
org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField 
CACHE_BLOOMS_ON_WRITE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("cacheBloomsOnWrite", 

[1/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master f5ea00f72 -> 7820ba1db


http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
new file mode 100644
index 000..f2c0743
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TTableName.java
@@ -0,0 +1,512 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
+public class TTableName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTableName");
+
+  private static final org.apache.thrift.protocol.TField NS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("ns", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC 
= new org.apache.thrift.protocol.TField("qualifier", 
org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new TTableNameStandardSchemeFactory());
+schemes.put(TupleScheme.class, new TTableNameTupleSchemeFactory());
+  }
+
+  public ByteBuffer ns; // required
+  public ByteBuffer qualifier; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+NS((short)1, "ns"),
+QUALIFIER((short)2, "qualifier");
+
+private static final Map byName = new HashMap();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // NS
+  return NS;
+case 2: // QUALIFIER
+  return QUALIFIER;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
+  static {
+Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+tmpMap.put(_Fields.NS, new org.apache.thrift.meta_data.FieldMetaData("ns", 
org.apache.thrift.TFieldRequirementType.REQUIRED, 
+new 

[2/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
index 129ab2e..8450f5b 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionInfo.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THRegionInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("THRegionInfo");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
index 94b25ff..b1146e9 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THRegionLocation.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THRegionLocation implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("THRegionLocation");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
index 2e50d3d..9569c3f 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIOError.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
  * to the HBase master or a HBase region server. Also used to return
  * more general HBase error conditions.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TIOError extends TException implements 
org.apache.thrift.TBase, java.io.Serializable, 
Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TIOError");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
index 9387429..6734dec 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIllegalArgument.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * A TIllegalArgument exception indicates an illegal or invalid
  * argument was passed into a procedure.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TIllegalArgument extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TIllegalArgument");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TIncrement.java

[4/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
index 2fb3f76..0f27519 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnIncrement.java
@@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Represents a single cell and the amount to increment it by
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumnIncrement implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnIncrement");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
index 3ceb4c0..6cded1b 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TColumnValue.java
@@ -37,7 +37,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Represents a single cell and its value.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TColumnValue");
 
@@ -46,6 +46,7 @@ public class TColumnValue implements 
org.apache.thrift.TBase, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
   static {
@@ -58,6 +59,7 @@ public class TColumnValue implements 
org.apache.thrift.TBase byName = new HashMap();
 
@@ -90,6 +93,8 @@ public class TColumnValue implements 
org.apache.thrift.TBase 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -146,6 +152,8 @@ public class TColumnValue implements 
org.apache.thrift.TBasehttp://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
new file mode 100644
index 000..46799be
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TCompressionAlgorithm.java
@@ -0,0 +1,60 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hbase.thrift2.generated;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum TCompressionAlgorithm implements org.apache.thrift.TEnum {
+  LZO(0),
+  GZ(1),
+  NONE(2),
+  SNAPPY(3),
+  LZ4(4),
+  BZIP2(5),
+  ZSTD(6);
+
+  private final int value;
+
+  private TCompressionAlgorithm(int value) {
+this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static TCompressionAlgorithm findByValue(int value) { 
+switch (value) {
+  case 0:
+return LZO;
+  case 1:
+return GZ;
+  case 2:
+return NONE;
+  case 3:
+return SNAPPY;
+  case 4:
+return LZ4;
+  case 5:
+return BZIP2;
+  case 6:
+return ZSTD;
+  default:
+return null;
+}
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TDataBlockEncoding.java

[7/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
HBASE-21650 Add DDL operation and some other miscellaneous to thrift2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7820ba1d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7820ba1d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7820ba1d

Branch: refs/heads/master
Commit: 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da
Parents: f5ea00f
Author: Allan Yang 
Authored: Thu Dec 27 22:25:33 2018 +0800
Committer: Allan Yang 
Committed: Thu Dec 27 22:25:33 2018 +0800

--
 .../hbase/thrift/generated/AlreadyExists.java   | 2 +-
 .../hbase/thrift/generated/BatchMutation.java   | 2 +-
 .../thrift/generated/ColumnDescriptor.java  | 2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java| 2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  | 2 +-
 .../hbase/thrift/generated/IllegalArgument.java | 2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java | 2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  | 2 +-
 .../hadoop/hbase/thrift/generated/TCell.java| 2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  | 2 +-
 .../hbase/thrift/generated/TIncrement.java  | 2 +-
 .../hbase/thrift/generated/TRegionInfo.java | 2 +-
 .../hbase/thrift/generated/TRowResult.java  | 2 +-
 .../hadoop/hbase/thrift/generated/TScan.java| 2 +-
 .../thrift2/ThriftHBaseServiceHandler.java  |   290 +
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |   411 +-
 .../thrift2/generated/NamespaceDescriptor.java  |   554 +
 .../hadoop/hbase/thrift2/generated/TAppend.java |   114 +-
 .../hbase/thrift2/generated/TAuthorization.java | 2 +-
 .../thrift2/generated/TBloomFilterType.java |69 +
 .../thrift2/generated/TCellVisibility.java  | 2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java | 2 +-
 .../generated/TColumnFamilyDescriptor.java  |  2519 +
 .../thrift2/generated/TColumnIncrement.java | 2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |   110 +-
 .../generated/TCompressionAlgorithm.java|60 +
 .../thrift2/generated/TDataBlockEncoding.java   |57 +
 .../hadoop/hbase/thrift2/generated/TDelete.java | 2 +-
 .../hbase/thrift2/generated/TDurability.java| 3 +
 .../hadoop/hbase/thrift2/generated/TGet.java|   410 +-
 .../hbase/thrift2/generated/THBaseService.java  | 44644 +
 .../hbase/thrift2/generated/THRegionInfo.java   | 2 +-
 .../thrift2/generated/THRegionLocation.java | 2 +-
 .../hbase/thrift2/generated/TIOError.java   | 2 +-
 .../thrift2/generated/TIllegalArgument.java | 2 +-
 .../hbase/thrift2/generated/TIncrement.java |   114 +-
 .../thrift2/generated/TKeepDeletedCells.java|63 +
 .../thrift2/generated/TNamespaceDescriptor.java |   554 +
 .../hadoop/hbase/thrift2/generated/TPut.java| 2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |   112 +-
 .../hbase/thrift2/generated/TRowMutations.java  |38 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   | 2 +-
 .../hbase/thrift2/generated/TServerName.java| 2 +-
 .../thrift2/generated/TTableDescriptor.java |   843 +
 .../hbase/thrift2/generated/TTableName.java |   512 +
 .../hbase/thrift2/generated/TTimeRange.java | 2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift|   229 +-
 .../thrift2/TestThriftHBaseServiceHandler.java  |96 +
 48 files changed, 41553 insertions(+), 10303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
index 68361c1..8ec3e32 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * An AlreadyExists exceptions signals that a table with the specified
  * name already exists
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2016-05-25")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class AlreadyExists extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlreadyExists");
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java

[3/7] hbase git commit: HBASE-21650 Add DDL operation and some other miscellaneous to thrift2

2018-12-27 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/7820ba1d/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
index e8f36a0..7388443 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/THBaseService.java
@@ -34,7 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-07-04")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-27")
 public class THBaseService {
 
   public interface Iface {
@@ -282,6 +282,56 @@ public class THBaseService {
  */
 public boolean checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer 
family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, 
TRowMutations rowMutations) throws TIOError, org.apache.thrift.TException;
 
+public TTableDescriptor getTableDescriptor(TTableName table) throws 
TIOError, org.apache.thrift.TException;
+
+public List getTableDescriptors(List tables) 
throws TIOError, org.apache.thrift.TException;
+
+public boolean tableExists(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public List getTableDescriptorsByPattern(String regex, 
boolean includeSysTables) throws TIOError, org.apache.thrift.TException;
+
+public List getTableDescriptorsByNamespace(String name) 
throws TIOError, org.apache.thrift.TException;
+
+public List getTableNamesByPattern(String regex, boolean 
includeSysTables) throws TIOError, org.apache.thrift.TException;
+
+public List getTableNamesByNamespace(String name) throws 
TIOError, org.apache.thrift.TException;
+
+public void createTable(TTableDescriptor desc, List splitKeys) 
throws TIOError, org.apache.thrift.TException;
+
+public void deleteTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public void truncateTable(TTableName tableName, boolean preserveSplits) 
throws TIOError, org.apache.thrift.TException;
+
+public void enableTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public void disableTable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableEnabled(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableDisabled(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableAvailable(TTableName tableName) throws TIOError, 
org.apache.thrift.TException;
+
+public boolean isTableAvailableWithSplit(TTableName tableName, 
List splitKeys) throws TIOError, org.apache.thrift.TException;
+
+public void addColumnFamily(TTableName tableName, TColumnFamilyDescriptor 
column) throws TIOError, org.apache.thrift.TException;
+
+public void deleteColumnFamily(TTableName tableName, ByteBuffer column) 
throws TIOError, org.apache.thrift.TException;
+
+public void modifyColumnFamily(TTableName tableName, 
TColumnFamilyDescriptor column) throws TIOError, org.apache.thrift.TException;
+
+public void modifyTable(TTableDescriptor desc) throws TIOError, 
org.apache.thrift.TException;
+
+public void createNamespace(TNamespaceDescriptor namespaceDesc) throws 
TIOError, org.apache.thrift.TException;
+
+public void modifyNamespace(TNamespaceDescriptor namespaceDesc) throws 
TIOError, org.apache.thrift.TException;
+
+public void deleteNamespace(String name) throws TIOError, 
org.apache.thrift.TException;
+
+public TNamespaceDescriptor getNamespaceDescriptor(String name) throws 
TIOError, org.apache.thrift.TException;
+
+public List listNamespaceDescriptors() throws 
TIOError, org.apache.thrift.TException;
+
   }
 
   public interface AsyncIface {
@@ -326,6 +376,56 @@ public class THBaseService {
 
 public void checkAndMutate(ByteBuffer table, ByteBuffer row, ByteBuffer 
family, ByteBuffer qualifier, TCompareOp compareOp, ByteBuffer value, 
TRowMutations rowMutations, org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
 
+public void getTableDescriptor(TTableName table, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void getTableDescriptors(List tables, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void tableExists(TTableName tableName, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void 

hbase git commit: HBASE-21468 separate workers for meta table is not working

2018-11-13 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 c95832159 -> 0f295de15


HBASE-21468 separate workers for meta table is not working


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f295de1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f295de1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f295de1

Branch: refs/heads/branch-2.1
Commit: 0f295de1565174fa5fd9d6074dc29cb1b41b47d7
Parents: c958321
Author: Allan Yang 
Authored: Wed Nov 14 11:43:41 2018 +0800
Committer: Allan Yang 
Committed: Wed Nov 14 11:43:41 2018 +0800

--
 .../hbase/procedure2/AbstractProcedureScheduler.java |  8 ++--
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 11 ++-
 .../hadoop/hbase/procedure2/ProcedureTestingUtility.java |  2 +-
 .../hadoop/hbase/procedure2/TestYieldProcedures.java |  8 ++--
 .../hbase/master/assignment/TestAssignmentOnRSCrash.java |  1 +
 .../assignment/TestMergeTableRegionsProcedure.java   |  1 +
 .../hbase/master/assignment/TestRogueRSAssignment.java   |  1 +
 .../hadoop/hbase/master/locking/TestLockManager.java |  1 +
 .../hadoop/hbase/master/locking/TestLockProcedure.java   |  1 +
 .../master/procedure/TestCreateNamespaceProcedure.java   |  1 +
 .../master/procedure/TestDeleteNamespaceProcedure.java   |  1 +
 .../procedure/TestMasterFailoverWithProcedures.java  |  1 +
 .../master/procedure/TestMasterObserverPostCalls.java|  1 +
 .../master/procedure/TestMasterProcedureEvents.java  |  1 +
 .../master/procedure/TestModifyNamespaceProcedure.java   |  1 +
 .../hbase/master/procedure/TestProcedureAdmin.java   |  1 +
 .../master/procedure/TestSafemodeBringsDownMaster.java   |  1 +
 .../hbase/procedure/TestUrgentProcedureWorker.java   |  2 +-
 18 files changed, 29 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f295de1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index b2a2e5a..c579626 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -124,7 +124,7 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
 try {
   enqueue(procedure, addFront);
   if (notify) {
-schedWaitCond.signal();
+schedWaitCond.signalAll();
   }
 } finally {
   schedUnlock();
@@ -311,10 +311,6 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 if (waitingCount <= 0) {
   return;
 }
-if (waitingCount == 1) {
-  schedWaitCond.signal();
-} else {
-  schedWaitCond.signalAll();
-}
+schedWaitCond.signalAll();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f295de1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index a5b66a0..93c255f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -211,7 +211,7 @@ public class ProcedureExecutor {
   /**
* Worker thread only for urgent tasks.
*/
-  private List urgentWorkerThreads;
+  private CopyOnWriteArrayList urgentWorkerThreads;
 
   /**
* Created in the {@link #init(int, boolean)} method. Terminated in {@link 
#join()} (FIX! Doing
@@ -564,7 +564,7 @@ public class ProcedureExecutor {
*  is found on replay. otherwise false.
*/
   public void init(int numThreads, boolean abortOnCorruption) throws 
IOException {
-init(numThreads, 1, abortOnCorruption);
+init(numThreads, 0, abortOnCorruption);
   }
 
   /**
@@ -595,7 +595,7 @@ public class ProcedureExecutor {
 // Create the workers
 workerId.set(0);
 workerThreads = new CopyOnWriteArrayList<>();
-urgentWorkerThreads = new ArrayList<>();
+urgentWorkerThreads = new CopyOnWriteArrayList<>();
 for (int i = 0; i < corePoolSize; ++i) {
   workerThreads.add(new WorkerThread(threadGroup));
 }
@@ -637,7 +637,7 @@ public class ProcedureExecutor {
   return;
 }
 // Start the 

hbase git commit: HBASE-21468 separate workers for meta table is not working

2018-11-13 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 5361ed961 -> 2fcf961e7


HBASE-21468 separate workers for meta table is not working


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2fcf961e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2fcf961e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2fcf961e

Branch: refs/heads/branch-2.0
Commit: 2fcf961e73305c9304a9925e9b770f47cd66d316
Parents: 5361ed9
Author: Allan Yang 
Authored: Wed Nov 14 11:41:06 2018 +0800
Committer: Allan Yang 
Committed: Wed Nov 14 11:41:06 2018 +0800

--
 .../hbase/procedure2/AbstractProcedureScheduler.java |  8 ++--
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 11 ++-
 .../hadoop/hbase/procedure2/ProcedureTestingUtility.java |  2 +-
 .../hadoop/hbase/procedure2/TestYieldProcedures.java |  8 ++--
 .../hbase/master/assignment/TestAssignmentOnRSCrash.java |  1 +
 .../assignment/TestMergeTableRegionsProcedure.java   |  1 +
 .../hbase/master/assignment/TestRogueRSAssignment.java   |  1 +
 .../hadoop/hbase/master/locking/TestLockManager.java |  1 +
 .../hadoop/hbase/master/locking/TestLockProcedure.java   |  1 +
 .../master/procedure/TestCreateNamespaceProcedure.java   |  1 +
 .../master/procedure/TestDeleteNamespaceProcedure.java   |  1 +
 .../procedure/TestMasterFailoverWithProcedures.java  |  1 +
 .../master/procedure/TestMasterObserverPostCalls.java|  1 +
 .../master/procedure/TestMasterProcedureEvents.java  |  1 +
 .../master/procedure/TestModifyNamespaceProcedure.java   |  1 +
 .../hbase/master/procedure/TestProcedureAdmin.java   |  1 +
 .../master/procedure/TestSafemodeBringsDownMaster.java   |  1 +
 .../hbase/procedure/TestUrgentProcedureWorker.java   |  2 +-
 18 files changed, 29 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2fcf961e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index b2a2e5a..c579626 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -124,7 +124,7 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
 try {
   enqueue(procedure, addFront);
   if (notify) {
-schedWaitCond.signal();
+schedWaitCond.signalAll();
   }
 } finally {
   schedUnlock();
@@ -311,10 +311,6 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 if (waitingCount <= 0) {
   return;
 }
-if (waitingCount == 1) {
-  schedWaitCond.signal();
-} else {
-  schedWaitCond.signalAll();
-}
+schedWaitCond.signalAll();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2fcf961e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index a70a9ef..d863b21 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -211,7 +211,7 @@ public class ProcedureExecutor {
   /**
* Worker thread only for urgent tasks.
*/
-  private List urgentWorkerThreads;
+  private CopyOnWriteArrayList urgentWorkerThreads;
 
   /**
* Created in the {@link #init(int, boolean)} method. Terminated in {@link 
#join()} (FIX! Doing
@@ -564,7 +564,7 @@ public class ProcedureExecutor {
*  is found on replay. otherwise false.
*/
   public void init(int numThreads, boolean abortOnCorruption) throws 
IOException {
-init(numThreads, 1, abortOnCorruption);
+init(numThreads, 0, abortOnCorruption);
   }
 
   /**
@@ -595,7 +595,7 @@ public class ProcedureExecutor {
 // Create the workers
 workerId.set(0);
 workerThreads = new CopyOnWriteArrayList<>();
-urgentWorkerThreads = new ArrayList<>();
+urgentWorkerThreads = new CopyOnWriteArrayList<>();
 for (int i = 0; i < corePoolSize; ++i) {
   workerThreads.add(new WorkerThread(threadGroup));
 }
@@ -637,7 +637,7 @@ public class ProcedureExecutor {
   return;
 }
 // Start the 

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master fe2265fa4 -> ccabf7310


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is 
WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccabf731
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccabf731
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccabf731

Branch: refs/heads/master
Commit: ccabf7310d3fdab3b4a24ea60b391995367256a6
Parents: fe2265f
Author: jingyuntian 
Authored: Fri Nov 9 23:03:19 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 23:03:19 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java | 22 
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 36 
 3 files changed, 52 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index d02ca6e..c18ca32 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -967,17 +967,25 @@ public class ProcedureExecutor {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the 
procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we cannot directly submit it 
+  // to the scheduler.
+  // Instead we should remove it from the timeout executor queue and transfer 
+  its state to RUNNABLE
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", 
procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.debug("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the 
procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.debug("Bypassing {} and its ancestors successfully, but since it 
is already running, "
-+ "skipping add to queue", procedure);
++ "skipping add to queue",
+  procedure);
   }
   return true;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread extends 
StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure proc) {
+  protected void executeTimedoutProcedure(Procedure proc) {
 // The procedure received a timeout. if the procedure itself does not 
handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index 

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 0875fa063 -> c6090d4f0


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is 
WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6090d4f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6090d4f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6090d4f

Branch: refs/heads/branch-2.1
Commit: c6090d4f048c078c7f56a08db0e9f7e90225969e
Parents: 0875fa0
Author: jingyuntian 
Authored: Fri Nov 9 22:52:14 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 22:52:14 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java | 19 ++
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 38 +++-
 3 files changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c4fffa8..a5b66a0 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1033,15 +1033,22 @@ public class ProcedureExecutor {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the 
procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can not directly submit it 
to the scheduler.
+  // Instead we should remove it from the timeout executor queue and transfer 
its state to RUNNABLE
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", 
procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.info("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the 
procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.info("Bypassing {} and its ancestors successfully, but since it is 
already running, "
 + "skipping add to queue", procedure);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread extends 
StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure proc) {
+  protected void executeTimedoutProcedure(Procedure proc) {
 // The procedure received a timeout. if the procedure itself does not 
handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index fa40631..de7a0a1 100644
--- 

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 6584a76d3 -> e8404c7c2


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is 
WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8404c7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8404c7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8404c7c

Branch: refs/heads/branch-2.0
Commit: e8404c7c21b5ceaddd1359037be1303171c97ae9
Parents: 6584a76
Author: jingyuntian 
Authored: Fri Nov 9 22:45:43 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 22:45:43 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java | 19 ++
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 38 +++-
 3 files changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 3bd5e0f..a70a9ef 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1033,15 +1033,22 @@ public class ProcedureExecutor {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the 
procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can not directly submit it 
to the scheduler.
+  // Instead we should remove it from the timeout executor queue and transfer 
its state to RUNNABLE
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", 
procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.info("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the 
procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.info("Bypassing {} and its ancestors successfully, but since it is 
already running, "
 + "skipping add to queue", procedure);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread extends 
StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure proc) {
+  protected void executeTimedoutProcedure(Procedure proc) {
 // The procedure received a timeout. if the procedure itself does not 
handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index fa40631..de7a0a1 100644
--- 

hbase git commit: HBASE-21421 Do not kill RS if reportOnlineRegions fails

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 82ce14960 -> a31458bde


HBASE-21421 Do not kill RS if reportOnlineRegions fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a31458bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a31458bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a31458bd

Branch: refs/heads/master
Commit: a31458bdee4353d99682cb4e108e265ec45f8944
Parents: 82ce149
Author: Allan Yang 
Authored: Tue Nov 6 14:58:35 2018 +0800
Committer: Allan Yang 
Committed: Tue Nov 6 14:58:35 2018 +0800

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java | 10 +-
 .../hbase/master/assignment/TestRogueRSAssignment.java|  5 +
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a31458bd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 08536b7..765ab6b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1053,8 +1053,7 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
-  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames)
-  throws YouAreDeadException {
+  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames) {
 final ServerName serverName = serverNode.getServerName();
 try {
   for (byte[] regionName: regionNames) {
@@ -1097,9 +1096,10 @@ public class AssignmentManager implements ServerListener 
{
 }
   }
 } catch (IOException e) {
-  LOG.warn("Killing " + serverName + ": " + e.getMessage());
-  killRegionServer(serverNode);
-  throw (YouAreDeadException)new 
YouAreDeadException(e.getMessage()).initCause(e);
+  //See HBASE-21421, we can count on reportRegionStateTransition calls
+  //We only log a warning here. It could be a network lag.
+  LOG.warn("Failed to checkOnlineRegionsReport, maybe due to network lag, "
+  + "if this message continues, be careful of double assign", e);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a31458bd/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
index afcf446..c88e583 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
@@ -45,6 +45,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -137,7 +138,11 @@ public class TestRogueRSAssignment {
 admin.setBalancerRunning(true, false);
   }
 
+  /**
+   * Ignore this test, see HBASE-21421
+   */
   @Test
+  @Ignore
   public void testReportRSWithWrongRegion() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 



hbase git commit: HBASE-21421 Do not kill RS if reportOnlineRegions fails

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 bcd98513d -> d544b7ade


HBASE-21421 Do not kill RS if reportOnlineRegions fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d544b7ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d544b7ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d544b7ad

Branch: refs/heads/branch-2
Commit: d544b7adea9a158232b220503269eb9628067bb9
Parents: bcd9851
Author: Allan Yang 
Authored: Tue Nov 6 14:56:33 2018 +0800
Committer: Allan Yang 
Committed: Tue Nov 6 14:56:33 2018 +0800

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java | 10 +-
 .../hbase/master/assignment/TestRogueRSAssignment.java|  5 +
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d544b7ad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 08536b7..765ab6b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1053,8 +1053,7 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
-  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames)
-  throws YouAreDeadException {
+  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames) {
 final ServerName serverName = serverNode.getServerName();
 try {
   for (byte[] regionName: regionNames) {
@@ -1097,9 +1096,10 @@ public class AssignmentManager implements ServerListener 
{
 }
   }
 } catch (IOException e) {
-  LOG.warn("Killing " + serverName + ": " + e.getMessage());
-  killRegionServer(serverNode);
-  throw (YouAreDeadException)new 
YouAreDeadException(e.getMessage()).initCause(e);
+  //See HBASE-21421, we can count on reportRegionStateTransition calls
+  //We only log a warning here. It could be a network lag.
+  LOG.warn("Failed to checkOnlineRegionsReport, maybe due to network lag, "
+  + "if this message continues, be careful of double assign", e);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d544b7ad/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
index afcf446..c88e583 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
@@ -45,6 +45,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -137,7 +138,11 @@ public class TestRogueRSAssignment {
 admin.setBalancerRunning(true, false);
   }
 
+  /**
+   * Ignore this test, see HBASE-21421
+   */
   @Test
+  @Ignore
   public void testReportRSWithWrongRegion() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 



hbase git commit: HBASE-21421 Do not kill RS if reportOnlineRegions fails

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 d19e6dff2 -> 5b8c76737


HBASE-21421 Do not kill RS if reportOnlineRegions fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b8c7673
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b8c7673
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b8c7673

Branch: refs/heads/branch-2.1
Commit: 5b8c7673743eae7f143b23e683cf03d123caeb47
Parents: d19e6df
Author: Allan Yang 
Authored: Tue Nov 6 14:51:54 2018 +0800
Committer: Allan Yang 
Committed: Tue Nov 6 14:51:54 2018 +0800

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java | 10 +-
 .../hbase/master/assignment/TestRogueRSAssignment.java|  5 +
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b8c7673/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 4492342..2b3244d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1007,8 +1007,7 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
-  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames)
-  throws YouAreDeadException {
+  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames) {
 final ServerName serverName = serverNode.getServerName();
 try {
   for (byte[] regionName: regionNames) {
@@ -1047,9 +1046,10 @@ public class AssignmentManager implements ServerListener 
{
 }
   }
 } catch (UnexpectedStateException e) {
-  LOG.warn("Killing " + serverName + ": " + e.getMessage());
-  killRegionServer(serverName);
-  throw (YouAreDeadException)new 
YouAreDeadException(e.getMessage()).initCause(e);
+  //See HBASE-21421, we can count on reportRegionStateTransition calls
+  //We only log a warning here. It could be a network lag.
+  LOG.warn("Failed to checkOnlineRegionsReport, maybe due to network lag, "
+  + "if this message continues, be careful of double assign", e);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5b8c7673/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
index afcf446..c88e583 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
@@ -45,6 +45,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -137,7 +138,11 @@ public class TestRogueRSAssignment {
 admin.setBalancerRunning(true, false);
   }
 
+  /**
+   * Ignore this test, see HBASE-21421
+   */
   @Test
+  @Ignore
   public void testReportRSWithWrongRegion() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 



hbase git commit: HBASE-21421 Do not kill RS if reportOnlineRegions fails

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 277a67a69 -> d70308160


HBASE-21421 Do not kill RS if reportOnlineRegions fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7030816
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7030816
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7030816

Branch: refs/heads/branch-2.0
Commit: d703081600016441dcb61c748038a957d30816e6
Parents: 277a67a
Author: Allan Yang 
Authored: Tue Nov 6 14:47:42 2018 +0800
Committer: Allan Yang 
Committed: Tue Nov 6 14:47:42 2018 +0800

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java | 10 +-
 .../hbase/master/assignment/TestRogueRSAssignment.java|  5 +
 2 files changed, 10 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d7030816/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index de8281b..04a25c7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -990,8 +990,7 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
-  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames)
-  throws YouAreDeadException {
+  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames) {
 final ServerName serverName = serverNode.getServerName();
 try {
   for (byte[] regionName: regionNames) {
@@ -1030,9 +1029,10 @@ public class AssignmentManager implements ServerListener 
{
 }
   }
 } catch (UnexpectedStateException e) {
-  LOG.warn("Killing " + serverName + ": " + e.getMessage());
-  killRegionServer(serverName);
-  throw (YouAreDeadException)new 
YouAreDeadException(e.getMessage()).initCause(e);
+  //See HBASE-21421, we can count on reportRegionStateTransition calls
+  //We only log a warning here. It could be a network lag.
+  LOG.warn("Failed to checkOnlineRegionsReport, maybe due to network lag, "
+  + "if this message continues, be careful of double assign", e);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d7030816/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
index afcf446..c88e583 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRogueRSAssignment.java
@@ -45,6 +45,7 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -137,7 +138,11 @@ public class TestRogueRSAssignment {
 admin.setBalancerRunning(true, false);
   }
 
+  /**
+   * Ignore this test, see HBASE-21421
+   */
   @Test
+  @Ignore
   public void testReportRSWithWrongRegion() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 



hbase git commit: HBASE-21423 Procedures for meta table/region should be able to execute in separate workers(addendum)

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 6b8cfd276 -> d4233f207


HBASE-21423 Procedures for meta table/region should be able to execute in 
separate workers(addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4233f20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4233f20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4233f20

Branch: refs/heads/branch-2.0
Commit: d4233f207d0a36f6ab5327baac3a5a7a8ca06a2d
Parents: 6b8cfd2
Author: Allan Yang 
Authored: Mon Nov 5 20:39:11 2018 +0800
Committer: Allan Yang 
Committed: Mon Nov 5 20:39:11 2018 +0800

--
 .../org/apache/hadoop/hbase/procedure2/TestChildProcedures.java   | 3 ++-
 .../org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java   | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4233f20/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java
index b837e82..9a96e88 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java
@@ -69,7 +69,8 @@ public class TestChildProcedures {
 procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, 
procStore);
 procExecutor.testing = new ProcedureExecutor.Testing();
 procStore.start(PROCEDURE_EXECUTOR_SLOTS);
-ProcedureTestingUtility.initAndStartWorkers(procExecutor, 
PROCEDURE_EXECUTOR_SLOTS, 0, false, true);
+ProcedureTestingUtility.initAndStartWorkers(procExecutor, 
PROCEDURE_EXECUTOR_SLOTS, 0, false,
+true);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hbase/blob/d4233f20/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
index 8a2e296..1f840cd 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
@@ -74,7 +74,8 @@ public class TestYieldProcedures {
 procExecutor =
   new ProcedureExecutor<>(htu.getConfiguration(), new TestProcEnv(), 
procStore, procRunnables);
 procStore.start(PROCEDURE_EXECUTOR_SLOTS);
-ProcedureTestingUtility.initAndStartWorkers(procExecutor, 
PROCEDURE_EXECUTOR_SLOTS, 0, false, true);
+ProcedureTestingUtility.initAndStartWorkers(procExecutor, 
PROCEDURE_EXECUTOR_SLOTS, 0, false,
+true);
   }
 
   @After



hbase git commit: HBASE-21423 Procedures for meta table/region should be able to execute in separate workers

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 9e52e9eb7 -> 0b7c66642


HBASE-21423 Procedures for meta table/region should be able to execute in 
separate workers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b7c6664
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b7c6664
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b7c6664

Branch: refs/heads/branch-2.1
Commit: 0b7c66642b614f9e2eba6c2c47a9a79c760b2761
Parents: 9e52e9e
Author: Allan Yang 
Authored: Mon Nov 5 20:37:15 2018 +0800
Committer: Allan Yang 
Committed: Mon Nov 5 20:37:15 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  29 ++-
 .../hbase/procedure2/ProcedureExecutor.java |  64 ++-
 .../hbase/procedure2/ProcedureScheduler.java|  17 ++
 .../procedure2/SimpleProcedureScheduler.java|   2 +-
 .../procedure2/ProcedureTestingUtility.java |  10 +-
 .../hbase/procedure2/TestChildProcedures.java   |   3 +-
 .../hbase/procedure2/TestProcedureExecutor.java |   2 +-
 .../procedure2/TestProcedureSuspended.java  |   3 +-
 .../hbase/procedure2/TestYieldProcedures.java   |   5 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   5 +-
 .../procedure/MasterProcedureConstants.java |   7 +
 .../procedure/MasterProcedureScheduler.java |   8 +-
 .../TestSplitTableRegionProcedure.java  |   2 +
 .../master/procedure/TestProcedurePriority.java |   1 +
 .../procedure/TestServerCrashProcedure.java |   2 +
 .../procedure/TestTableDDLProcedureBase.java|   2 +
 .../procedure/TestUrgentProcedureWorker.java| 188 +++
 17 files changed, 329 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b7c6664/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 7ab1329..b2a2e5a 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -139,20 +139,39 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
* NOTE: this method is called with the sched lock held.
* @return the Procedure to execute, or null if nothing is available.
*/
-  protected abstract Procedure dequeue();
+  protected Procedure dequeue() {
+return dequeue(false);
+  }
+
+  protected abstract Procedure dequeue(boolean onlyUrgent);
+
+
+  @Override
+  public Procedure poll(boolean onlyUrgent) {
+return poll(onlyUrgent, -1);
+  }
 
   @Override
   public Procedure poll() {
-return poll(-1);
+return poll(false, -1);
+  }
+
+  @Override
+  public Procedure poll(boolean onlyUrgent, long timeout, TimeUnit unit) {
+return poll(onlyUrgent, unit.toNanos(timeout));
   }
 
   @Override
   public Procedure poll(long timeout, TimeUnit unit) {
-return poll(unit.toNanos(timeout));
+return poll(false, unit.toNanos(timeout));
   }
 
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
   public Procedure poll(final long nanos) {
+return poll(false, nanos);
+  }
+
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+  public Procedure poll(final boolean onlyUrgent, final long nanos) {
 schedLock();
 try {
   if (!running) {
@@ -173,7 +192,7 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   return null;
 }
   }
-  final Procedure pollResult = dequeue();
+  final Procedure pollResult = dequeue(onlyUrgent);
 
   pollCalls++;
   nullPollCalls += (pollResult == null) ? 1 : 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b7c6664/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 45db2f3..c4fffa8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -209,6 +209,11 @@ public class ProcedureExecutor {
   private CopyOnWriteArrayList workerThreads;
 
   /**
+   * Worker thread only for urgent tasks.
+   */
+  private List urgentWorkerThreads;
+
+  

hbase git commit: HBASE-21423 Procedures for meta table/region should be able to execute in separate workers

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 5834a4f90 -> 6b8cfd276


HBASE-21423 Procedures for meta table/region should be able to execute in 
separate workers


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b8cfd27
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b8cfd27
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b8cfd27

Branch: refs/heads/branch-2.0
Commit: 6b8cfd276fc1e33c691f93575ed9ff2df06c08e2
Parents: 5834a4f
Author: Allan Yang 
Authored: Mon Nov 5 20:23:19 2018 +0800
Committer: Allan Yang 
Committed: Mon Nov 5 20:23:19 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  29 ++-
 .../hbase/procedure2/ProcedureExecutor.java |  64 ++-
 .../hbase/procedure2/ProcedureScheduler.java|  17 ++
 .../procedure2/SimpleProcedureScheduler.java|   2 +-
 .../procedure2/ProcedureTestingUtility.java |  10 +-
 .../hbase/procedure2/TestChildProcedures.java   |   2 +-
 .../hbase/procedure2/TestProcedureExecutor.java |   2 +-
 .../procedure2/TestProcedureSuspended.java  |   3 +-
 .../hbase/procedure2/TestYieldProcedures.java   |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   5 +-
 .../procedure/MasterProcedureConstants.java |   7 +
 .../procedure/MasterProcedureScheduler.java |   8 +-
 .../TestSplitTableRegionProcedure.java  |   2 +
 .../master/procedure/TestProcedurePriority.java |   1 +
 .../procedure/TestServerCrashProcedure.java |   2 +
 .../procedure/TestTableDDLProcedureBase.java|   2 +
 .../procedure/TestUrgentProcedureWorker.java| 188 +++
 17 files changed, 327 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b8cfd27/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 7ab1329..b2a2e5a 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -139,20 +139,39 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
* NOTE: this method is called with the sched lock held.
* @return the Procedure to execute, or null if nothing is available.
*/
-  protected abstract Procedure dequeue();
+  protected Procedure dequeue() {
+return dequeue(false);
+  }
+
+  protected abstract Procedure dequeue(boolean onlyUrgent);
+
+
+  @Override
+  public Procedure poll(boolean onlyUrgent) {
+return poll(onlyUrgent, -1);
+  }
 
   @Override
   public Procedure poll() {
-return poll(-1);
+return poll(false, -1);
+  }
+
+  @Override
+  public Procedure poll(boolean onlyUrgent, long timeout, TimeUnit unit) {
+return poll(onlyUrgent, unit.toNanos(timeout));
   }
 
   @Override
   public Procedure poll(long timeout, TimeUnit unit) {
-return poll(unit.toNanos(timeout));
+return poll(false, unit.toNanos(timeout));
   }
 
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
   public Procedure poll(final long nanos) {
+return poll(false, nanos);
+  }
+
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+  public Procedure poll(final boolean onlyUrgent, final long nanos) {
 schedLock();
 try {
   if (!running) {
@@ -173,7 +192,7 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   return null;
 }
   }
-  final Procedure pollResult = dequeue();
+  final Procedure pollResult = dequeue(onlyUrgent);
 
   pollCalls++;
   nullPollCalls += (pollResult == null) ? 1 : 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b8cfd27/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index b1f3de3..3bd5e0f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -209,6 +209,11 @@ public class ProcedureExecutor {
   private CopyOnWriteArrayList workerThreads;
 
   /**
+   * Worker thread only for urgent tasks.
+   */
+  private List urgentWorkerThreads;
+
+  

hbase git commit: HBASE-21395 Abort split/merge procedure if there is a table procedure of the same table going on

2018-11-05 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 da630c25e -> 5834a4f90


HBASE-21395 Abort split/merge procedure if there is a table procedure of the 
same table going on


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5834a4f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5834a4f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5834a4f9

Branch: refs/heads/branch-2.0
Commit: 5834a4f90917154598afed6acafdb6e973c29052
Parents: da630c2
Author: Allan Yang 
Authored: Mon Nov 5 20:08:42 2018 +0800
Committer: Allan Yang 
Committed: Mon Nov 5 20:08:42 2018 +0800

--
 .../assignment/MergeTableRegionsProcedure.java  | 20 ++
 .../assignment/SplitTableRegionProcedure.java   | 22 
 .../hbase/master/procedure/TableQueue.java  |  4 ++--
 3 files changed, 44 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5834a4f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 20ae444..c4ac8ed 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +52,7 @@ import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
 import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.master.procedure.TableQueue;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
@@ -527,6 +529,24 @@ public class MergeTableRegionsProcedure
   new IOException("Merge of " + regionsStr + " failed because merge 
switch is off"));
   return false;
 }
+// See HBASE-21395, for 2.0.x and 2.1.x only.
+// A safe fence here: if there is a table procedure going on, abort the merge.
+// There are some cases that may lead to a table procedure rollback (more serious
+// than rolling back the merge procedure here), or the merged regions may be
+// brought online by the table procedure because of the race between the merge
+// procedure and the table procedure.
+List tableProcedures = env
+.getMasterServices().getProcedures().stream()
+.filter(p -> p instanceof AbstractStateMachineTableProcedure)
+.map(p -> (AbstractStateMachineTableProcedure) p)
+.filter(p -> p.getProcId() != this.getProcId() && p.getTableName()
+.equals(regionsToMerge[0].getTable()) && !p.isFinished()
+&& TableQueue.requireTableExclusiveLock(p))
+.collect(Collectors.toList());
+if (tableProcedures != null && tableProcedures.size() > 0) {
+  throw new MergeRegionException(tableProcedures.get(0).toString()
+  + " is going on against the same table, abort the merge of " + this
+  .toString());
+}
 
 // Ask the remote regionserver if regions are mergeable. If we get an IOE, 
report it
 // along with the failure, so we can see why regions are not mergeable at 
this time.

http://git-wip-us.apache.org/repos/asf/hbase/blob/5834a4f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 411077f..e34415b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -32,6 +32,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
 import 

hbase git commit: HBASE-21384 Procedure with holdlock=false should not be restored lock when restarts

2018-10-25 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 614612a9d -> 66469733e


HBASE-21384 Procedure with holdlock=false should not be restored lock when 
restarts


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66469733
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66469733
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66469733

Branch: refs/heads/master
Commit: 66469733ec9bffb236c143b858e5748182ad71b3
Parents: 614612a
Author: Allan Yang 
Authored: Thu Oct 25 14:23:36 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 14:23:36 2018 +0800

--
 .../apache/hadoop/hbase/procedure2/LockAndQueue.java |  3 ++-
 .../apache/hadoop/hbase/procedure2/Procedure.java|  8 +++-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 15 ++-
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66469733/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index ae8daa2..4365a2c 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -73,7 +73,8 @@ public class LockAndQueue implements LockStatus {
 
   @Override
   public boolean hasParentLock(Procedure proc) {
-// TODO: need to check all the ancestors
+// TODO: need to check all the ancestors. Need to pass in the procedures
+// to find the ancestors.
 return proc.hasParent() &&
   (isLockOwner(proc.getParentProcId()) || 
isLockOwner(proc.getRootProcId()));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/66469733/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 81073e1..a85ccb1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -987,7 +987,13 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/66469733/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index bd75827..01d2b2b 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -653,7 +653,20 @@ public class ProcedureExecutor {
 sendProcedureLoadedNotification(p.getProcId());
   }
   // If the procedure holds the lock, put the procedure in front
-  if (p.isLockedWhenLoading()) {
+  // If its parent holds the lock, put the procedure in front
+  // TODO: Is it possible that an ancestor holds the lock?
+  // For now, the deepest procedure hierarchy is:
+  // ModifyTableProcedure -> ReopenTableProcedure ->
+  // MoveTableProcedure -> Unassign/AssignProcedure
+  // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock,
+  // so checking the parent lock is enough (a tricky case is resolved by
+  // HBASE-21384).
+  // If someone changes or adds new procedures so that a 'grandpa' procedure
+  // holds the lock but the parent procedure does not, there will be a
+  // problem here. We would have to check all of a procedure's ancestors,
+  // and change the LockAndQueue.hasParentLock(Procedure proc) method
+  // to check all ancestors too.
+  if (p.isLockedWhenLoading() || (p.hasParent() && procedures
+  .get(p.getParentProcId()).isLockedWhenLoading())) {
 scheduler.addFront(p, false);
   } else {
 // if it was not, it can wait.



hbase git commit: HBASE-21384 Procedure with holdlock=false should not be restored lock when restarts

2018-10-25 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 141d4e8b0 -> 11c9165cd


HBASE-21384 Procedure with holdlock=false should not be restored lock when 
restarts


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/11c9165c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/11c9165c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/11c9165c

Branch: refs/heads/branch-2
Commit: 11c9165cd72ee0405dabf217554239ef8f04f865
Parents: 141d4e8
Author: Allan Yang 
Authored: Thu Oct 25 14:15:25 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 14:15:25 2018 +0800

--
 .../apache/hadoop/hbase/procedure2/LockAndQueue.java |  3 ++-
 .../apache/hadoop/hbase/procedure2/Procedure.java|  8 +++-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 15 ++-
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/11c9165c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index ae8daa2..4365a2c 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -73,7 +73,8 @@ public class LockAndQueue implements LockStatus {
 
   @Override
   public boolean hasParentLock(Procedure proc) {
-// TODO: need to check all the ancestors
+// TODO: need to check all the ancestors. Need to pass in the procedures
+// to find the ancestors.
 return proc.hasParent() &&
   (isLockOwner(proc.getParentProcId()) || 
isLockOwner(proc.getRootProcId()));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/11c9165c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index cf81b8a..4ac7d26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -990,7 +990,13 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/11c9165c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 916c4af..7bfcba4 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -653,7 +653,20 @@ public class ProcedureExecutor {
 sendProcedureLoadedNotification(p.getProcId());
   }
   // If the procedure holds the lock, put the procedure in front
-  if (p.isLockedWhenLoading()) {
+  // If its parent holds the lock, put the procedure in front
+  // TODO: Is it possible that an ancestor holds the lock?
+  // For now, the deepest procedure hierarchy is:
+  // ModifyTableProcedure -> ReopenTableProcedure ->
+  // MoveTableProcedure -> Unassign/AssignProcedure
+  // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock,
+  // so checking the parent lock is enough (a tricky case is resolved by
+  // HBASE-21384).
+  // If someone changes or adds new procedures so that a 'grandpa' procedure
+  // holds the lock but the parent procedure does not, there will be a
+  // problem here. We would have to check all of a procedure's ancestors,
+  // and change the LockAndQueue.hasParentLock(Procedure proc) method
+  // to check all ancestors too.
+  if (p.isLockedWhenLoading() || (p.hasParent() && procedures
+  .get(p.getParentProcId()).isLockedWhenLoading())) {
 scheduler.addFront(p, false);
   } else {
 // if it was not, it can wait.



hbase git commit: HBASE-21384 Procedure with holdlock=false should not be restored lock when restarts

2018-10-24 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 5309c3389 -> e71c05707


HBASE-21384 Procedure with holdlock=false should not be restored lock when 
restarts


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e71c0570
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e71c0570
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e71c0570

Branch: refs/heads/branch-2.1
Commit: e71c05707e0717a1f1487b648e830fe7fd90d2eb
Parents: 5309c33
Author: Allan Yang 
Authored: Thu Oct 25 13:58:50 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 13:58:50 2018 +0800

--
 .../apache/hadoop/hbase/procedure2/LockAndQueue.java |  3 ++-
 .../apache/hadoop/hbase/procedure2/Procedure.java|  8 +++-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 15 ++-
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e71c0570/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index ae8daa2..4365a2c 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -73,7 +73,8 @@ public class LockAndQueue implements LockStatus {
 
   @Override
   public boolean hasParentLock(Procedure proc) {
-// TODO: need to check all the ancestors
+// TODO: need to check all the ancestors. Need to pass in the procedures
+// to find the ancestors.
 return proc.hasParent() &&
   (isLockOwner(proc.getParentProcId()) || 
isLockOwner(proc.getRootProcId()));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e71c0570/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 472a0d1..74082bf 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -992,7 +992,13 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/e71c0570/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c6c34df..ede2c90 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -653,7 +653,20 @@ public class ProcedureExecutor {
 sendProcedureLoadedNotification(p.getProcId());
   }
   // If the procedure holds the lock, put the procedure in front
-  if (p.isLockedWhenLoading()) {
+  // If its parent holds the lock, put the procedure in front
+  // TODO: Is it possible that an ancestor holds the lock?
+  // For now, the deepest procedure hierarchy is:
+  // ModifyTableProcedure -> ReopenTableProcedure ->
+  // MoveTableProcedure -> Unassign/AssignProcedure
+  // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock,
+  // so checking the parent lock is enough (a tricky case is resolved by
+  // HBASE-21384).
+  // If someone changes or adds new procedures so that a 'grandpa' procedure
+  // holds the lock but the parent procedure does not, there will be a
+  // problem here. We would have to check all of a procedure's ancestors,
+  // and change the LockAndQueue.hasParentLock(Procedure proc) method
+  // to check all ancestors too.
+  if (p.isLockedWhenLoading() || (p.hasParent() && procedures
+  .get(p.getParentProcId()).isLockedWhenLoading())) {
 scheduler.addFront(p, false);
   } else {
 // if it was not, it can wait.



hbase git commit: HBASE-21384 Procedure with holdlock=false should not be restored lock when restarts

2018-10-24 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 97578babc -> 85ee79904


HBASE-21384 Procedure with holdlock=false should not be restored lock when 
restarts


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/85ee7990
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/85ee7990
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/85ee7990

Branch: refs/heads/branch-2.0
Commit: 85ee79904977444553910f31c80d3af84f74678e
Parents: 97578ba
Author: Allan Yang 
Authored: Thu Oct 25 13:51:41 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 13:52:10 2018 +0800

--
 .../apache/hadoop/hbase/procedure2/LockAndQueue.java |  3 ++-
 .../apache/hadoop/hbase/procedure2/Procedure.java|  8 +++-
 .../hadoop/hbase/procedure2/ProcedureExecutor.java   | 15 ++-
 3 files changed, 23 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/85ee7990/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index ae8daa2..4365a2c 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -73,7 +73,8 @@ public class LockAndQueue implements LockStatus {
 
   @Override
   public boolean hasParentLock(Procedure proc) {
-// TODO: need to check all the ancestors
+// TODO: need to check all the ancestors. Need to pass in the procedures
+// to find the ancestors.
 return proc.hasParent() &&
   (isLockOwner(proc.getParentProcId()) || 
isLockOwner(proc.getRootProcId()));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/85ee7990/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index a271d8f..4cc9b61 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -993,7 +993,13 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/85ee7990/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index a410bc9..7dea3e7 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -653,7 +653,20 @@ public class ProcedureExecutor {
 sendProcedureLoadedNotification(p.getProcId());
   }
   // If the procedure holds the lock, put the procedure in front
-  if (p.isLockedWhenLoading()) {
+  // If its parent holds the lock, put the procedure in front
+  // TODO: Is it possible that an ancestor holds the lock?
+  // For now, the deepest procedure hierarchy is:
+  // ModifyTableProcedure -> ReopenTableProcedure ->
+  // MoveTableProcedure -> Unassign/AssignProcedure
+  // But ModifyTableProcedure and ReopenTableProcedure won't hold the lock,
+  // so checking the parent lock is enough (a tricky case is resolved by
+  // HBASE-21384).
+  // If someone changes or adds new procedures so that a 'grandpa' procedure
+  // holds the lock but the parent procedure does not, there will be a
+  // problem here. We would have to check all of a procedure's ancestors,
+  // and change the LockAndQueue.hasParentLock(Procedure proc) method
+  // to check all ancestors too.
+  if (p.isLockedWhenLoading() || (p.hasParent() && procedures
+  .get(p.getParentProcId()).isLockedWhenLoading())) {
 scheduler.addFront(p, false);
   } else {
 // if it was not, it can wait.



hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-24 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 5dde5b787 -> 614612a9d


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/614612a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/614612a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/614612a9

Branch: refs/heads/master
Commit: 614612a9d8b38ea1b8b960ee9ff55e90c09e45e1
Parents: 5dde5b7
Author: Allan Yang 
Authored: Thu Oct 25 12:05:28 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 12:05:28 2018 +0800

--
 .../hbase/procedure2/AbstractProcedureScheduler.java  | 10 ++
 .../org/apache/hadoop/hbase/procedure2/Procedure.java |  4 
 .../hadoop/hbase/procedure2/ProcedureExecutor.java| 11 ++-
 .../hadoop/hbase/procedure2/ProcedureScheduler.java   | 14 ++
 4 files changed, 38 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/614612a9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/614612a9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index d4d945d..81073e1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -717,6 +717,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/614612a9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 438b495..bd75827 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/614612a9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ 

hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-24 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 47c9879fa -> 141d4e8b0


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/141d4e8b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/141d4e8b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/141d4e8b

Branch: refs/heads/branch-2
Commit: 141d4e8b032a7fd2f9bd4e66eaa3330154f02b67
Parents: 47c9879
Author: Allan Yang 
Authored: Thu Oct 25 12:03:20 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 25 12:03:20 2018 +0800

--
 .../hbase/procedure2/AbstractProcedureScheduler.java  | 10 ++
 .../org/apache/hadoop/hbase/procedure2/Procedure.java |  4 
 .../hadoop/hbase/procedure2/ProcedureExecutor.java| 11 ++-
 .../hadoop/hbase/procedure2/ProcedureScheduler.java   | 14 ++
 4 files changed, 38 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/141d4e8b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/141d4e8b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 089caa0..cf81b8a 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -720,6 +720,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/141d4e8b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index d67b420..916c4af 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/141d4e8b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ 

hbase git commit: HBASE-21357 RS should abort if OOM in Reader thread

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f73d6193e -> 5f5f4e82b


HBASE-21357 RS should abort if OOM in Reader thread


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f5f4e82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f5f4e82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f5f4e82

Branch: refs/heads/branch-1
Commit: 5f5f4e82b126b27f833f643abeded67dae3af05d
Parents: f73d619
Author: Allan Yang 
Authored: Wed Oct 24 11:10:20 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 11:10:20 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f5f4e82/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 3f11233..a32040c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -732,6 +732,17 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 LOG.error(getName() + ": CancelledKeyException in Reader", e);
   } catch (IOException ex) {
 LOG.info(getName() + ": IOException in Reader", ex);
+  } catch (OutOfMemoryError e) {
+if (getErrorHandler() != null) {
+  if (getErrorHandler().checkOOME(e)) {
+RpcServer.LOG.info(Thread.currentThread().getName()
++ ": exiting on OutOfMemoryError");
+return;
+  }
+} else {
+  // rethrow if no handler
+  throw e;
+}
   }
 }
   }



hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 d35f65f39 -> 6c9e3d067


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c9e3d06
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c9e3d06
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c9e3d06

Branch: refs/heads/branch-2.1
Commit: 6c9e3d0670bef6c159981850b9c138f60b2c8317
Parents: d35f65f
Author: Allan Yang 
Authored: Wed Oct 24 10:52:52 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 10:52:52 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  10 +
 .../hadoop/hbase/procedure2/Procedure.java  |   4 +
 .../hbase/procedure2/ProcedureExecutor.java |  11 +-
 .../hbase/procedure2/ProcedureScheduler.java|  14 ++
 .../TestMasterProcedureSchedulerOnRestart.java  | 207 +++
 5 files changed, 245 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 01dc1be..472a0d1 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -719,6 +719,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 3548e6e..c6c34df 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c9e3d06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 

hbase git commit: HBASE-21364 Procedure holds the lock should put to front of the queue after restart

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 c8bfc70d1 -> 169e3bafc


HBASE-21364 Procedure holds the lock should put to front of the queue after 
restart


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/169e3baf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/169e3baf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/169e3baf

Branch: refs/heads/branch-2.0
Commit: 169e3bafc889df2b299c9ecfdd57f8d6ff2060dd
Parents: c8bfc70
Author: Allan Yang 
Authored: Wed Oct 24 10:46:09 2018 +0800
Committer: Allan Yang 
Committed: Wed Oct 24 10:46:09 2018 +0800

--
 .../procedure2/AbstractProcedureScheduler.java  |  10 +
 .../hadoop/hbase/procedure2/Procedure.java  |   4 +
 .../hbase/procedure2/ProcedureExecutor.java |  11 +-
 .../hbase/procedure2/ProcedureScheduler.java|  14 ++
 .../TestMasterProcedureSchedulerOnRestart.java  | 207 +++
 5 files changed, 245 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 5645f89..7ab1329 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -86,6 +86,11 @@ public abstract class AbstractProcedureScheduler implements 
ProcedureScheduler {
   }
 
   @Override
+  public void addFront(final Procedure procedure, boolean notify) {
+push(procedure, true, notify);
+  }
+
+  @Override
   public void addFront(Iterator procedureIterator) {
 schedLock();
 try {
@@ -109,6 +114,11 @@ public abstract class AbstractProcedureScheduler 
implements ProcedureScheduler {
 push(procedure, false, true);
   }
 
+  @Override
+  public void addBack(final Procedure procedure, boolean notify) {
+push(procedure, false, notify);
+  }
+
   protected void push(final Procedure procedure, final boolean addFront, final 
boolean notify) {
 schedLock();
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index a1391a5..a271d8f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -720,6 +720,10 @@ public abstract class Procedure implements 
Comparablehttp://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 43663ef..a410bc9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -652,8 +652,17 @@ public class ProcedureExecutor {
   if (!p.hasParent()) {
 sendProcedureLoadedNotification(p.getProcId());
   }
-  scheduler.addBack(p);
+  // If the procedure holds the lock, put the procedure in front
+  if (p.isLockedWhenLoading()) {
+scheduler.addFront(p, false);
+  } else {
+// if it was not, it can wait.
+scheduler.addBack(p, false);
+  }
 });
+// After all procedures put into the queue, signal the worker threads.
+// Otherwise, there is a race condition. See HBASE-21364.
+scheduler.signalAll();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/169e3baf/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index e7e1cdb..9489f52 100644
--- 

hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 80ac2f969 -> 1b1dabd1f


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b1dabd1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b1dabd1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b1dabd1

Branch: refs/heads/branch-2
Commit: 1b1dabd1f5c09ab75887b84132aa5e766537cb07
Parents: 80ac2f9
Author: Allan Yang 
Authored: Tue Oct 23 16:15:35 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:15:35 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1b1dabd1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to 
ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 7c04a95f4 -> e29ce9f93


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e29ce9f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e29ce9f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e29ce9f9

Branch: refs/heads/branch-2.1
Commit: e29ce9f93753d79edfa4e8b864c31c34e33ea635
Parents: 7c04a95
Author: Allan Yang 
Authored: Tue Oct 23 16:13:24 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:13:24 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e29ce9f9/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to 
ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 603bf4c55 -> 3b68e5393


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b68e539
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b68e539
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b68e539

Branch: refs/heads/master
Commit: 3b68e5393edba011146962c7457faffc1e3c0ee7
Parents: 603bf4c
Author: Allan Yang 
Authored: Tue Oct 23 16:09:05 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:09:05 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b68e539/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to 
ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure

2018-10-23 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 01d94d710 -> a31e71564


HBASE-20973 ArrayIndexOutOfBoundsException when rolling back procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a31e7156
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a31e7156
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a31e7156

Branch: refs/heads/branch-2.0
Commit: a31e71564fd5c276cef7b8381e5285b8703eef13
Parents: 01d94d7
Author: Allan Yang 
Authored: Tue Oct 23 16:06:19 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 16:06:19 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java   | 18 ++
 1 file changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a31e7156/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 64479b2..9f99e26 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -376,6 +376,24 @@ public class ProcedureStoreTracker {
   }
 
   private BitSetNode getOrCreateNode(long procId) {
+// See HBASE-20973, grow or merge can lead to 
ArrayIndexOutOfBoundsException
+// The root cause is not revealed yet, disable grow or merge for now
+return getOrCreateNodeNoGrowOrMerge(procId);
+  }
+
+  private BitSetNode getOrCreateNodeNoGrowOrMerge(long procId) {
+Map.Entry entry = map.floorEntry(procId);
+if (entry != null && entry.getValue().contains(procId)) {
+  return entry.getValue();
+} else {
+  BitSetNode node = new BitSetNode(procId, partial);
+  assert !map.containsKey(node.getStart());
+  map.put(node.getStart(), node);
+  return node;
+}
+  }
+
+  private BitSetNode getOrCreateNodeWithGrowOrMerge(long procId) {
 // If procId can fit in left node (directly or by growing it)
 BitSetNode leftNode = null;
 boolean leftCanGrow = false;



hbase git commit: HBASE-21354 Procedure may be deleted improperly during master restarts resulting in 'Corrupt'

2018-10-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master ae13b0b29 -> 86f23128b


HBASE-21354 Procedure may be deleted improperly during master restarts 
resulting in 'Corrupt'


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/86f23128
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/86f23128
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/86f23128

Branch: refs/heads/master
Commit: 86f23128b0d66deb70790785e63d2f7e01d5ab8d
Parents: ae13b0b
Author: Allan Yang 
Authored: Tue Oct 23 10:55:18 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 10:55:18 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java |  25 +-
 .../procedure2/store/wal/WALProcedureStore.java |  52 ++--
 .../procedure2/ProcedureTestingUtility.java |  32 ++-
 .../hbase/procedure2/TestProcedureCleanup.java  | 242 +++
 4 files changed, 324 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/86f23128/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index a5b5825..64479b2 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -203,7 +203,7 @@ public class ProcedureStoreTracker {
* then we mark it as deleted.
* @see #setDeletedIfModified(long...)
*/
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
+  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
   final long minProcId = node.getStart();
@@ -214,9 +214,26 @@ public class ProcedureStoreTracker {
 }
 
 trackerNode = tracker.lookupClosestNode(trackerNode, procId);
-if (trackerNode == null || !trackerNode.contains(procId) ||
-  trackerNode.isModified(procId)) {
-  // the procedure was removed or modified
+if (trackerNode == null || !trackerNode.contains(procId)) {
+  // the procId is not exist in the track, we can only delete the proc
+  // if globalTracker set to true.
+  // Only if the procedure is not in the global tracker we can delete 
the
+  // the procedure. In other cases, the procedure may not update in a 
single
+  // log, we cannot delete it just because the log's track doesn't have
+  // any info for the procedure.
+  if (globalTracker) {
+node.delete(procId);
+  }
+  continue;
+}
+// Only check delete in the global tracker, only global tracker has the
+// whole picture
+if (globalTracker && trackerNode.isDeleted(procId) == DeleteState.YES) 
{
+  node.delete(procId);
+  continue;
+}
+if (trackerNode.isModified(procId)) {
+  // the procedure was modified
   node.delete(procId);
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/86f23128/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 5a5face..0a89c3f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -97,7 +97,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
  * will first be initialized to the oldest file's tracker(which is stored in 
the trailer), using the
  * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, 
boolean)}, and then merge it
  * with the tracker of every newer wal files, using the
- * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
+ * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+ * If we find out
  * that all the modified procedures for the oldest wal file are modified or 
deleted in newer wal
  * files, then we can delete it. This is because that, every time we call
  * {@link 

hbase git commit: HBASE-21354 Procedure may be deleted improperly during master restarts resulting in 'Corrupt'

2018-10-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 bf2ce5488 -> dfd78c748


HBASE-21354 Procedure may be deleted improperly during master restarts 
resulting in 'Corrupt'


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfd78c74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfd78c74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfd78c74

Branch: refs/heads/branch-2
Commit: dfd78c748475d0ecf7e07449fa5007f2c32c7172
Parents: bf2ce54
Author: Allan Yang 
Authored: Tue Oct 23 10:51:23 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 10:51:23 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java |  25 +-
 .../procedure2/store/wal/WALProcedureStore.java |  52 ++--
 .../procedure2/ProcedureTestingUtility.java |  32 ++-
 .../hbase/procedure2/TestProcedureCleanup.java  | 242 +++
 4 files changed, 324 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfd78c74/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index a5b5825..64479b2 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -203,7 +203,7 @@ public class ProcedureStoreTracker {
* then we mark it as deleted.
* @see #setDeletedIfModified(long...)
*/
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
+  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
   final long minProcId = node.getStart();
@@ -214,9 +214,26 @@ public class ProcedureStoreTracker {
 }
 
 trackerNode = tracker.lookupClosestNode(trackerNode, procId);
-if (trackerNode == null || !trackerNode.contains(procId) ||
-  trackerNode.isModified(procId)) {
-  // the procedure was removed or modified
+if (trackerNode == null || !trackerNode.contains(procId)) {
+  // the procId is not exist in the track, we can only delete the proc
+  // if globalTracker set to true.
+  // Only if the procedure is not in the global tracker we can delete 
the
+  // the procedure. In other cases, the procedure may not update in a 
single
+  // log, we cannot delete it just because the log's track doesn't have
+  // any info for the procedure.
+  if (globalTracker) {
+node.delete(procId);
+  }
+  continue;
+}
+// Only check delete in the global tracker, only global tracker has the
+// whole picture
+if (globalTracker && trackerNode.isDeleted(procId) == DeleteState.YES) 
{
+  node.delete(procId);
+  continue;
+}
+if (trackerNode.isModified(procId)) {
+  // the procedure was modified
   node.delete(procId);
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dfd78c74/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 1aee86d..4bc668e 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -97,7 +97,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
  * will first be initialized to the oldest file's tracker(which is stored in 
the trailer), using the
  * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, 
boolean)}, and then merge it
  * with the tracker of every newer wal files, using the
- * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
+ * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+ * If we find out
  * that all the modified procedures for the oldest wal file are modified or 
deleted in newer wal
  * files, then we can delete it. This is because that, every time we call
  * {@link 

hbase git commit: HBASE-21354 Procedure may be deleted improperly during master restarts resulting in 'Corrupt'(addendum)

2018-10-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 65f9a0b57 -> 3b74656dc


HBASE-21354 Procedure may be deleted improperly during master restarts 
resulting in 'Corrupt'(addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b74656d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b74656d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b74656d

Branch: refs/heads/branch-2.0
Commit: 3b74656dc622475e795d487403437178d5e71461
Parents: 65f9a0b
Author: Allan Yang 
Authored: Tue Oct 23 10:29:04 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 10:29:40 2018 +0800

--
 .../java/org/apache/hadoop/hbase/regionserver/HRegionServer.java   | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b74656d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 8edd794..771692b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -239,8 +239,6 @@ public class HRegionServer extends HasThread implements
 
   protected HeapMemoryManager hMemManager;
 
-  //trival change to trigger the UT
-
   /**
* Cluster connection to be shared by services.
* Initialized at server startup and closed when server shuts down.



hbase git commit: HBASE-21354 Procedure may be deleted improperly during master restarts resulting in 'Corrupt'

2018-10-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 217377030 -> c141547f3


HBASE-21354 Procedure may be deleted improperly during master restarts 
resulting in 'Corrupt'


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c141547f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c141547f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c141547f

Branch: refs/heads/branch-2.1
Commit: c141547f3b1d215e6f8096e9abd53569406b1da3
Parents: 2173770
Author: Allan Yang 
Authored: Tue Oct 23 10:27:02 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 10:27:48 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java |  25 +-
 .../procedure2/store/wal/WALProcedureStore.java |  52 ++--
 .../procedure2/ProcedureTestingUtility.java |  36 ++-
 .../hbase/procedure2/TestProcedureCleanup.java  | 242 +++
 4 files changed, 329 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c141547f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index a5b5825..64479b2 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -203,7 +203,7 @@ public class ProcedureStoreTracker {
* then we mark it as deleted.
* @see #setDeletedIfModified(long...)
*/
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
+  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
   final long minProcId = node.getStart();
@@ -214,9 +214,26 @@ public class ProcedureStoreTracker {
 }
 
 trackerNode = tracker.lookupClosestNode(trackerNode, procId);
-if (trackerNode == null || !trackerNode.contains(procId) ||
-  trackerNode.isModified(procId)) {
-  // the procedure was removed or modified
+if (trackerNode == null || !trackerNode.contains(procId)) {
+  // the procId does not exist in the tracker; we can only delete the proc
+  // if globalTracker is set to true.
+  // Only if the procedure is not in the global tracker can we delete
+  // the procedure. In other cases, the procedure may not be updated in a
+  // single log, so we cannot delete it just because the log's tracker
+  // doesn't have any info for the procedure.
+  if (globalTracker) {
+node.delete(procId);
+  }
+  continue;
+}
+// Only check delete in the global tracker, only global tracker has the
+// whole picture
+if (globalTracker && trackerNode.isDeleted(procId) == DeleteState.YES) 
{
+  node.delete(procId);
+  continue;
+}
+if (trackerNode.isModified(procId)) {
+  // the procedure was modified
   node.delete(procId);
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c141547f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 1aee86d..4bc668e 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -97,7 +97,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
  * will first be initialized to the oldest file's tracker(which is stored in 
the trailer), using the
  * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, 
boolean)}, and then merge it
  * with the tracker of every newer wal files, using the
- * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
+ * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+ * If we find out
  * that all the modified procedures for the oldest wal file are modified or 
deleted in newer wal
  * files, then we can delete it. This is because that, every time we call
  * {@link 

hbase git commit: HBASE-21354 Procedure may be deleted improperly during master restarts resulting in 'Corrupt'

2018-10-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 6bd36fef0 -> e20e25b64


HBASE-21354 Procedure may be deleted improperly during master restarts 
resulting in 'Corrupt'


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e20e25b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e20e25b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e20e25b6

Branch: refs/heads/branch-2.0
Commit: e20e25b6403c101a347b502667cb1742554951dd
Parents: 6bd36fe
Author: Allan Yang 
Authored: Tue Oct 23 10:18:24 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 23 10:18:24 2018 +0800

--
 .../procedure2/store/ProcedureStoreTracker.java |  25 +-
 .../procedure2/store/wal/WALProcedureStore.java |  52 ++--
 .../procedure2/ProcedureTestingUtility.java |  36 ++-
 .../hbase/procedure2/TestProcedureCleanup.java  | 242 +++
 .../hbase/regionserver/HRegionServer.java   |   2 +
 5 files changed, 331 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e20e25b6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index a5b5825..64479b2 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -203,7 +203,7 @@ public class ProcedureStoreTracker {
* then we mark it as deleted.
* @see #setDeletedIfModified(long...)
*/
-  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker) {
+  public void setDeletedIfModifiedInBoth(ProcedureStoreTracker tracker, 
boolean globalTracker) {
 BitSetNode trackerNode = null;
 for (BitSetNode node : map.values()) {
   final long minProcId = node.getStart();
@@ -214,9 +214,26 @@ public class ProcedureStoreTracker {
 }
 
 trackerNode = tracker.lookupClosestNode(trackerNode, procId);
-if (trackerNode == null || !trackerNode.contains(procId) ||
-  trackerNode.isModified(procId)) {
-  // the procedure was removed or modified
+if (trackerNode == null || !trackerNode.contains(procId)) {
+  // the procId does not exist in the tracker; we can only delete the proc
+  // if globalTracker is set to true.
+  // Only if the procedure is not in the global tracker can we delete
+  // the procedure. In other cases, the procedure may not be updated in a
+  // single log, so we cannot delete it just because the log's tracker
+  // doesn't have any info for the procedure.
+  if (globalTracker) {
+node.delete(procId);
+  }
+  continue;
+}
+// Only check delete in the global tracker, only global tracker has the
+// whole picture
+if (globalTracker && trackerNode.isDeleted(procId) == DeleteState.YES) 
{
+  node.delete(procId);
+  continue;
+}
+if (trackerNode.isModified(procId)) {
+  // the procedure was modified
   node.delete(procId);
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e20e25b6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 9de240e..bfa78c8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -95,7 +95,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
  * will first be initialized to the oldest file's tracker(which is stored in 
the trailer), using the
  * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, 
boolean)}, and then merge it
  * with the tracker of every newer wal files, using the
- * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we 
find out
+ * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+ * If we find out
  * that all the modified procedures for the oldest wal file are modified or 
deleted in newer wal
  * files, then we can delete it. This is 

hbase git commit: HBASE-21288 HostingServer in UnassignProcedure is not accurate

2018-10-18 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 fffd9b9b6 -> b3c3393c1


HBASE-21288 HostingServer in UnassignProcedure is not accurate

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3c3393c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3c3393c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3c3393c

Branch: refs/heads/branch-2.1
Commit: b3c3393c19d09240588697b69dd7a9643983d357
Parents: fffd9b9
Author: Allan Yang 
Authored: Thu Oct 18 21:10:53 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 18 21:10:53 2018 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 15 +++
 .../hbase/master/assignment/UnassignProcedure.java  | 16 +++-
 2 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3c3393c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 74ce20e..af21012 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -1587,6 +1588,20 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   this.serverManager.getDeadServers());
 return false;
   }
+  Map onlineServers = 
serverManager.getOnlineServers();
+  int regionNotOnOnlineServer = 0;
+  for (RegionState regionState : 
assignmentManager.getRegionStates().getRegionStates()) {
+if (regionState.isOpened() && !onlineServers
+.containsKey(regionState.getServerName())) {
+  LOG.warn("{} 's server is not in the online server list.", 
regionState);
+  regionNotOnOnlineServer++;
+}
+  }
+  if (regionNotOnOnlineServer > 0) {
+LOG.info("Not running balancer because {} regions found not on an 
online server",
+regionNotOnOnlineServer);
+return false;
+  }
 
   if (this.cpHost != null) {
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3c3393c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index 589b732..23b2de7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -77,6 +77,8 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   /**
* Where to send the unassign RPC.
+   * This may not be accurate, since another RTP may change the location of
+   * the region. The hostingServer will be updated in updateTransition.
*/
   protected volatile ServerName hostingServer;
   /**
@@ -198,6 +200,13 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
   return false;
 }
 
+if (regionNode.getRegionLocation() != null && !regionNode
+.getRegionLocation().equals(hostingServer)) {
+  LOG.info("HostingServer changed from {} to {} for {}", hostingServer,
+  regionNode.getRegionLocation(), this);
+  this.hostingServer = regionNode.getRegionLocation();
+}
+
 
 // Mark the region as CLOSING.
 env.getAssignmentManager().markRegionAsClosing(regionNode);
@@ -357,7 +366,12 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   @Override
   public ServerName getServer(final MasterProcedureEnv env) {
-return this.hostingServer;
+RegionStateNode node =
+
env.getAssignmentManager().getRegionStates().getRegionStateNode(this.getRegionInfo());
+if (node == null) {
+  return null;
+}
+return node.getRegionLocation();
   }
 
   @Override



hbase git commit: HBASE-21288 HostingServer in UnassignProcedure is not accurate

2018-10-18 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 fef4fb36b -> 9cc5f86e8


HBASE-21288 HostingServer in UnassignProcedure is not accurate


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9cc5f86e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9cc5f86e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9cc5f86e

Branch: refs/heads/branch-2.0
Commit: 9cc5f86e8fa4fe9fbf25844ddd5e52acb7d7df54
Parents: fef4fb3
Author: Allan Yang 
Authored: Thu Oct 18 20:47:27 2018 +0800
Committer: Allan Yang 
Committed: Thu Oct 18 20:47:27 2018 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 15 +++
 .../hbase/master/assignment/UnassignProcedure.java  | 16 +++-
 2 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9cc5f86e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a8ba3af..67152e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ServerMetrics;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -1552,6 +1553,20 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   this.serverManager.getDeadServers());
 return false;
   }
+  Map onlineServers = 
serverManager.getOnlineServers();
+  int regionNotOnOnlineServer = 0;
+  for (RegionState regionState : 
assignmentManager.getRegionStates().getRegionStates()) {
+if (regionState.isOpened() && !onlineServers
+.containsKey(regionState.getServerName())) {
+  LOG.warn("{} 's server is not in the online server list.", 
regionState);
+  regionNotOnOnlineServer++;
+}
+  }
+  if (regionNotOnOnlineServer > 0) {
+LOG.info("Not running balancer because {} regions found not on an 
online server",
+regionNotOnOnlineServer);
+return false;
+  }
 
   if (this.cpHost != null) {
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9cc5f86e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index 589b732..23b2de7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -77,6 +77,8 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   /**
* Where to send the unassign RPC.
+   * This may not be accurate, since another RTP may change the location of
+   * the region. The hostingServer will be updated in updateTransition.
*/
   protected volatile ServerName hostingServer;
   /**
@@ -198,6 +200,13 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
   return false;
 }
 
+if (regionNode.getRegionLocation() != null && !regionNode
+.getRegionLocation().equals(hostingServer)) {
+  LOG.info("HostingServer changed from {} to {} for {}", hostingServer,
+  regionNode.getRegionLocation(), this);
+  this.hostingServer = regionNode.getRegionLocation();
+}
+
 
 // Mark the region as CLOSING.
 env.getAssignmentManager().markRegionAsClosing(regionNode);
@@ -357,7 +366,12 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   @Override
   public ServerName getServer(final MasterProcedureEnv env) {
-return this.hostingServer;
+RegionStateNode node =
+
env.getAssignmentManager().getRegionStates().getRegionStateNode(this.getRegionInfo());
+if (node == null) {
+  return null;
+}
+return node.getRegionLocation();
   }
 
   @Override



hbase git commit: HBASE-21291 Add a test for bypassing stuck state-machine procedures

2018-10-16 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 deae1316a -> 85d81fe08


HBASE-21291 Add a test for bypassing stuck state-machine procedures

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/85d81fe0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/85d81fe0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/85d81fe0

Branch: refs/heads/branch-2
Commit: 85d81fe083380f142ae554b5f97f79214b1edb28
Parents: deae131
Author: Jingyun Tian 
Authored: Tue Oct 16 22:57:50 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 16 22:57:50 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  1 +
 .../procedure2/ProcedureTestingUtility.java | 40 +
 .../hbase/procedure2/TestProcedureBypass.java   | 63 
 3 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/85d81fe0/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 7c33284..d57fcb5 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1054,6 +1054,7 @@ public class ProcedureExecutor {
   }
 
   boolean bypassProcedure(long pid, long lockWait, boolean force) throws 
IOException {
+Preconditions.checkArgument(lockWait > 0, "lockWait should be positive");
 Procedure procedure = getProcedure(pid);
 if (procedure == null) {
   LOG.debug("Procedure with id={} does not exist, skipping bypass", pid);

http://git-wip-us.apache.org/repos/asf/hbase/blob/85d81fe0/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index d52b6bb..4d06e2f 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -400,6 +400,46 @@ public class ProcedureTestingUtility {
 }
   }
 
+  public static class NoopStateMachineProcedure
+  extends StateMachineProcedure {
+private TState initialState;
+private TEnv env;
+
+public NoopStateMachineProcedure() {
+}
+
+public NoopStateMachineProcedure(TEnv env, TState initialState) {
+  this.env = env;
+  this.initialState = initialState;
+}
+
+@Override
+protected Flow executeFromState(TEnv env, TState tState)
+throws ProcedureSuspendedException, ProcedureYieldException, 
InterruptedException {
+  return null;
+}
+
+@Override
+protected void rollbackState(TEnv env, TState tState) throws IOException, 
InterruptedException {
+
+}
+
+@Override
+protected TState getState(int stateId) {
+  return null;
+}
+
+@Override
+protected int getStateId(TState tState) {
+  return 0;
+}
+
+@Override
+protected TState getInitialState() {
+  return initialState;
+}
+  }
+
   public static class TestProcedure extends NoopProcedure {
 private byte[] data = null;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/85d81fe0/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index d58d57e..0c59f30 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hbase.procedure2;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.fs.FileSystem;
@@ -119,6 +121,20 @@ public class TestProcedureBypass {
 LOG.info("{} finished", proc);
   }
 
+  @Test
+  public void testBypassingStuckStateMachineProcedure() throws Exception {
+final StuckStateMachineProcedure 

hbase git commit: HBASE-21291 Add a test for bypassing stuck state-machine procedures

2018-10-16 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master fa652cc61 -> 821e4d7de


HBASE-21291 Add a test for bypassing stuck state-machine procedures

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/821e4d7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/821e4d7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/821e4d7d

Branch: refs/heads/master
Commit: 821e4d7de2d576189f4288d1c2acf9e9a9471f5c
Parents: fa652cc
Author: Jingyun Tian 
Authored: Tue Oct 16 22:26:58 2018 +0800
Committer: Allan Yang 
Committed: Tue Oct 16 22:26:58 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java |  1 +
 .../procedure2/ProcedureTestingUtility.java | 40 +
 .../hbase/procedure2/TestProcedureBypass.java   | 63 
 3 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/821e4d7d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 9412fbd..8a295f3 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1054,6 +1054,7 @@ public class ProcedureExecutor {
   }
 
   boolean bypassProcedure(long pid, long lockWait, boolean force) throws 
IOException {
+Preconditions.checkArgument(lockWait > 0, "lockWait should be positive");
 Procedure procedure = getProcedure(pid);
 if (procedure == null) {
   LOG.debug("Procedure with id={} does not exist, skipping bypass", pid);

http://git-wip-us.apache.org/repos/asf/hbase/blob/821e4d7d/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index d52b6bb..4d06e2f 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -400,6 +400,46 @@ public class ProcedureTestingUtility {
 }
   }
 
+  public static class NoopStateMachineProcedure
+  extends StateMachineProcedure {
+private TState initialState;
+private TEnv env;
+
+public NoopStateMachineProcedure() {
+}
+
+public NoopStateMachineProcedure(TEnv env, TState initialState) {
+  this.env = env;
+  this.initialState = initialState;
+}
+
+@Override
+protected Flow executeFromState(TEnv env, TState tState)
+throws ProcedureSuspendedException, ProcedureYieldException, 
InterruptedException {
+  return null;
+}
+
+@Override
+protected void rollbackState(TEnv env, TState tState) throws IOException, 
InterruptedException {
+
+}
+
+@Override
+protected TState getState(int stateId) {
+  return null;
+}
+
+@Override
+protected int getStateId(TState tState) {
+  return 0;
+}
+
+@Override
+protected TState getInitialState() {
+  return initialState;
+}
+  }
+
   public static class TestProcedure extends NoopProcedure {
 private byte[] data = null;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/821e4d7d/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index d58d57e..0c59f30 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
+++ 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hbase.procedure2;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.fs.FileSystem;
@@ -119,6 +121,20 @@ public class TestProcedureBypass {
 LOG.info("{} finished", proc);
   }
 
+  @Test
+  public void testBypassingStuckStateMachineProcedure() throws Exception {
+final StuckStateMachineProcedure proc 

hbase git commit: Revert "HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close region requests to RS"

2018-09-28 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 4947e72f6 -> f6c05facc


Revert "HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close 
region requests to RS"


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f6c05fac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f6c05fac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f6c05fac

Branch: refs/heads/branch-2.1
Commit: f6c05faccfc0167508da4ccf99b42e44c085b1b8
Parents: 4947e72
Author: Allan Yang 
Authored: Fri Sep 28 14:06:54 2018 +0800
Committer: Allan Yang 
Committed: Fri Sep 28 14:07:40 2018 +0800

--
 .../hbase/master/procedure/RSProcedureDispatcher.java   | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f6c05fac/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 141b855..638f9d3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -109,12 +109,16 @@ public class RSProcedureDispatcher
   protected void remoteDispatch(final ServerName serverName,
   final Set remoteProcedures) {
 final int rsVersion = 
master.getServerManager().getVersionNumber(serverName);
-if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
+if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) {
+  LOG.trace("Using procedure batch rpc execution for serverName={} 
version={}", serverName,
+rsVersion);
+  submitTask(new ExecuteProceduresRemoteCall(serverName, 
remoteProcedures));
+} else if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
   submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
 } else {
-  // See HBASE-21237, fallback to CompatRemoteProcedureResolver for now. 
Since
-  // ExecuteProceduresRemoteCall will group all the open/close requests. 
If one
-  // fails, master will regard all the requests as failure and then cause 
some trouble.
+  LOG.info(String.format(
+"Fallback to compat rpc execution for serverName=%s version=%s",
+serverName, rsVersion));
   submitTask(new CompatRemoteProcedureResolver(serverName, 
remoteProcedures));
 }
   }



hbase git commit: HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close region requests to RS

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 eb2725126 -> 0290f57c3


HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close region 
requests to RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0290f57c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0290f57c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0290f57c

Branch: refs/heads/branch-2.1
Commit: 0290f57c3aade51a2ed920adeb534f78e879a37c
Parents: eb27251
Author: Allan Yang 
Authored: Fri Sep 28 09:41:31 2018 +0800
Committer: Allan Yang 
Committed: Fri Sep 28 09:41:31 2018 +0800

--
 .../hbase/master/procedure/RSProcedureDispatcher.java   | 12 
 1 file changed, 4 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0290f57c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 638f9d3..141b855 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -109,16 +109,12 @@ public class RSProcedureDispatcher
   protected void remoteDispatch(final ServerName serverName,
   final Set remoteProcedures) {
 final int rsVersion = 
master.getServerManager().getVersionNumber(serverName);
-if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) {
-  LOG.trace("Using procedure batch rpc execution for serverName={} 
version={}", serverName,
-rsVersion);
-  submitTask(new ExecuteProceduresRemoteCall(serverName, 
remoteProcedures));
-} else if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
+if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
   submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
 } else {
-  LOG.info(String.format(
-"Fallback to compat rpc execution for serverName=%s version=%s",
-serverName, rsVersion));
+  // See HBASE-21237, fallback to CompatRemoteProcedureResolver for now. 
Since
+  // ExecuteProceduresRemoteCall will group all the open/close requests. 
If one
+  // fails, master will regard all the requests as failure and then cause 
some trouble.
   submitTask(new CompatRemoteProcedureResolver(serverName, 
remoteProcedures));
 }
   }



hbase git commit: HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close region requests to RS

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 493ca10e6 -> 073198d2b


HBASE-21237 Use CompatRemoteProcedureResolver to dispatch open/close region 
requests to RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/073198d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/073198d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/073198d2

Branch: refs/heads/branch-2.0
Commit: 073198d2b6df1e769db7e1bf4bab6be3fc2895aa
Parents: 493ca10
Author: Allan Yang 
Authored: Thu Sep 27 16:49:28 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 16:49:28 2018 +0800

--
 .../hbase/master/procedure/RSProcedureDispatcher.java   | 12 
 1 file changed, 4 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/073198d2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 2d12ec9..ad0451d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -108,16 +108,12 @@ public class RSProcedureDispatcher
   protected void remoteDispatch(final ServerName serverName,
   final Set remoteProcedures) {
 final int rsVersion = 
master.getServerManager().getServerVersion(serverName);
-if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) {
-  LOG.trace("Using procedure batch rpc execution for serverName={} 
version={}", serverName,
-rsVersion);
-  submitTask(new ExecuteProceduresRemoteCall(serverName, 
remoteProcedures));
-} else if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
+if (rsVersion == 0 && 
!master.getServerManager().isServerOnline(serverName)) {
   submitTask(new DeadRSRemoteCall(serverName, remoteProcedures));
 } else {
-  LOG.info(String.format(
-"Fallback to compat rpc execution for serverName=%s version=%s",
-serverName, rsVersion));
+  // See HBASE-21237, fallback to CompatRemoteProcedureResolver for now. 
Since
+  // ExecuteProceduresRemoteCall will group all the open/close requests. 
If one
+  // fails, master will regard all the requests as failure and then cause 
some trouble.
   submitTask(new CompatRemoteProcedureResolver(serverName, 
remoteProcedures));
 }
   }



hbase git commit: HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never clean later

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-1 04c732e9d -> 0b6e2907b


HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never 
clean later


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b6e2907
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b6e2907
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b6e2907

Branch: refs/heads/branch-1
Commit: 0b6e2907bdc623a0b15358bd4a95ce6097e3
Parents: 04c732e
Author: Allan Yang 
Authored: Thu Sep 27 16:44:46 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 16:44:46 2018 +0800

--
 .../hadoop/hbase/regionserver/wal/FSHLog.java   | 26 +---
 1 file changed, 12 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6e2907/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 141eafb..423ceb2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -189,12 +189,11 @@ public class FSHLog implements WAL {
   private final RingBufferEventHandler ringBufferEventHandler;
 
   /**
-   * Map of {@link SyncFuture}s keyed by Handler objects.  Used so we reuse 
SyncFutures.
-   * TODO: Reus FSWALEntry's rather than create them anew each time as we do 
SyncFutures here.
-   * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather 
than have them
-   * get them from this Map?
+   * Map of {@link SyncFuture}s owned by Thread objects. Used so we reuse 
SyncFutures.
+   * Thread local is used so JVM can GC the terminated thread for us. See 
HBASE-21228
+   * 
*/
-  private final Map syncFuturesByHandler;
+  private final ThreadLocal cachedSyncFutures;
 
   /**
* The highest known outstanding unsync'd WALEdit sequence number where 
sequence number is the
@@ -573,8 +572,12 @@ public class FSHLog implements WAL {
 maxHandlersCount);
 this.disruptor.handleExceptionsWith(new RingBufferExceptionHandler());
 this.disruptor.handleEventsWith(new RingBufferEventHandler [] 
{this.ringBufferEventHandler});
-// Presize our map of SyncFutures by handler objects.
-this.syncFuturesByHandler = new ConcurrentHashMap(maxHandlersCount);
+this.cachedSyncFutures = new ThreadLocal() {
+  @Override
+  protected SyncFuture initialValue() {
+return new SyncFuture();
+  }
+};
 // Starting up threads in constructor is a no no; Interface should have an 
init call.
 this.disruptor.start();
   }
@@ -1426,7 +1429,7 @@ public class FSHLog implements WAL {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
   // result.
-  this.syncFuturesByHandler.remove(Thread.currentThread());
+  this.cachedSyncFutures.remove();
   throw tioe;
 } catch (InterruptedException ie) {
   LOG.warn("Interrupted", ie);
@@ -1444,12 +1447,7 @@ public class FSHLog implements WAL {
   }
 
   private SyncFuture getSyncFuture(final long sequence, Span span) {
-SyncFuture syncFuture = 
this.syncFuturesByHandler.get(Thread.currentThread());
-if (syncFuture == null) {
-  syncFuture = new SyncFuture();
-  this.syncFuturesByHandler.put(Thread.currentThread(), syncFuture);
-}
-return syncFuture.reset(sequence, span);
+return cachedSyncFutures.get().reset(sequence);
   }
 
   private void postSync(final long timeInNanos, final int handlerSyncs) {



hbase git commit: HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never clean later

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 98b1feac7 -> 86cb8e48a


HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never 
clean later


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/86cb8e48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/86cb8e48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/86cb8e48

Branch: refs/heads/master
Commit: 86cb8e48ad8aecf52bca1169a98607c76198c70b
Parents: 98b1fea
Author: Allan Yang 
Authored: Thu Sep 27 16:24:17 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 16:24:17 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 27 
 1 file changed, 11 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/86cb8e48/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 2b45a04..b342eff 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -34,8 +34,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.OptionalLong;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -268,14 +265,11 @@ public abstract class AbstractFSWAL 
implements WAL {
 new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR);
 
   /**
-   * Map of {@link SyncFuture}s keyed by Handler objects. Used so we reuse 
SyncFutures.
+   * Map of {@link SyncFuture}s owned by Thread objects. Used so we reuse 
SyncFutures.
+   * Thread local is used so JVM can GC the terminated thread for us. See 
HBASE-21228
* 
-   * TODO: Reuse FSWALEntry's rather than create them anew each time as we do 
SyncFutures here.
-   * 
-   * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather 
than have them get
-   * them from this Map?
*/
-  private final ConcurrentMap syncFuturesByHandler;
+  private final ThreadLocal cachedSyncFutures;
 
   /**
* The class name of the runtime implementation, used as prefix for 
logging/tracing.
@@ -429,9 +423,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 .toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
 this.walSyncTimeoutNs = TimeUnit.MILLISECONDS
 .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
-int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 
200);
-// Presize our map of SyncFutures by handler objects.
-this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount);
+this.cachedSyncFutures = new ThreadLocal() {
+  @Override
+  protected SyncFuture initialValue() {
+return new SyncFuture();
+  }
+};
 this.implClassName = getClass().getSimpleName();
   }
 
@@ -730,7 +727,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
   // result.
-  this.syncFuturesByHandler.remove(Thread.currentThread());
+  this.cachedSyncFutures.remove();
   throw tioe;
 } catch (InterruptedException ie) {
   LOG.warn("Interrupted", ie);
@@ -879,9 +876,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   }
 
   protected final SyncFuture getSyncFuture(long sequence) {
-return CollectionUtils
-.computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence);
+return cachedSyncFutures.get().reset(sequence);
   }
 
   protected final void requestLogRoll(boolean tooFewReplicas) {



hbase git commit: HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never clean later

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 12ea0cf37 -> 7b71903ba


HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never 
clean later


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b71903b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b71903b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b71903b

Branch: refs/heads/branch-2
Commit: 7b71903bafc2da4111bd31ef99faac5031ba45df
Parents: 12ea0cf
Author: Allan Yang 
Authored: Thu Sep 27 16:11:47 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 16:11:47 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 27 
 1 file changed, 11 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b71903b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 9b31834..b493fd6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -34,8 +34,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.OptionalLong;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -268,14 +265,11 @@ public abstract class AbstractFSWAL 
implements WAL {
 new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR);
 
   /**
-   * Map of {@link SyncFuture}s keyed by Handler objects. Used so we reuse 
SyncFutures.
+   * Map of {@link SyncFuture}s owned by Thread objects. Used so we reuse 
SyncFutures.
+   * Thread local is used so JVM can GC the terminated thread for us. See 
HBASE-21228
* 
-   * TODO: Reuse FSWALEntry's rather than create them anew each time as we do 
SyncFutures here.
-   * 
-   * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather 
than have them get
-   * them from this Map?
*/
-  private final ConcurrentMap syncFuturesByHandler;
+  private final ThreadLocal cachedSyncFutures;
 
   /**
* The class name of the runtime implementation, used as prefix for 
logging/tracing.
@@ -429,9 +423,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 .toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
 this.walSyncTimeoutNs = TimeUnit.MILLISECONDS
 .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
-int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 
200);
-// Presize our map of SyncFutures by handler objects.
-this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount);
+this.cachedSyncFutures = new ThreadLocal() {
+  @Override
+  protected SyncFuture initialValue() {
+return new SyncFuture();
+  }
+};
 this.implClassName = getClass().getSimpleName();
   }
 
@@ -723,7 +720,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
   // result.
-  this.syncFuturesByHandler.remove(Thread.currentThread());
+  this.cachedSyncFutures.remove();
   throw tioe;
 } catch (InterruptedException ie) {
   LOG.warn("Interrupted", ie);
@@ -873,9 +870,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   }
 
   protected final SyncFuture getSyncFuture(long sequence) {
-return CollectionUtils
-.computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence);
+return cachedSyncFutures.get().reset(sequence);
   }
 
   protected final void requestLogRoll(boolean tooFewReplicas) {



hbase git commit: HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never clean later

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 5169cfc8c -> eb2725126


HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never 
clean later


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb272512
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb272512
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb272512

Branch: refs/heads/branch-2.1
Commit: eb272512650a1926214ec37da1d4976a18228fb6
Parents: 5169cfc
Author: Allan Yang 
Authored: Thu Sep 27 15:07:07 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 15:07:07 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 27 
 1 file changed, 11 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb272512/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 9b31834..b493fd6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -34,8 +34,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.OptionalLong;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -268,14 +265,11 @@ public abstract class AbstractFSWAL 
implements WAL {
 new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR);
 
   /**
-   * Map of {@link SyncFuture}s keyed by Handler objects. Used so we reuse 
SyncFutures.
+   * Map of {@link SyncFuture}s owned by Thread objects. Used so we reuse 
SyncFutures.
+   * Thread local is used so JVM can GC the terminated thread for us. See 
HBASE-21228
* 
-   * TODO: Reuse FSWALEntry's rather than create them anew each time as we do 
SyncFutures here.
-   * 
-   * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather 
than have them get
-   * them from this Map?
*/
-  private final ConcurrentMap syncFuturesByHandler;
+  private final ThreadLocal cachedSyncFutures;
 
   /**
* The class name of the runtime implementation, used as prefix for 
logging/tracing.
@@ -429,9 +423,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 .toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
 this.walSyncTimeoutNs = TimeUnit.MILLISECONDS
 .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
-int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 
200);
-// Presize our map of SyncFutures by handler objects.
-this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount);
+this.cachedSyncFutures = new ThreadLocal() {
+  @Override
+  protected SyncFuture initialValue() {
+return new SyncFuture();
+  }
+};
 this.implClassName = getClass().getSimpleName();
   }
 
@@ -723,7 +720,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
   // result.
-  this.syncFuturesByHandler.remove(Thread.currentThread());
+  this.cachedSyncFutures.remove();
   throw tioe;
 } catch (InterruptedException ie) {
   LOG.warn("Interrupted", ie);
@@ -873,9 +870,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   }
 
   protected final SyncFuture getSyncFuture(long sequence) {
-return CollectionUtils
-.computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence);
+return cachedSyncFutures.get().reset(sequence);
   }
 
   protected final void requestLogRoll(boolean tooFewReplicas) {



hbase git commit: HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never clean later

2018-09-27 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 b4818fffd -> 493ca10e6


HBASE-21228 Memory leak since AbstractFSWAL caches Thread object and never 
clean later


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/493ca10e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/493ca10e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/493ca10e

Branch: refs/heads/branch-2.0
Commit: 493ca10e6a911221f072f15a52ecd373c59ea316
Parents: b4818ff
Author: Allan Yang 
Authored: Thu Sep 27 15:00:30 2018 +0800
Committer: Allan Yang 
Committed: Thu Sep 27 15:00:30 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 27 
 1 file changed, 11 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/493ca10e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 9b31834..b493fd6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -34,8 +34,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.OptionalLong;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -268,14 +265,11 @@ public abstract class AbstractFSWAL 
implements WAL {
 new ConcurrentSkipListMap<>(LOG_NAME_COMPARATOR);
 
   /**
-   * Map of {@link SyncFuture}s keyed by Handler objects. Used so we reuse 
SyncFutures.
+   * Map of {@link SyncFuture}s owned by Thread objects. Used so we reuse 
SyncFutures.
+   * Thread local is used so JVM can GC the terminated thread for us. See 
HBASE-21228
* 
-   * TODO: Reuse FSWALEntry's rather than create them anew each time as we do 
SyncFutures here.
-   * 
-   * TODO: Add a FSWalEntry and SyncFuture as thread locals on handlers rather 
than have them get
-   * them from this Map?
*/
-  private final ConcurrentMap syncFuturesByHandler;
+  private final ThreadLocal cachedSyncFutures;
 
   /**
* The class name of the runtime implementation, used as prefix for 
logging/tracing.
@@ -429,9 +423,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 .toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
 this.walSyncTimeoutNs = TimeUnit.MILLISECONDS
 .toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
-int maxHandlersCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, 
200);
-// Presize our map of SyncFutures by handler objects.
-this.syncFuturesByHandler = new ConcurrentHashMap<>(maxHandlersCount);
+this.cachedSyncFutures = new ThreadLocal() {
+  @Override
+  protected SyncFuture initialValue() {
+return new SyncFuture();
+  }
+};
 this.implClassName = getClass().getSimpleName();
   }
 
@@ -723,7 +720,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
   // result.
-  this.syncFuturesByHandler.remove(Thread.currentThread());
+  this.cachedSyncFutures.remove();
   throw tioe;
 } catch (InterruptedException ie) {
   LOG.warn("Interrupted", ie);
@@ -873,9 +870,7 @@ public abstract class AbstractFSWAL 
implements WAL {
   }
 
   protected final SyncFuture getSyncFuture(long sequence) {
-return CollectionUtils
-.computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence);
+return cachedSyncFutures.get().reset(sequence);
   }
 
   protected final void requestLogRoll(boolean tooFewReplicas) {



hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-09-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a00adb0b4 -> 823b79000


HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/823b7900
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/823b7900
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/823b7900

Branch: refs/heads/branch-1
Commit: 823b7942602467c7595136ec706d0b767062
Parents: a00adb0
Author: Allan Yang 
Authored: Wed Sep 26 19:40:43 2018 +0800
Committer: Allan Yang 
Committed: Wed Sep 26 19:40:43 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/823b7900/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0605663..90a5f92 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2734,7 +2734,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 status.setStatus(msg);
 
 if (rsServices != null && rsServices.getMetrics() != null) {
-  rsServices.getMetrics().updateFlush(time - startTime,
+  rsServices.getMetrics().updateFlush(time,
 totalFlushableSizeOfFlushableStores, flushedOutputFileSize);
 }
 



hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-09-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 0e173d38b -> 7b2f5950e


HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b2f5950
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b2f5950
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b2f5950

Branch: refs/heads/master
Commit: 7b2f5950edeec0153c01d4da85280d40eedc169a
Parents: 0e173d3
Author: Allan Yang 
Authored: Wed Sep 26 19:38:15 2018 +0800
Committer: Allan Yang 
Committed: Wed Sep 26 19:38:15 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b2f5950/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3e5a543..fc33512 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2858,7 +2858,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
 if (rsServices != null && rsServices.getMetrics() != null) {
   
rsServices.getMetrics().updateFlush(getTableDescriptor().getTableName().getNameAsString(),
-  time - startTime,
+  time,
   mss.getDataSize(), flushedOutputFileSize);
 }
 



hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-09-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 21fafbaf5 -> f3c3c7c42


HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3c3c7c4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3c3c7c4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3c3c7c4

Branch: refs/heads/branch-2
Commit: f3c3c7c4226941ec34900d9d3d16521f40a66a2f
Parents: 21fafba
Author: Allan Yang 
Authored: Wed Sep 26 19:17:35 2018 +0800
Committer: Allan Yang 
Committed: Wed Sep 26 19:17:35 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3c3c7c4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7afd46f..e63f0c4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2818,7 +2818,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
 if (rsServices != null && rsServices.getMetrics() != null) {
   
rsServices.getMetrics().updateFlush(getTableDescriptor().getTableName().getNameAsString(),
-  time - startTime,
+  time,
   mss.getDataSize(), flushedOutputFileSize);
 }
 



hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-09-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 a4e72544f -> ba8a25216


HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba8a2521
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba8a2521
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba8a2521

Branch: refs/heads/branch-2.1
Commit: ba8a252167ea3f671446ab2d8c718368e6ff19b7
Parents: a4e7254
Author: Allan Yang 
Authored: Wed Sep 26 19:11:23 2018 +0800
Committer: Allan Yang 
Committed: Wed Sep 26 19:11:23 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ba8a2521/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 088a800..f713df0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2787,7 +2787,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 status.setStatus(msg);
 
 if (rsServices != null && rsServices.getMetrics() != null) {
-  rsServices.getMetrics().updateFlush(time - startTime,
+  rsServices.getMetrics().updateFlush(time,
   mss.getDataSize(), flushedOutputFileSize);
 }
 



hbase git commit: HBASE-21212 Wrong flush time when update flush metric

2018-09-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 cf915f9c7 -> b4818fffd


HBASE-21212 Wrong flush time when update flush metric


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4818fff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4818fff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4818fff

Branch: refs/heads/branch-2.0
Commit: b4818fffd38129b2420ef832b16ac1f3d34377ee
Parents: cf915f9
Author: Allan Yang 
Authored: Wed Sep 26 19:05:38 2018 +0800
Committer: Allan Yang 
Committed: Wed Sep 26 19:05:38 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4818fff/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7a92d50..298e22a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2770,7 +2770,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 status.setStatus(msg);
 
 if (rsServices != null && rsServices.getMetrics() != null) {
-  rsServices.getMetrics().updateFlush(time - startTime,
+  rsServices.getMetrics().updateFlush(time,
   mss.getDataSize(), flushedOutputFileSize);
 }
 



hbase git commit: HBASE-21113 Apply the branch-2 version of HBASE-21095, The timeout retry logic for several procedures are broken after master restarts(addendum)

2018-08-26 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 d954031d5 -> 33fa32d71


HBASE-21113 Apply the branch-2 version of HBASE-21095, The timeout retry logic 
for several procedures are broken after master restarts(addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33fa32d7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33fa32d7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33fa32d7

Branch: refs/heads/branch-2.1
Commit: 33fa32d7116b86118ff007cc6447d7d63f7976f9
Parents: d954031
Author: Allan Yang 
Authored: Sun Aug 26 22:15:49 2018 +0800
Committer: Allan Yang 
Committed: Sun Aug 26 22:15:49 2018 +0800

--
 .../hbase/master/assignment/TestUnexpectedStateException.java  | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/33fa32d7/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestUnexpectedStateException.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestUnexpectedStateException.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestUnexpectedStateException.java
index 16648c0..868b93b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestUnexpectedStateException.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestUnexpectedStateException.java
@@ -69,8 +69,6 @@ public class TestUnexpectedStateException {
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-
TEST_UTIL.getConfiguration().setBoolean("hbase.localcluster.assign.random.ports",
 false);
-TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, 50655);
 TEST_UTIL.startMiniCluster();
   }
 



  1   2   >