hbase git commit: HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate either quota disabled or quota manager not ready is not good (Stephen Yuan Jiang)

2017-05-23 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 dbd72f9b0 -> c964b660d


HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate 
either quota disabled or quota manager not ready is not good (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c964b660
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c964b660
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c964b660

Branch: refs/heads/branch-1.1
Commit: c964b660df7321d23c7229010f717ba1bec5f1ef
Parents: dbd72f9
Author: Stephen Yuan Jiang 
Authored: Tue May 23 13:10:07 2017 -0700
Committer: Stephen Yuan Jiang 
Committed: Tue May 23 13:15:52 2017 -0700

--
 .../hbase/master/snapshot/SnapshotManager.java  |  6 +--
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 45 ++--
 .../hbase/namespace/TestNamespaceAuditor.java   |  6 +--
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c964b660/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index a51d4c6..25d9972 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -794,7 +794,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
   
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkNamespaceTableAndRegionQuota(tableName,
 manifest.getRegionManifestsMap().size());
 }
@@ -802,7 +802,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceRegionQuota(int updatedRegionCount, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkAndUpdateNamespaceRegionQuota(tableName,
 updatedRegionCount);
 }
@@ -812,7 +812,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* @return cached region count, or -1 if quota manager is disabled or table 
status not found
   */
   private int getRegionCountOfTable(TableName tableName) throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   return 
this.master.getMasterQuotaManager().getRegionCountOfTable(tableName);
 }
 return -1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c964b660/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 38d334f..25a3fad 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Master Quota Manager. It is responsible for initialize the quota table on 
the first-run and
@@ -50,7 +51,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock namespaceLocks;
   private NamedLock tableLocks;
   private NamedLock userLocks;
-  private boolean enabled = false;
+  private boolean initialized = false;
   private NamespaceAuditor namespaceQuotaManager;
 
   public MasterQuotaManager(final MasterServices masterServices) {
@@ -78,14 +79,14 @@ public class MasterQuotaManager implements 
RegionStateListener {
 
 namespaceQuotaManager = new NamespaceAuditor(masterServices);
 

hbase git commit: HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate either quota disabled or quota manager not ready is not good (Stephen Yuan Jiang)

2017-05-23 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 6ee7a4932 -> e5e47fc99


HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate 
either quota disabled or quota manager not ready is not good (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5e47fc9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5e47fc9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5e47fc9

Branch: refs/heads/branch-1.2
Commit: e5e47fc99a557e7986fb39b2c035384f4137da90
Parents: 6ee7a49
Author: Stephen Yuan Jiang 
Authored: Tue May 23 13:10:07 2017 -0700
Committer: Stephen Yuan Jiang 
Committed: Tue May 23 13:13:31 2017 -0700

--
 .../hbase/master/snapshot/SnapshotManager.java  |  6 +--
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 45 ++--
 .../hbase/namespace/TestNamespaceAuditor.java   |  6 +--
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5e47fc9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 2ee7bc9..1e63084 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -793,7 +793,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkNamespaceTableAndRegionQuota(tableName,
 manifest.getRegionManifestsMap().size());
 }
@@ -801,7 +801,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceRegionQuota(int updatedRegionCount, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkAndUpdateNamespaceRegionQuota(tableName,
 updatedRegionCount);
 }
@@ -811,7 +811,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* @return cached region count, or -1 if quota manager is disabled or table 
status not found
   */
   private int getRegionCountOfTable(TableName tableName) throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   return 
this.master.getMasterQuotaManager().getRegionCountOfTable(tableName);
 }
 return -1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e5e47fc9/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 1beecf7..8d3c684 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Master Quota Manager. It is responsible for initialize the quota table on 
the first-run and
@@ -50,7 +51,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock namespaceLocks;
   private NamedLock tableLocks;
   private NamedLock userLocks;
-  private boolean enabled = false;
+  private boolean initialized = false;
   private NamespaceAuditor namespaceQuotaManager;
 
   public MasterQuotaManager(final MasterServices masterServices) {
@@ -78,14 +79,14 @@ public class MasterQuotaManager implements 
RegionStateListener {
 
 namespaceQuotaManager = new NamespaceAuditor(masterServices);
 namespaceQuotaManager.start();

hbase git commit: HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate either quota disabled or quota manager not ready is not good (Stephen Yuan Jiang)

2017-05-23 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 d1b1eab10 -> fbde5ed6b


HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate 
either quota disabled or quota manager not ready is not good (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fbde5ed6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fbde5ed6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fbde5ed6

Branch: refs/heads/branch-1.3
Commit: fbde5ed6bda13ed6261ec6fefcbcfc835da57fd6
Parents: d1b1eab
Author: Stephen Yuan Jiang 
Authored: Tue May 23 13:10:07 2017 -0700
Committer: Stephen Yuan Jiang 
Committed: Tue May 23 13:11:28 2017 -0700

--
 .../hbase/master/snapshot/SnapshotManager.java  |  6 +--
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 45 ++--
 .../hbase/namespace/TestNamespaceAuditor.java   |  6 +--
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fbde5ed6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index e60043f..d9cfa22 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -793,7 +793,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkNamespaceTableAndRegionQuota(tableName,
 manifest.getRegionManifestsMap().size());
 }
@@ -801,7 +801,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceRegionQuota(int updatedRegionCount, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkAndUpdateNamespaceRegionQuota(tableName,
 updatedRegionCount);
 }
@@ -811,7 +811,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* @return cached region count, or -1 if quota manager is disabled or table 
status not found
   */
   private int getRegionCountOfTable(TableName tableName) throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   return 
this.master.getMasterQuotaManager().getRegionCountOfTable(tableName);
 }
 return -1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/fbde5ed6/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 5237393..9bfa8db 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Master Quota Manager. It is responsible for initialize the quota table on 
the first-run and
@@ -50,7 +51,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock namespaceLocks;
   private NamedLock tableLocks;
   private NamedLock userLocks;
-  private boolean enabled = false;
+  private boolean initialized = false;
   private NamespaceAuditor namespaceQuotaManager;
 
   public MasterQuotaManager(final MasterServices masterServices) {
@@ -78,14 +79,14 @@ public class MasterQuotaManager implements 
RegionStateListener {
 
 namespaceQuotaManager = new NamespaceAuditor(masterServices);
 namespaceQuotaManager.start();

hbase git commit: HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate either quota disabled or quota manager not ready is not good (Stephen Yuan Jiang)

2017-05-23 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/branch-1 8c313d5be -> 50708d952


HBASE-18093 Overloading the meaning of 'enabled' in Quota Manager to indicate 
either quota disabled or quota manager not ready is not good (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50708d95
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50708d95
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50708d95

Branch: refs/heads/branch-1
Commit: 50708d9524c7575324bf277c8ca0d3d711eb46be
Parents: 8c313d5
Author: Stephen Yuan Jiang 
Authored: Tue May 23 13:10:07 2017 -0700
Committer: Stephen Yuan Jiang 
Committed: Tue May 23 13:10:07 2017 -0700

--
 .../hbase/master/snapshot/SnapshotManager.java  |  6 +--
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 45 ++--
 .../hbase/namespace/TestNamespaceAuditor.java   |  6 +--
 3 files changed, 37 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/50708d95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 9c50571..4e0181f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -795,7 +795,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkNamespaceTableAndRegionQuota(tableName,
 manifest.getRegionManifestsMap().size());
 }
@@ -803,7 +803,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   private void checkAndUpdateNamespaceRegionQuota(int updatedRegionCount, 
TableName tableName)
   throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   
this.master.getMasterQuotaManager().checkAndUpdateNamespaceRegionQuota(tableName,
 updatedRegionCount);
 }
@@ -813,7 +813,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
* @return cached region count, or -1 if quota manager is disabled or table 
status not found
   */
   private int getRegionCountOfTable(TableName tableName) throws IOException {
-if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
+if (this.master.getMasterQuotaManager().isQuotaInitialized()) {
   return 
this.master.getMasterQuotaManager().getRegionCountOfTable(tableName);
 }
 return -1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/50708d95/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 5237393..9bfa8db 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Master Quota Manager. It is responsible for initialize the quota table on 
the first-run and
@@ -50,7 +51,7 @@ public class MasterQuotaManager implements 
RegionStateListener {
   private NamedLock namespaceLocks;
   private NamedLock tableLocks;
   private NamedLock userLocks;
-  private boolean enabled = false;
+  private boolean initialized = false;
   private NamespaceAuditor namespaceQuotaManager;
 
   public MasterQuotaManager(final MasterServices masterServices) {
@@ -78,14 +79,14 @@ public class MasterQuotaManager implements 
RegionStateListener {
 
 namespaceQuotaManager = new NamespaceAuditor(masterServices);
 namespaceQuotaManager.start();
-

[01/28] hbase git commit: HBASE-18094 Display the return value of the command append [Forced Update!]

2017-05-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14614 f24362c15 -> 4143c0176 (forced update)


HBASE-18094 Display the return value of the command append

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ebe92c8f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ebe92c8f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ebe92c8f

Branch: refs/heads/HBASE-14614
Commit: ebe92c8fb3153367531aac3cf2b60d65f782083d
Parents: 9e7b0c1
Author: Guangxu Cheng 
Authored: Tue May 23 17:59:05 2017 +0800
Committer: tedyu 
Committed: Tue May 23 08:59:05 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/table.rb   | 8 +++-
 hbase-shell/src/main/ruby/shell/commands/append.rb | 4 +++-
 hbase-shell/src/test/ruby/hbase/table_test.rb  | 1 +
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 946c72c..3185939 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -286,7 +286,13 @@ EOF
 set_op_ttl(append, ttl) if ttl
   end
   append.add(family, qualifier, value.to_s.to_java_bytes)
-  @table.append(append)
+  result = @table.append(append)
+  return nil if result.isEmpty
+
+  # Fetch cell value
+  cell = result.listCells[0]
+  org.apache.hadoop.hbase.util.Bytes::toStringBinary(cell.getValueArray,
+cell.getValueOffset, cell.getValueLength)
 end
 
 
#--

http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/main/ruby/shell/commands/append.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb 
b/hbase-shell/src/main/ruby/shell/commands/append.rb
index 93a4317..c2fb9c1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append.rb
@@ -42,7 +42,9 @@ EOF
   end
 
   def append(table, row, column, value, args={})
-table._append_internal(row, column, value, args)
+if current_value = table._append_internal(row, column, value, args)
+  puts "CURRENT VALUE = #{current_value}"
+end
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/test/ruby/hbase/table_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb 
b/hbase-shell/src/test/ruby/hbase/table_test.rb
index 53d0ca9..90ed7fc 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -195,6 +195,7 @@ module Hbase
 
 define_test "append should work with value" do
   @test_table.append("123", 'x:cnt2', '123')
+  assert_equal("123123", @test_table._append_internal("123", 'x:cnt2', 
'123'))
 end
 
#---
 



[03/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
index e6e90ef..47b1248 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import java.util.Random;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -42,17 +46,19 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 import static org.junit.Assert.*;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestProcedureAdmin {
   private static final Log LOG = LogFactory.getLog(TestProcedureAdmin.class);
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
+  @Rule public TestName name = new TestName();
 
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
-  @Rule
-  public TestName name = new TestName();
 
   private static void setupConf(Configuration conf) {
 conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
index 9141e0f..bed8b4f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
@@ -18,11 +18,17 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
@@ -47,6 +53,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -56,6 +63,8 @@ import static org.junit.Assert.assertTrue;
 @Category({MasterTests.class, MediumTests.class})
 public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase {
   private static final Log LOG = 
LogFactory.getLog(TestRestoreSnapshotProcedure.class);
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
 
   protected final TableName snapshotTableName = 
TableName.valueOf("testRestoreSnapshot");
   protected final byte[] CF1 = Bytes.toBytes("cf1");
@@ -202,8 +211,7 @@ public class TestRestoreSnapshotProcedure extends 
TestTableDDLProcedureBase {
   new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, 
snapshot));
 
 // Restart the executor and execute the step twice
-int numberOfSteps = RestoreSnapshotState.values().length;
-MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId, numberOfSteps);
+MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId);
 
 resetProcExecutorTestingKillFlag();
 validateSnapshotRestore();

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java

[18/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
deleted file mode 100644
index 929cd4e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.lang.Thread.UncaughtExceptionHandler;
-import java.util.concurrent.Executors;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Server;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * Base class used bulk assigning and unassigning regions.
- * Encapsulates a fixed size thread pool of executors to run 
assignment/unassignment.
- * Implement {@link #populatePool(java.util.concurrent.ExecutorService)} and
- * {@link #waitUntilDone(long)}.  The default implementation of
- * the {@link #getUncaughtExceptionHandler()} is to abort the hosting
- * Server.
- */
-@InterfaceAudience.Private
-public abstract class BulkAssigner {
-  protected final Server server;
-
-  /**
-   * @param server An instance of Server
-   */
-  public BulkAssigner(final Server server) {
-this.server = server;
-  }
-
-  /**
-   * @return What to use for a thread prefix when executor runs.
-   */
-  protected String getThreadNamePrefix() {
-return this.server.getServerName() + "-" + this.getClass().getName(); 
-  }
-
-  protected UncaughtExceptionHandler getUncaughtExceptionHandler() {
-return new UncaughtExceptionHandler() {
-  @Override
-  public void uncaughtException(Thread t, Throwable e) {
-// Abort if exception of any kind.
-server.abort("Uncaught exception in " + t.getName(), e);
-  }
-};
-  }
-
-  protected int getThreadCount() {
-return this.server.getConfiguration().
-  getInt("hbase.bulk.assignment.threadpool.size", 20);
-  }
-
-  protected long getTimeoutOnRIT() {
-return this.server.getConfiguration().
-  getLong("hbase.bulk.assignment.waiton.empty.rit", 5 * 60 * 1000);
-  }
-
-  protected abstract void populatePool(
-  final java.util.concurrent.ExecutorService pool) throws IOException;
-
-  public boolean bulkAssign() throws InterruptedException, IOException {
-return bulkAssign(true);
-  }
-
-  /**
-   * Run the bulk assign.
-   * 
-   * @param sync
-   *  Whether to assign synchronously.
-   * @throws InterruptedException
-   * @return True if done.
-   * @throws IOException
-   */
-  public boolean bulkAssign(boolean sync) throws InterruptedException,
-  IOException {
-boolean result = false;
-ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
-builder.setDaemon(true);
-builder.setNameFormat(getThreadNamePrefix() + "-%1$d");
-builder.setUncaughtExceptionHandler(getUncaughtExceptionHandler());
-int threadCount = getThreadCount();
-java.util.concurrent.ExecutorService pool =
-  Executors.newFixedThreadPool(threadCount, builder.build());
-try {
-  populatePool(pool);
-  // How long to wait on empty regions-in-transition.  If we timeout, the
-  // RIT monitor should do fixup.
-  if (sync) result = waitUntilDone(getTimeoutOnRIT());
-} finally {
-  // We're done with the pool.  It'll exit when its done all in queue.
-  pool.shutdown();
-}
-return result;
-  }
-
-  /**
-   * Wait until bulk assign is done.
-   * @param timeout How long to wait.
-   * @throws InterruptedException
-   * @return True if the condition we were waiting on happened.
-   */
-  protected abstract boolean waitUntilDone(final long timeout)
-  throws InterruptedException;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
--
diff --git 

[27/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 1bb6118..bc73453 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -32,6 +32,8 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.DelayQueue;
@@ -113,9 +115,11 @@ public class ProcedureExecutor {
* Internal cleaner that removes the completed procedure results after a TTL.
* NOTE: This is a special case handled in timeoutLoop().
*
-   * Since the client code looks more or less like:
+   * Since the client code looks more or less like:
+   * 
*   procId = master.doOperation()
*   while (master.getProcResult(procId) == ProcInProgress);
+   * 
* The master should not throw away the proc result as soon as the procedure 
is done
* but should wait a result request from the client (see 
executor.removeResult(procId))
* The client will call something like master.isProcDone() or 
master.getProcResult()
@@ -480,10 +484,10 @@ public class ProcedureExecutor {
 // We have numThreads executor + one timer thread used for timing out
 // procedures and triggering periodic procedures.
 this.corePoolSize = numThreads;
-LOG.info("Starting executor worker threads=" + corePoolSize);
+LOG.info("Starting ProcedureExecutor Worker threads (ProcExecWrkr)=" + 
corePoolSize);
 
 // Create the Thread Group for the executors
-threadGroup = new ThreadGroup("ProcedureExecutor");
+threadGroup = new ThreadGroup("ProcExecThrdGrp");
 
 // Create the timeout executor
 timeoutExecutor = new TimeoutExecutorThread(threadGroup);
@@ -1077,13 +1081,16 @@ public class ProcedureExecutor {
 final Long rootProcId = getRootProcedureId(proc);
 if (rootProcId == null) {
   // The 'proc' was ready to run but the root procedure was rolledback
+  LOG.warn("Rollback because parent is done/rolledback proc=" + proc);
   executeRollback(proc);
   return;
 }
 
 final RootProcedureState procStack = rollbackStack.get(rootProcId);
-if (procStack == null) return;
-
+if (procStack == null) {
+  LOG.warn("RootProcedureState is null for " + proc.getProcId());
+  return;
+}
 do {
   // Try to acquire the execution
   if (!procStack.acquire(proc)) {
@@ -1097,6 +1104,7 @@ public class ProcedureExecutor {
   scheduler.yield(proc);
   break;
 case LOCK_EVENT_WAIT:
+  LOG.info("DEBUG LOCK_EVENT_WAIT rollback..." + proc);
   procStack.unsetRollback();
   break;
 default:
@@ -1114,6 +1122,7 @@ public class ProcedureExecutor {
 scheduler.yield(proc);
 break;
   case LOCK_EVENT_WAIT:
+LOG.info("DEBUG LOCK_EVENT_WAIT can't rollback child 
running?..." + proc);
 break;
   default:
 throw new UnsupportedOperationException();
@@ -1125,16 +1134,21 @@ public class ProcedureExecutor {
 
   // Execute the procedure
   assert proc.getState() == ProcedureState.RUNNABLE : proc;
-  switch (acquireLock(proc)) {
+  // Note that lock is NOT about concurrency but rather about ensuring
+  // ownership of a procedure of an entity such as a region or table.
+  LockState lockState = acquireLock(proc);
+  switch (lockState) {
 case LOCK_ACQUIRED:
   execProcedure(procStack, proc);
   releaseLock(proc, false);
   break;
 case LOCK_YIELD_WAIT:
+  LOG.info(lockState + " " + proc);
   scheduler.yield(proc);
   break;
 case LOCK_EVENT_WAIT:
-  // someone will wake us up when the lock is available
+  // Someone will wake us up when the lock is available
+  LOG.debug(lockState + " " + proc);
   break;
 default:
   throw new UnsupportedOperationException();
@@ -1150,10 +1164,7 @@ public class ProcedureExecutor {
   if (proc.isSuccess()) {
 // update metrics on finishing the procedure
 proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true);
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Finished " + proc + " in " + 

[22/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index 2435564..1ccf488 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -2210,7 +2210,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_num = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getReqNumFieldBuilder() {
 if (reqNumBuilder_ == null) {
   reqNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2328,7 +2328,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_size = 2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getReqSizeFieldBuilder() {
 if (reqSizeBuilder_ == null) {
   reqSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2446,7 +2446,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_num = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getWriteNumFieldBuilder() {
 if (writeNumBuilder_ == null) {
   writeNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2564,7 +2564,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_size = 4;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getWriteSizeFieldBuilder() {
 if (writeSizeBuilder_ == null) {
   writeSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2682,7 +2682,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota read_num = 5;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 

[09/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
deleted file mode 100644
index 7791ea7..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ /dev/null
@@ -1,695 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Executors;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.conf.ConfigurationManager;
-import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
-import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.StealJobQueue;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * Compact region on request and then run split if appropriate
- */
-@InterfaceAudience.Private
-public class CompactSplitThread implements CompactionRequestor, 
PropagatingConfigurationObserver {
-  private static final Log LOG = LogFactory.getLog(CompactSplitThread.class);
-
-  // Configuration key for the large compaction threads.
-  public final static String LARGE_COMPACTION_THREADS =
-  "hbase.regionserver.thread.compaction.large";
-  public final static int LARGE_COMPACTION_THREADS_DEFAULT = 1;
-  
-  // Configuration key for the small compaction threads.
-  public final static String SMALL_COMPACTION_THREADS =
-  "hbase.regionserver.thread.compaction.small";
-  public final static int SMALL_COMPACTION_THREADS_DEFAULT = 1;
-  
-  // Configuration key for split threads
-  public final static String SPLIT_THREADS = "hbase.regionserver.thread.split";
-  public final static int SPLIT_THREADS_DEFAULT = 1;
-
-  public static final String REGION_SERVER_REGION_SPLIT_LIMIT =
-  "hbase.regionserver.regionSplitLimit";
-  public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000;
-
-  private final HRegionServer server;
-  private final Configuration conf;
-
-  private final ThreadPoolExecutor longCompactions;
-  private final ThreadPoolExecutor shortCompactions;
-  private final ThreadPoolExecutor splits;
-
-  private volatile ThroughputController compactionThroughputController;
-
-  /**
-   * Splitting should not take place if the total number of regions exceed 
this.
-   * This is not a hard limit to the number of regions but it is a guideline to
-   * stop splitting after number of online regions is greater than this.
-   */
-  private int regionSplitLimit;
-
-  /** @param server */
-  CompactSplitThread(HRegionServer server) {
-super();
-this.server = server;
-this.conf = server.getConfiguration();
-this.regionSplitLimit = 

[19/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
deleted file mode 100644
index 69ebd97..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ /dev/null
@@ -1,3053 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionStateListener;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-import org.apache.hadoop.hbase.ipc.FailedServerException;
-import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.quotas.QuotaExceededException;
-import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
-import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.KeyLocker;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.RetryCounter;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import 

[10/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
deleted file mode 100644
index bf9afd7..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
+++ /dev/null
@@ -1,785 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * The procedure to split a region in a table.
- */
-@InterfaceAudience.Private
-public class SplitTableRegionProcedure
-extends AbstractStateMachineTableProcedure {
-  private static final Log LOG = 
LogFactory.getLog(SplitTableRegionProcedure.class);
-
-  private Boolean traceEnabled;
-
-  /*
-   * Region to split
-   */
-  private HRegionInfo parentHRI;
-  private HRegionInfo daughter_1_HRI;
-  private HRegionInfo daughter_2_HRI;
-
-  public SplitTableRegionProcedure() {
-this.traceEnabled = null;
-  }
-
-  public SplitTableRegionProcedure(final MasterProcedureEnv env,
-  final HRegionInfo regionToSplit, final byte[] splitRow) throws 
IOException {
-super(env);
-
-checkSplitRow(regionToSplit, splitRow);
-
-this.traceEnabled = null;
-this.parentHRI = regionToSplit;
-
-final TableName table = regionToSplit.getTable();
-final long rid = getDaughterRegionIdTimestamp(regionToSplit);
-this.daughter_1_HRI = new HRegionInfo(table, regionToSplit.getStartKey(), 
splitRow, false, rid);
-this.daughter_2_HRI = new HRegionInfo(table, splitRow, 
regionToSplit.getEndKey(), false, rid);

[25/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 711b9c8..812cf3b 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -728,6 +728,40 @@ public final class AdminProtos {
  * optional bool isRecovering = 3;
  */
 boolean getIsRecovering();
+
+/**
+ * 
+ * True if region is splittable, false otherwise.
+ * 
+ *
+ * optional bool splittable = 4;
+ */
+boolean hasSplittable();
+/**
+ * 
+ * True if region is splittable, false otherwise.
+ * 
+ *
+ * optional bool splittable = 4;
+ */
+boolean getSplittable();
+
+/**
+ * 
+ * True if region is mergeable, false otherwise.
+ * 
+ *
+ * optional bool mergeable = 5;
+ */
+boolean hasMergeable();
+/**
+ * 
+ * True if region is mergeable, false otherwise.
+ * 
+ *
+ * optional bool mergeable = 5;
+ */
+boolean getMergeable();
   }
   /**
* Protobuf type {@code hbase.pb.GetRegionInfoResponse}
@@ -743,6 +777,8 @@ public final class AdminProtos {
 private GetRegionInfoResponse() {
   compactionState_ = 0;
   isRecovering_ = false;
+  splittable_ = false;
+  mergeable_ = false;
 }
 
 @java.lang.Override
@@ -802,6 +838,16 @@ public final class AdminProtos {
   isRecovering_ = input.readBool();
   break;
 }
+case 32: {
+  bitField0_ |= 0x0008;
+  splittable_ = input.readBool();
+  break;
+}
+case 40: {
+  bitField0_ |= 0x0010;
+  mergeable_ = input.readBool();
+  break;
+}
   }
 }
   } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
@@ -987,6 +1033,52 @@ public final class AdminProtos {
   return isRecovering_;
 }
 
+public static final int SPLITTABLE_FIELD_NUMBER = 4;
+private boolean splittable_;
+/**
+ * 
+ * True if region is splittable, false otherwise.
+ * 
+ *
+ * optional bool splittable = 4;
+ */
+public boolean hasSplittable() {
+  return ((bitField0_ & 0x0008) == 0x0008);
+}
+/**
+ * 
+ * True if region is splittable, false otherwise.
+ * 
+ *
+ * optional bool splittable = 4;
+ */
+public boolean getSplittable() {
+  return splittable_;
+}
+
+public static final int MERGEABLE_FIELD_NUMBER = 5;
+private boolean mergeable_;
+/**
+ * 
+ * True if region is mergeable, false otherwise.
+ * 
+ *
+ * optional bool mergeable = 5;
+ */
+public boolean hasMergeable() {
+  return ((bitField0_ & 0x0010) == 0x0010);
+}
+/**
+ * 
+ * True if region is mergeable, false otherwise.
+ * 
+ *
+ * optional bool mergeable = 5;
+ */
+public boolean getMergeable() {
+  return mergeable_;
+}
+
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
   byte isInitialized = memoizedIsInitialized;
@@ -1016,6 +1108,12 @@ public final class AdminProtos {
   if (((bitField0_ & 0x0004) == 0x0004)) {
 output.writeBool(3, isRecovering_);
   }
+  if (((bitField0_ & 0x0008) == 0x0008)) {
+output.writeBool(4, splittable_);
+  }
+  if (((bitField0_ & 0x0010) == 0x0010)) {
+output.writeBool(5, mergeable_);
+  }
   unknownFields.writeTo(output);
 }
 
@@ -1036,6 +1134,14 @@ public final class AdminProtos {
 size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
   .computeBoolSize(3, isRecovering_);
   }
+  if (((bitField0_ & 0x0008) == 0x0008)) {
+size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+  .computeBoolSize(4, splittable_);
+  }
+  if (((bitField0_ & 0x0010) == 0x0010)) {
+size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+  .computeBoolSize(5, mergeable_);
+  }
   size += unknownFields.getSerializedSize();
   memoizedSize = size;
   return size;
@@ -1067,6 +1173,16 @@ public final class AdminProtos {
 result = result && (getIsRecovering()
 == other.getIsRecovering());
   }
+  result = result && 

[04/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java
new file mode 100644
index 000..6824597
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestSplitTableRegionProcedure.java
@@ -0,0 +1,428 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestSplitTableRegionProcedure {
+  private static final Log LOG = 
LogFactory.getLog(TestSplitTableRegionProcedure.class);
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+  withTimeout(this.getClass()).withLookingForStuckThread(true).build();
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static String ColumnFamilyName1 = "cf1";
+  private static String ColumnFamilyName2 = "cf2";
+
+  private static final int startRowNum = 11;
+  private static final int rowCount = 60;
+
+  @Rule
+  public TestName name = new TestName();
+
+  private static void setupConf(Configuration conf) {
+conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+setupConf(UTIL.getConfiguration());
+UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+try {
+  UTIL.shutdownMiniCluster();
+} catch (Exception e) {
+  LOG.warn("failure shutting down cluster", e);
+}
+  }
+
+  @Before
+  public void setup() throws Exception {
+
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(),
 false);
+
+// Turn off balancer so it doesn't cut in and mess up our placements.
+

[12/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
index 4d67edd..4f4b5b1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -21,34 +21,20 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.BulkAssigner;
-import org.apache.hadoop.hbase.master.GeneralBulkAssigner;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 @InterfaceAudience.Private
 public class EnableTableProcedure
@@ -114,7 +100,7 @@ public class EnableTableProcedure
 setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
 break;
   case ENABLE_TABLE_MARK_REGIONS_ONLINE:
-markRegionsOnline(env, tableName, true);
+
addChildProcedure(env.getAssignmentManager().createAssignProcedures(tableName));
 setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE);
 break;
   case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
@@ -287,137 +273,6 @@ public class EnableTableProcedure
   }
 
   /**
-   * Mark offline regions of the table online with retry
-   * @param env MasterProcedureEnv
-   * @param tableName the target table
-   * @param retryRequired whether to retry if the first run failed
-   * @throws IOException
-   */
-  protected static void markRegionsOnline(
-  final MasterProcedureEnv env,
-  final TableName tableName,
-  final Boolean retryRequired) throws IOException {
-// This is best effort approach to make all regions of a table online.  If 
we fail to do
-// that, it is ok that the table has some offline regions; user can fix it 
manually.
-
-// Dev consideration: add a config to control max number of retry. For 
now, it is hard coded.
-int maxTry = (retryRequired ? 10 : 1);
-boolean done = false;
-
-do {
-  try {
-done = markRegionsOnline(env, tableName);
-if (done) {
-  break;
-}
-maxTry--;
-  } catch (Exception e) {
-LOG.warn("Received exception while marking regions online. tries left: 
" + maxTry, e);
-maxTry--;
-if (maxTry > 0) {
-  continue; // we still have some retry left, try again.
-}
-throw e;
-  }
-} while (maxTry > 0);
-
-if (!done) {
-  LOG.warn("Some or all regions of the Table '" + tableName + "' were 
offline");
-}
-  }
-
-  /**
-   * Mark offline regions of the table online
-   * @param env MasterProcedureEnv
-   * @param tableName the target table
-   * @return whether the operation is fully completed or being interrupted.
-   * @throws IOException
-   */
-  private static boolean markRegionsOnline(final MasterProcedureEnv env, final 
TableName tableName)
-  throws IOException {
-final AssignmentManager assignmentManager = 
env.getMasterServices().getAssignmentManager();
-final MasterServices masterServices = env.getMasterServices();
-final ServerManager serverManager = masterServices.getServerManager();
-boolean done = false;
-// Get the regions of this table. We're done when all listed
-// tables are onlined.
-List> tableRegionsAndLocations;
-
-if 

[17/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
deleted file mode 100644
index 3a2a6d7..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ /dev/null
@@ -1,268 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.MultiHConnection;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * A helper to persist region state in meta. We may change this class
- * to StateStore later if we also use it to store other states in meta
- */
-@InterfaceAudience.Private
-public class RegionStateStore {
-  private static final Log LOG = LogFactory.getLog(RegionStateStore.class);
-
-  /** The delimiter for meta columns for replicaIds  0 */
-  protected static final char META_REPLICA_ID_DELIMITER = '_';
-
-  private volatile Region metaRegion;
-  private volatile boolean initialized;
-  private MultiHConnection multiHConnection;
-  private final MasterServices server;
-
-  /**
-   * Returns the {@link ServerName} from catalog table {@link Result}
-   * where the region is transitioning. It should be the same as
-   * {@link MetaTableAccessor#getServerName(Result,int)} if the server is at 
OPEN state.
-   * @param r Result to pull the transitioning server name from
-   * @return A ServerName instance or {@link 
MetaTableAccessor#getServerName(Result,int)}
-   * if necessary fields not found or empty.
-   */
-  static ServerName getRegionServer(final Result r, int replicaId) {
-Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, 
getServerNameColumn(replicaId));
-if (cell == null || cell.getValueLength() == 0) {
-  RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
-  if (locations != null) {
-HRegionLocation location = locations.getRegionLocation(replicaId);
-if (location != null) {
-  return location.getServerName();
-}
-  }
-  return null;
-}
-return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
-  cell.getValueOffset(), cell.getValueLength()));
-  }
-
-  private static byte[] getServerNameColumn(int replicaId) {
-return replicaId == 0
-? HConstants.SERVERNAME_QUALIFIER
-: Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + 
META_REPLICA_ID_DELIMITER
-  + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
-  }
-
-  /**
-   * Pull the region state from a catalog table {@link Result}.
-   * @param r Result to pull the region state from
-   * @return the region state, or OPEN if there's no value written.
-   */
-  static State getRegionState(final Result r, int replicaId) {
- 

[16/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
new file mode 100644
index 000..809902f
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -0,0 +1,1814 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.RegionException;
+import org.apache.hadoop.hbase.RegionStateListener;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
+import org.apache.hadoop.hbase.master.AssignmentListener;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
+import org.apache.hadoop.hbase.master.NoSuchProcedureException;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
+// TODO: why are they here?
+import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.quotas.QuotaExceededException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import 

[15/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
new file mode 100644
index 000..05766f7
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.master.MasterServices;
+import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCRegionState;
+
+import com.google.common.collect.Lists;
+
+/**
+ * GC a Region that is no longer in use. It has been split or merged away.
+ * Caller determines if it is GC time. This Procedure does not check.
+ * This is a Region StateMachine Procedure. We take a read lock on the 
Table and then
+ * exclusive on the Region.
+ */
+@InterfaceAudience.Private
+public class GCRegionProcedure extends 
AbstractStateMachineRegionProcedure {
+  private static final Log LOG = LogFactory.getLog(GCRegionProcedure.class);
+
+  public GCRegionProcedure(final MasterProcedureEnv env, final HRegionInfo 
hri) {
+super(env, hri);
+  }
+
+  public GCRegionProcedure() {
+// Required by the Procedure framework to create the procedure on replay
+super();
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+return TableOperationType.REGION_GC;
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, GCRegionState state)
+  throws ProcedureSuspendedException, ProcedureYieldException, 
InterruptedException {
+if (LOG.isTraceEnabled()) {
+  LOG.trace(this + " execute state=" + state);
+}
+MasterServices masterServices = env.getMasterServices();
+try {
+  switch (state) {
+  case GC_REGION_PREPARE:
+// Nothing to do to prepare.
+setNextState(GCRegionState.GC_REGION_ARCHIVE);
+break;
+  case GC_REGION_ARCHIVE:
+FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
+if (HFileArchiver.exists(masterServices.getConfiguration(), fs, 
getRegion())) {
+  if (LOG.isDebugEnabled()) LOG.debug("Archiving region=" + 
getRegion().getShortNameToLog());
+  HFileArchiver.archiveRegion(masterServices.getConfiguration(), fs, 
getRegion());
+}
+setNextState(GCRegionState.GC_REGION_PURGE_METADATA);
+break;
+  case GC_REGION_PURGE_METADATA:
+// TODO: Purge metadata before removing from HDFS? This ordering is 
copied
+// from CatalogJanitor.
+AssignmentManager am = masterServices.getAssignmentManager();
+if (am != null) {
+  if (am.getRegionStates() != null) {
+am.getRegionStates().deleteRegion(getRegion());
+  }
+}
+MetaTableAccessor.deleteRegion(masterServices.getConnection(), 
getRegion());
+masterServices.getServerManager().removeRegion(getRegion());
+FavoredNodesManager fnm = masterServices.getFavoredNodesManager();
+if 

[07/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
deleted file mode 100644
index 23e61f6..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ /dev/null
@@ -1,1403 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.JVMClusterUtil;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-
-/**
- * This tests AssignmentManager with a testing cluster.
- */
-@SuppressWarnings("deprecation")
-@Category({MasterTests.class, MediumTests.class})
-public class TestAssignmentManagerOnCluster {
-  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
-  private final static HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();
-  final static Configuration conf = TEST_UTIL.getConfiguration();
-  private static Admin admin;
-
-  @Rule
-  public TestName name = new TestName();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
- 

[21/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index b886f5c..299b55e 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -8822,1348 +8822,6 @@ public final class RegionServerStatusProtos {
 
   }
 
-  public interface SplitTableRegionRequestOrBuilder extends
-  // 
@@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionRequest)
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-boolean hasRegionInfo();
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegionInfo();
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder
 getRegionInfoOrBuilder();
-
-/**
- * required bytes split_row = 2;
- */
-boolean hasSplitRow();
-/**
- * required bytes split_row = 2;
- */
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString 
getSplitRow();
-
-/**
- * optional uint64 nonce_group = 3 [default = 0];
- */
-boolean hasNonceGroup();
-/**
- * optional uint64 nonce_group = 3 [default = 0];
- */
-long getNonceGroup();
-
-/**
- * optional uint64 nonce = 4 [default = 0];
- */
-boolean hasNonce();
-/**
- * optional uint64 nonce = 4 [default = 0];
- */
-long getNonce();
-  }
-  /**
-   * 
-   **
-   * Splits the specified region.
-   * 
-   *
-   * Protobuf type {@code hbase.pb.SplitTableRegionRequest}
-   */
-  public  static final class SplitTableRegionRequest extends
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
-  // 
@@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionRequest)
-  SplitTableRegionRequestOrBuilder {
-// Use SplitTableRegionRequest.newBuilder() to construct.
-private 
SplitTableRegionRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
-  super(builder);
-}
-private SplitTableRegionRequest() {
-  splitRow_ = 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
-  nonceGroup_ = 0L;
-  nonce_ = 0L;
-}
-
-@java.lang.Override
-public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-getUnknownFields() {
-  return this.unknownFields;
-}
-private SplitTableRegionRequest(
-org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
-
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
-throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
-  this();
-  int mutable_bitField0_ = 0;
-  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
-  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-  try {
-boolean done = false;
-while (!done) {
-  int tag = input.readTag();
-  switch (tag) {
-case 0:
-  done = true;
-  break;
-default: {
-  if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
-done = true;
-  }
-  break;
-}
-case 10: {
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder
 subBuilder = null;
-  if (((bitField0_ & 0x0001) == 0x0001)) {
-subBuilder = regionInfo_.toBuilder();
-  }
-  regionInfo_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER,
 extensionRegistry);
-  if (subBuilder != null) {
-subBuilder.mergeFrom(regionInfo_);
-regionInfo_ = subBuilder.buildPartial();
-  }
-  bitField0_ |= 0x0001;
-  break;
-}
-case 18: {
-  bitField0_ |= 0x0002;
-  splitRow_ = input.readBytes();
-  break;
-}
- 

[08/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index eefde94..a99345b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -302,16 +302,6 @@ public class MockRegionServerServices implements 
RegionServerServices {
   }
 
   @Override
-  public long requestRegionSplit(final HRegionInfo regionInfo, final byte[] 
splitRow) {
-return -1;
-  }
-
-  @Override
-  public boolean isProcedureFinished(final long procId) {
-return false;
-  }
-
-  @Override
   public boolean registerService(Service service) {
 // TODO Auto-generated method stub
 return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 283d79d..cff1a8d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -21,13 +21,18 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
@@ -37,21 +42,18 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
 /**
  * Test whether region re-balancing works. (HBASE-71)
  */
+@Ignore // This is broken since new RegionServers does proper average of 
regions
+// and because Master is treated as a regionserver though it hosts two regions 
only.
 @Category({FlakeyTests.class, LargeTests.class})
 @RunWith(value = Parameterized.class)
 public class TestRegionRebalancing {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 7b69db4..f84d9c2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -42,23 +42,18 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import 

[28/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
HBASE-18087 Fix unit tests in TestTableFavoredNodes

AssignmentManager call getFavoredNodes() on LoadBalancer and expects 
non-empty result. Adding getFavoredNodes() method to FavoredStochasticBalancer 
fixes broken unit tests.

Signed-off-by: Michael Stack 

Fix CatalogTracker. Make it use Procedures doing clean up of Region
data on split/merge. Without these changes, ITBLL was failing at
larger scale (3-4hours 5B rows) because we were splitting split
Regions.

Added a bunch of doc. on Procedure primitives.

Added new region-based state machine base class. Moved region-based
state machines on to it.

Found bugs in the way procedure locking was doing in a few of the
region-based Procedures. Having them all have same subclass helps here.

Added isSplittable and isMergeable to the Region Interface.

Master would split/merge even though the Regions still had
references. Fixed it so Master asks RegionServer if Region
is splittable.

Messing more w/ logging. Made all procedures log the same and report
the state the same; helps when logging is regular.

Rewrote TestCatalogTracker. Enabled TestMergeTableRegionProcedure.

Added more functionality to MockMasterServices so can use it doing
standalone testing of Procedures (made TestCatalogTracker use it
instead of its own version).

Trying to find who sets server and regionState to null around
servercrashprocedure add DEBUG. Ditto for why we do a suspend
though we have not done dispatch (on a retry)

Add to MasterServices ability to wait on Master being up -- makes
it so can Mock Master and start to implement standalone split testing.
Start in on a Split region standalone test in TestAM.

Fix bug where a Split can fail because it comes in in the middle of
a Move (by holding lock for duration of a Move).

HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)
Move to a new AssignmentManager, one that describes Assignment using
a State Machine built on top of ProcedureV2 facility.

Includes four patches from Matteos' repository and then fix up to get 
it all to
pass, filling in some missing functionality, fix of findbugs, fixing 
bugs, etc..

This doc. keeps state on where we are at w/ the new AM:

https://docs.google.com/document/d/1eVKa7FHdeoJ1-9o8yZcOTAQbv0u0bblBlCCzVSIn69g/edit#heading=h.vfdoxqut9lqn
Includes list of tests disabled by this patch with reasons why.

I applied the two patches in one go because applying each independently 
puts
hbase in a non-working state.

1. HBASE-14616 Procedure v2 - Replace the old AM with the new AM
The basis comes from Matteo's repo here:
  
https://github.com/matteobertozzi/hbase/commit/689227fcbfe8e6588433dbcdabf4526e3d478b2e

Patch replaces old AM with the new under subpackage master.assignment.
Mostly just updating classes to use new AM -- import changes -- rather
than the old. It also removes old AM and supporting classes.
See below for more detail.

2. HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)
Adds running of remote procedure. Adds batching of remote calls.
Adds support for assign/unassign in procedures. Adds version info
reporting in rpc. Adds start of an AMv2.

3. and 4. are fixes around merge and split.

This work mostly comes from:

https://github.com/matteobertozzi/hbase/commit/3622cba4e331d2fc7bfc1932abb4c9cbf5802efa

Reporting of remote RS version is from here:

https://github.com/matteobertozzi/hbase/commit/ddb4df3964e8298c88c0210e83493aa91ac0942d.patch

And remote dispatch of procedures is from:

https://github.com/matteobertozzi/hbase/commit/186b9e7c4dae61a79509a6c3aad7f80ec61345e5

The split merge patches from here are also melded in:

https://github.com/matteobertozzi/hbase/commit/9a3a95a2c2974842a4849d1ad867e70764e7f707
and 
https://github.com/matteobertozzi/hbase/commit/d6289307a02a777299f65238238a2a8af3253067

Adds testing util for new AM and new sets of tests.

Does a bunch of fixup on logging so its possible to follow a procedures'
narrative by grepping procedure id. We spewed loads of log too on big
transitions such as master fail; fixed.

Details:

M hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
 Takes List of regionstates on construction rather than a Set.
 NOTE! This is a change in a public class.

M hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
 Add utility getShortNameToLog

M 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
M 

[05/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
new file mode 100644
index 000..d558aaf
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -0,0 +1,358 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.SortedSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.MasterWalManager;
+import org.apache.hadoop.hbase.master.MockNoopMasterServices;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+import org.apache.hadoop.hbase.security.Superusers;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * A mocked master services.
+ * Tries to fake it. May not always work.
+ */
+public class MockMasterServices extends MockNoopMasterServices {
+  private final MasterFileSystem fileSystemManager;
+  private final MasterWalManager walManager;
+  private final AssignmentManager assignmentManager;
+
+  private MasterProcedureEnv procedureEnv;
+  private ProcedureExecutor procedureExecutor;
+  private ProcedureStore procedureStore;
+  private final ClusterConnection connection;
+  private final LoadBalancer balancer;
+  private final ServerManager 

[20/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
index 454e3bc..4d5953c 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
@@ -357,7 +357,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -373,7 +373,7 @@ public final class SnapshotProtos {
 getNameBytes() {
   java.lang.Object ref = name_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 name_ = b;
@@ -407,7 +407,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -427,7 +427,7 @@ public final class SnapshotProtos {
 getTableBytes() {
   java.lang.Object ref = table_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 table_ = b;
@@ -499,7 +499,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -515,7 +515,7 @@ public final class SnapshotProtos {
 getOwnerBytes() {
   java.lang.Object ref = owner_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 owner_ = b;
@@ -1047,7 +1047,7 @@ public final class SnapshotProtos {
   getNameBytes() {
 java.lang.Object ref = name_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   name_ = b;
@@ -1135,7 +1135,7 @@ public final class SnapshotProtos {
   getTableBytes() {
 java.lang.Object ref = table_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   table_ = b;
@@ -1323,7 +1323,7 @@ public final class SnapshotProtos {
   getOwnerBytes() {
 java.lang.Object ref = owner_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   owner_ = b;
@@ 

[02/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java
index 5b8b404..d31d8cb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -25,10 +26,14 @@ import 
org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestSecureAsyncWALReplay extends TestAsyncWALReplay {
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index e2aa580..2758d4d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -22,16 +22,22 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestWALReplay extends AbstractTestWALReplay {
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -48,4 +54,4 @@ public class TestWALReplay extends AbstractTestWALReplay {
 HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
 return wal;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
index 4bb97d3..d8666b6 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
@@ -57,7 +57,11 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 /**
- * Performs checks for reference counting w.r.t. TableAuthManager which is 
used by AccessController.
+ * Performs checks for reference counting w.r.t. TableAuthManager which is 
used by
+ * AccessController.
+ *
+ * NOTE: Only one test in  here. In AMv2, there is problem deleting because
+ * we are missing auth. For now disabled. See the cleanup method.
  */
 @Category({SecurityTests.class, MediumTests.class})
 public class TestAccessController3 extends SecureTestUtil {
@@ -200,7 +204,7 @@ public class TestAccessController3 extends SecureTestUtil {
   TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
   rs = 

[11/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
new file mode 100644
index 000..887e272
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -0,0 +1,541 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import com.google.common.collect.ArrayListMultimap;
+
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * A remote procedure dispatcher for regionservers.
+ */
+public class RSProcedureDispatcher
+extends RemoteProcedureDispatcher
+implements ServerListener {
+  private static final Log LOG = 
LogFactory.getLog(RSProcedureDispatcher.class);
+
+  public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY =
+  "hbase.regionserver.rpc.startup.waittime";
+  private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME = 6;
+
+  private static final int RS_VERSION_WITH_EXEC_PROCS = 0x0201000; // 2.1
+
+  protected final MasterServices master;
+  protected final long rsStartupWaitTime;
+
+  public RSProcedureDispatcher(final MasterServices master) {
+super(master.getConfiguration());
+
+this.master = master;
+this.rsStartupWaitTime = master.getConfiguration().getLong(
+  RS_RPC_STARTUP_WAIT_TIME_CONF_KEY, DEFAULT_RS_RPC_STARTUP_WAIT_TIME);
+  }
+
+  @Override
+  public boolean start() {
+if (!super.start()) {
+  return false;
+}
+
+master.getServerManager().registerListener(this);
+for (ServerName serverName: 
master.getServerManager().getOnlineServersList()) {
+  addNode(serverName);
+}
+return true;
+  }
+
+  @Override
+  public boolean stop() {
+if (!super.stop()) {
+  return false;
+}
+
+master.getServerManager().unregisterListener(this);
+return true;
+  }
+
+  @Override
+  protected void remoteDispatch(final ServerName serverName,
+  final Set operations) {
+final int rsVersion = 
master.getAssignmentManager().getServerVersion(serverName);
+if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) {
+  LOG.info(String.format(
+"Using procedure batch rpc execution for serverName=%s version=%s",
+serverName, rsVersion));
+  submitTask(new 

[24/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
index d7bbd05..5c72331 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
@@ -1734,100 +1734,201 @@ public final class MasterProcedureProtos {
   }
 
   /**
-   * Protobuf enum {@code hbase.pb.MergeTableRegionsState}
+   * Protobuf enum {@code hbase.pb.DispatchMergingRegionsState}
*/
-  public enum MergeTableRegionsState
+  public enum DispatchMergingRegionsState
   implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
 /**
- * MERGE_TABLE_REGIONS_PREPARE = 1;
+ * DISPATCH_MERGING_REGIONS_PREPARE = 1;
  */
-MERGE_TABLE_REGIONS_PREPARE(1),
+DISPATCH_MERGING_REGIONS_PREPARE(1),
 /**
- * MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2;
+ * DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;
  */
-MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS(2),
+DISPATCH_MERGING_REGIONS_PRE_OPERATION(2),
 /**
- * MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3;
+ * DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;
  */
-MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION(3),
+DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS(3),
 /**
- * MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4;
+ * DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;
  */
-MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE(4),
+DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS(4),
 /**
- * MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5;
+ * DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;
  */
-MERGE_TABLE_REGIONS_CLOSE_REGIONS(5),
+DISPATCH_MERGING_REGIONS_POST_OPERATION(5),
+;
+
 /**
- * MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6;
+ * DISPATCH_MERGING_REGIONS_PREPARE = 1;
  */
-MERGE_TABLE_REGIONS_CREATE_MERGED_REGION(6),
+public static final int DISPATCH_MERGING_REGIONS_PREPARE_VALUE = 1;
 /**
- * MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7;
+ * DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;
  */
-MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION(7),
+public static final int DISPATCH_MERGING_REGIONS_PRE_OPERATION_VALUE = 2;
 /**
- * MERGE_TABLE_REGIONS_UPDATE_META = 8;
+ * DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;
  */
-MERGE_TABLE_REGIONS_UPDATE_META(8),
+public static final int 
DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS_VALUE = 3;
 /**
- * MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9;
+ * DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;
  */
-MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION(9),
+public static final int DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS_VALUE = 4;
 /**
- * MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10;
+ * DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;
  */
-MERGE_TABLE_REGIONS_OPEN_MERGED_REGION(10),
+public static final int DISPATCH_MERGING_REGIONS_POST_OPERATION_VALUE = 5;
+
+
+public final int getNumber() {
+  return value;
+}
+
 /**
- * MERGE_TABLE_REGIONS_POST_OPERATION = 11;
+ * @deprecated Use {@link #forNumber(int)} instead.
  */
-MERGE_TABLE_REGIONS_POST_OPERATION(11),
-;
+@java.lang.Deprecated
+public static DispatchMergingRegionsState valueOf(int value) {
+  return forNumber(value);
+}
+
+public static DispatchMergingRegionsState forNumber(int value) {
+  switch (value) {
+case 1: return DISPATCH_MERGING_REGIONS_PREPARE;
+case 2: return DISPATCH_MERGING_REGIONS_PRE_OPERATION;
+case 3: return DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS;
+case 4: return DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS;
+case 5: return DISPATCH_MERGING_REGIONS_POST_OPERATION;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+DispatchMergingRegionsState> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public DispatchMergingRegionsState findValueByNumber(int number) {
+  return DispatchMergingRegionsState.forNumber(number);
+}
+  };
+
+public final 

[26/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
index 06a4e01..e83a7ac 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
@@ -1024,7 +1024,7 @@ public final class AccessControlProtos {
* optional .hbase.pb.GlobalPermission global_permission = 
2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder>
 
   getGlobalPermissionFieldBuilder() {
 if (globalPermissionBuilder_ == null) {
   globalPermissionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -1142,7 +1142,7 @@ public final class AccessControlProtos {
* optional .hbase.pb.NamespacePermission namespace_permission = 
3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermissionOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermissionOrBuilder>
 
   getNamespacePermissionFieldBuilder() {
 if (namespacePermissionBuilder_ == null) {
   namespacePermissionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -1260,7 +1260,7 @@ public final class AccessControlProtos {
* optional .hbase.pb.TablePermission table_permission = 4;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermissionOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermissionOrBuilder>
 
   getTablePermissionFieldBuilder() {
 if (tablePermissionBuilder_ == null) {
   tablePermissionBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2074,7 +2074,7 @@ public final class AccessControlProtos {
* optional .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -4130,7 +4130,7 @@ public final class AccessControlProtos {
* required .hbase.pb.Permission permission = 3;
*/
   private 

[06/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 32bce26..59e8fb3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -27,8 +27,11 @@ import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.NavigableMap;
 import java.util.SortedMap;
+import java.util.SortedSet;
 import java.util.TreeMap;
+import java.util.concurrent.ConcurrentSkipListMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,267 +39,141 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaMockingUtil;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
-import 
org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.Reference;
 import 
org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException;
-import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.master.assignment.MockMasterServices;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Triple;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
+import org.junit.rules.TestRule;
 
 @Category({MasterTests.class, SmallTests.class})
 public class TestCatalogJanitor {
   private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
-
-  @Rule
-  public TestName name = new TestName();
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+ withTimeout(this.getClass()).withLookingForStuckThread(true).build();
+  @Rule public final TestName name = new TestName();
+  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+  private MockMasterServices masterServices;
+  private 

[14/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
new file mode 100644
index 000..082e171
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -0,0 +1,969 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * RegionStates contains a set of Maps that describes the in-memory state of 
the AM, with
+ * the regions available in the system, the region in transition, the offline 
regions and
+ * the servers holding regions.
+ */
+@InterfaceAudience.Private
+public class RegionStates {
+  private static final Log LOG = LogFactory.getLog(RegionStates.class);
+
+  protected static final State[] STATES_EXPECTED_ON_OPEN = new State[] {
+State.OFFLINE, State.CLOSED,  // disable/offline
+State.SPLITTING, State.SPLIT, // ServerCrashProcedure
+State.OPENING, State.FAILED_OPEN, // already in-progress (retrying)
+  };
+
+  protected static final State[] STATES_EXPECTED_ON_CLOSE = new State[] {
+State.SPLITTING, State.SPLIT, // ServerCrashProcedure
+State.OPEN,   // enabled/open
+State.CLOSING // already in-progress (retrying)
+  };
+
+  private static class AssignmentProcedureEvent extends 
ProcedureEvent {
+public AssignmentProcedureEvent(final HRegionInfo regionInfo) {
+  super(regionInfo);
+}
+  }
+
+  private static class ServerReportEvent extends ProcedureEvent {
+public ServerReportEvent(final ServerName serverName) {
+  super(serverName);
+}
+  }
+
+  /**
+   * Current Region State.
+   * In-memory only. Not persisted.
+   */
+  // Mutable/Immutable? Changes have to be synchronized or not?
+  // Data members are volatile which seems to say multi-threaded access is 
fine.
+  // In the below we do check and set but the check state could change before
+  // we do the set because no synchronizationwhich seems dodgy. Clear up
+  // understanding here... how many threads accessing? Do locks make it so one
+  // thread at a time working on a single Region's RegionStateNode? Lets 
presume
+  // so for now. Odd is that elsewhere in this RegionStates, we synchronize on
+  // the RegionStateNode instance. TODO.
+  public static class RegionStateNode implements Comparable {
+private final HRegionInfo regionInfo;
+private final ProcedureEvent event;
+
+private volatile RegionTransitionProcedure procedure = null;
+private volatile ServerName regionLocation = null;
+private volatile ServerName lastHost = null;
+/**
+ * A Region-in-Transition (RIT) moves through states.
+ * See {@link State} for complete 

[13/28] hbase git commit: HBASE-18087 Fix unit tests in TestTableFavoredNodes

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/4143c017/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
new file mode 100644
index 000..126718a
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -0,0 +1,247 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ServerCrashException;
+import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+
+
+/**
+ * Procedure that describe the unassignment of a single region.
+ * There can only be one RegionTransitionProcedure per region running at the 
time,
+ * since each procedure takes a lock on the region.
+ *
+ * The Unassign starts by placing a "close region" request in the Remote 
Dispatcher
+ * queue, and the procedure will then go into a "waiting state".
+ * The Remote Dispatcher will batch the various requests for that server and
+ * they will be sent to the RS for execution.
+ * The RS will complete the open operation by calling 
master.reportRegionStateTransition().
+ * The AM will intercept the transition report, and notify the procedure.
+ * The procedure will finish the unassign by publishing its new state on meta
+ * or it will retry the unassign.
+ */
+@InterfaceAudience.Private
+public class UnassignProcedure extends RegionTransitionProcedure {
+  private static final Log LOG = LogFactory.getLog(UnassignProcedure.class);
+
+  /**
+   * Where to send the unassign RPC.
+   */
+  protected volatile ServerName destinationServer;
+
+  private final AtomicBoolean serverCrashed = new AtomicBoolean(false);
+
+  // TODO: should this be in a reassign procedure?
+  //   ...and keep unassign for 'disable' case?
+  private boolean force;
+
+  public UnassignProcedure() {
+// Required by the Procedure framework to create the procedure on replay
+super();
+  }
+
+  public UnassignProcedure(final HRegionInfo regionInfo,
+  final ServerName destinationServer, final boolean force) {
+super(regionInfo);
+this.destinationServer = destinationServer;
+this.force = force;
+
+// we don't need REGION_TRANSITION_QUEUE, we jump directly to sending the 
request
+setTransitionState(RegionTransitionState.REGION_TRANSITION_DISPATCH);
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+return 

hbase git commit: HBASE-18094 Display the return value of the command append

2017-05-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 abf03da41 -> 8c313d5be


HBASE-18094 Display the return value of the command append

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c313d5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c313d5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c313d5b

Branch: refs/heads/branch-1
Commit: 8c313d5be46a02c212d38a1ee782cbff570f007f
Parents: abf03da
Author: Guangxu Cheng 
Authored: Tue May 23 19:34:58 2017 +0800
Committer: tedyu 
Committed: Tue May 23 08:59:54 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/table.rb   | 7 ++-
 hbase-shell/src/main/ruby/shell/commands/append.rb | 6 --
 hbase-shell/src/test/ruby/hbase/table_test.rb  | 1 +
 3 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8c313d5b/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 057adca..36c6509 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -244,7 +244,12 @@ EOF
 set_op_ttl(append, ttl) if ttl
   end
   append.add(family, qualifier, value.to_s.to_java_bytes)
-  @table.append(append)
+  result = @table.append(append)
+  return nil if result.isEmpty
+
+  # Fetch cell value
+  cell = result.listCells[0]
+  org.apache.hadoop.hbase.util.Bytes::toStringBinary(cell.getValue)
 end
 
 
#--

http://git-wip-us.apache.org/repos/asf/hbase/blob/8c313d5b/hbase-shell/src/main/ruby/shell/commands/append.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb 
b/hbase-shell/src/main/ruby/shell/commands/append.rb
index a0ef36d..adf1184 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append.rb
@@ -40,8 +40,10 @@ EOF
   end
 
   def append(table, row, column, value, args={})
-   format_simple_command do
-   table._append_internal(row, column, value, args)
+format_simple_command do
+  if current_value = table._append_internal(row, column, value, args)
+puts "CURRENT VALUE = #{current_value}"
+  end
 end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/8c313d5b/hbase-shell/src/test/ruby/hbase/table_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb 
b/hbase-shell/src/test/ruby/hbase/table_test.rb
index 0fb5a14..b6801da 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -183,6 +183,7 @@ module Hbase
 
 define_test "append should work with value" do
   @test_table.append("123", 'x:cnt2', '123')
+  assert_equal("123123", @test_table._append_internal("123", 'x:cnt2', 
'123'))
 end
 
#---
 



hbase git commit: HBASE-18094 Display the return value of the command append

2017-05-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 9e7b0c1a4 -> ebe92c8fb


HBASE-18094 Display the return value of the command append

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ebe92c8f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ebe92c8f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ebe92c8f

Branch: refs/heads/master
Commit: ebe92c8fb3153367531aac3cf2b60d65f782083d
Parents: 9e7b0c1
Author: Guangxu Cheng 
Authored: Tue May 23 17:59:05 2017 +0800
Committer: tedyu 
Committed: Tue May 23 08:59:05 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/table.rb   | 8 +++-
 hbase-shell/src/main/ruby/shell/commands/append.rb | 4 +++-
 hbase-shell/src/test/ruby/hbase/table_test.rb  | 1 +
 3 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 946c72c..3185939 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -286,7 +286,13 @@ EOF
 set_op_ttl(append, ttl) if ttl
   end
   append.add(family, qualifier, value.to_s.to_java_bytes)
-  @table.append(append)
+  result = @table.append(append)
+  return nil if result.isEmpty
+
+  # Fetch cell value
+  cell = result.listCells[0]
+  org.apache.hadoop.hbase.util.Bytes::toStringBinary(cell.getValueArray,
+cell.getValueOffset, cell.getValueLength)
 end
 
 
#--

http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/main/ruby/shell/commands/append.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb 
b/hbase-shell/src/main/ruby/shell/commands/append.rb
index 93a4317..c2fb9c1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append.rb
@@ -42,7 +42,9 @@ EOF
   end
 
   def append(table, row, column, value, args={})
-table._append_internal(row, column, value, args)
+if current_value = table._append_internal(row, column, value, args)
+  puts "CURRENT VALUE = #{current_value}"
+end
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe92c8f/hbase-shell/src/test/ruby/hbase/table_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb 
b/hbase-shell/src/test/ruby/hbase/table_test.rb
index 53d0ca9..90ed7fc 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -195,6 +195,7 @@ module Hbase
 
 define_test "append should work with value" do
   @test_table.append("123", 'x:cnt2', '123')
+  assert_equal("123123", @test_table._append_internal("123", 'x:cnt2', 
'123'))
 end
 
#---
 



[36/50] [abbrv] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) b

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index d5846ce..5ea2044 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -39686,10 +39686,18 @@ public final class MasterProtos {
   org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 boolean hasScanResult();
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 int getScanResult();
@@ -39770,12 +39778,20 @@ public final class MasterProtos {
 public static final int SCAN_RESULT_FIELD_NUMBER = 1;
 private int scanResult_;
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 public boolean hasScanResult() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 public int getScanResult() {
@@ -40069,18 +40085,30 @@ public final class MasterProtos {
 
   private int scanResult_ ;
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public boolean hasScanResult() {
 return ((bitField0_ & 0x0001) == 0x0001);
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public int getScanResult() {
 return scanResult_;
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public Builder setScanResult(int value) {
@@ -40090,6 +40118,10 @@ public final class MasterProtos {
 return this;
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public Builder clearScanResult() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-protocol-shaded/src/main/protobuf/Admin.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 5577cb1..fe95fd5 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -39,6 +39,10 @@ message GetRegionInfoResponse {
   required RegionInfo region_info = 1;
   optional CompactionState compaction_state = 2;
   optional bool isRecovering = 3;
+  // True if region is splittable, false otherwise.
+  optional bool splittable = 4;
+  // True if region is mergeable, false otherwise.
+  optional bool mergeable = 5;
 
   enum CompactionState {
 NONE = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index bfb6aad..7015fcb 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -366,6 +366,7 @@ message RunCatalogScanRequest {
 }
 
 message RunCatalogScanResponse {
+  // This is how many archiving tasks we started as a result of this scan.
   optional int32 scan_result = 1;
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index fb50636..5951ee3 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ 

[42/50] [abbrv] hbase git commit: Undo OPENING state if we fail to dispatch

2017-05-23 Thread stack
Undo OPENING state if we fail to dispatch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1512812
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1512812
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1512812

Branch: refs/heads/HBASE-14614
Commit: e15128128d305954a934c30ccab4c3f9fd3d
Parents: c94c44d
Author: Michael Stack 
Authored: Sun May 7 01:31:38 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../master/assignment/AssignProcedure.java  |  9 
 .../master/assignment/AssignmentManager.java| 13 +++
 .../assignment/RegionTransitionProcedure.java   | 13 +--
 .../assignment/SplitTableRegionProcedure.java   | 24 
 .../master/assignment/UnassignProcedure.java|  2 --
 5 files changed, 38 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1512812/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
index e78ae22..8555925 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
@@ -221,16 +221,15 @@ public class AssignProcedure extends 
RegionTransitionProcedure {
   return false;
 }
 
-// region is now in OPENING state
+// Set OPENING in hbase:meta and add region to list of regions on server.
 env.getAssignmentManager().markRegionAsOpening(regionNode);
 
 // TODO: Requires a migration to be open by the RS?
 // regionNode.getFormatVersion()
 
-// Add the open region operation to the server dispatch queue.
-// The pending open will be dispatched to the server together with the 
other
-// pending operation for that server.
 addToRemoteDispatcher(env, regionNode.getRegionLocation());
+// We always return true, even if we fail dispatch because failure sets
+// state back to beginning so we retry assign.
 return true;
   }
 
@@ -279,6 +278,7 @@ public class AssignProcedure extends 
RegionTransitionProcedure {
 this.forceNewPlan = true;
 this.server = null;
 regionNode.offline();
+env.getAssignmentManager().undoRegionAsOpening(regionNode);
 setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE);
   }
 
@@ -302,7 +302,6 @@ public class AssignProcedure extends 
RegionTransitionProcedure {
   @Override
   protected void remoteCallFailed(final MasterProcedureEnv env, final 
RegionStateNode regionNode,
   final IOException exception) {
-// TODO: put the server in the bad list?
 handleFailure(env, regionNode);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1512812/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index ed55235..1e42ea6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -1430,6 +1430,9 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
+  /**
+   * @see #undoRegionAsOpening(RegionStateNode)
+   */
   public void markRegionAsOpening(final RegionStateNode regionNode) throws 
IOException {
 synchronized (regionNode) {
   State state = regionNode.transitionState(State.OPENING, 
RegionStates.STATES_EXPECTED_ON_OPEN);
@@ -1443,6 +1446,16 @@ public class AssignmentManager implements ServerListener 
{
 metrics.incrementOperationCounter();
   }
 
+  public void undoRegionAsOpening(final RegionStateNode regionNode) {
+// TODO: Metrics. Do opposite of metrics.incrementOperationCounter();
+synchronized (regionNode) {
+  if (regionNode.isInState(State.OPENING)) {
+regionStates.addRegionToServer(regionNode.getRegionLocation(), 
regionNode);
+  }
+  // Should we update hbase:meta?
+}
+  }
+
   public void markRegionAsOpened(final RegionStateNode regionNode) throws 
IOException {
 final HRegionInfo hri = regionNode.getRegionInfo();
 synchronized (regionNode) {


[32/50] [abbrv] hbase git commit: Add an exists

2017-05-23 Thread stack
Add an exists


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d523dec7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d523dec7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d523dec7

Branch: refs/heads/HBASE-14614
Commit: d523dec794085656ff2fdc33a25c031386e7407b
Parents: 53b865a
Author: Michael Stack 
Authored: Fri May 12 15:43:23 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hbase/procedure2/store/wal/ProcedureWALFile.java|  2 +-
 .../hbase/procedure2/store/wal/WALProcedureStore.java   | 12 +++-
 .../java/org/apache/hadoop/hbase/master/HMaster.java|  4 ++--
 3 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d523dec7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 42abe8f..95a1ef6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -160,7 +160,7 @@ public class ProcedureWALFile implements 
Comparable {
 boolean archived = false;
 if (walArchiveDir != null) {
   Path archivedFile = new Path(walArchiveDir, logFile.getName());
-  LOG.info("ARCHIVED (TODO: FILES ARE NOT PURGED FROM ARCHIVE!) " + 
logFile + " to " + walArchiveDir);
+  LOG.info("ARCHIVED (TODO: FILES ARE NOT PURGED FROM ARCHIVE!) " + 
logFile + " to " + archivedFile);
   if (!fs.rename(logFile, archivedFile)) {
 LOG.warn("Failed archive of " + logFile + ", deleting");
   } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d523dec7/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index df818fe..b64fd54 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -246,6 +246,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
   }
 };
 syncThread.start();
+
+// Create archive dir up front. Rename won't work w/o it up on HDFS.
+if (this.walArchiveDir != null && !this.fs.exists(this.walArchiveDir)) {
+  if (this.fs.mkdirs(this.walArchiveDir)) {
+if (LOG.isDebugEnabled()) LOG.debug("Created Procedure Store WAL 
archive dir " +
+this.walArchiveDir);
+  } else {
+LOG.warn("Failed create of " + this.walArchiveDir);
+  }
+}
   }
 
   @Override
@@ -1113,7 +1123,7 @@ public class WALProcedureStore extends ProcedureStoreBase 
{
   log.removeFile(walArchiveDir);
   logs.remove(log);
   if (LOG.isDebugEnabled()) {
-LOG.info("Removed log=" + log + " activeLogs=" + logs);
+LOG.info("Removed log=" + log + ", activeLogs=" + logs);
   }
   assert logs.size() > 0 : "expected at least one log";
 } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d523dec7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a07c436..d1fe35e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1163,7 +1163,7 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
 final Path walDir = new Path(FSUtils.getWALRootDir(this.conf),
 MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
-final Path archiveWalDir = new Path(new 
Path(FSUtils.getWALRootDir(this.conf),
+final Path walArchiveDir = new Path(new 
Path(FSUtils.getWALRootDir(this.conf),
 HConstants.HFILE_ARCHIVE_DIRECTORY), 
MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
 
 final FileSystem walFs = 

[48/50] [abbrv] hbase git commit: Check SPLIT state

2017-05-23 Thread stack
Check SPLIT state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5bab1e99
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5bab1e99
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5bab1e99

Branch: refs/heads/HBASE-14614
Commit: 5bab1e999fc50f33007145d1a67c9c7e590ce8e0
Parents: 7e9c84a
Author: Michael Stack 
Authored: Fri May 19 14:00:24 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:54 2017 -0700

--
 .../master/assignment/SplitTableRegionProcedure.java   | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5bab1e99/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 2b5f46b..a893783 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -342,11 +342,14 @@ public class SplitTableRegionProcedure
   parentHRI = node.getRegionInfo();
 
   // Lookup the parent HRI state from the AM, which has the latest updated 
info.
-  // Protect against the case where concurrent SPLIT requests came in. 
Check a SPLIT
-  // did not just run.
+  // Protect against the case where concurrent SPLIT requests came in and 
succeeded
+  // just before us.
+  if (node.isInState(State.SPLIT)) {
+LOG.info("Split of " + parentHRI + " skipped; state is already SPLIT");
+return false;
+  }
   if (parentHRI.isSplit() || parentHRI.isOffline()) {
-LOG.info("Split of " + parentHRI.getShortNameToLog() +
-" skipped because already offline/split.");
+LOG.info("Split of " + parentHRI + " skipped because offline/split.");
 return false;
   }
 
@@ -727,4 +730,4 @@ public class SplitTableRegionProcedure
 }
 return traceEnabled;
   }
-}
\ No newline at end of file
+}



[46/50] [abbrv] hbase git commit: Nothing log change

2017-05-23 Thread stack
Nothing log change


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12162ea7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12162ea7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12162ea7

Branch: refs/heads/HBASE-14614
Commit: 12162ea74913c1e12245b68d0ed685dcc77a2bc3
Parents: 5bab1e9
Author: Michael Stack 
Authored: Fri May 19 20:55:12 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:54 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12162ea7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index dc0c0a6..4775a0a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -121,7 +121,7 @@ public class CatalogJanitor extends ScheduledChore {
   && !am.hasRegionsInTransition()) {
 scan();
   } else {
-LOG.warn("CatalogJanitor disabled! enabled=" + this.enabled.get() +
+LOG.warn("CatalogJanitor is disabled! Enabled=" + this.enabled.get() +
 ", maintenanceMode=" + this.services.isInMaintenanceMode() +
 ", am=" + am + ", failoverCleanupDone=" + (am != null && 
am.isFailoverCleanupDone()) +
 ", hasRIT=" + (am != null && am.hasRegionsInTransition()));



[33/50] [abbrv] hbase git commit: Doc and adding undoMarkRegionAsOpening/Closing to undo OPENING state on failure

2017-05-23 Thread stack
Doc and adding undoMarkRegionAsOpening/Closing to undo OPENING state on failure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/599b5c64
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/599b5c64
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/599b5c64

Branch: refs/heads/HBASE-14614
Commit: 599b5c6446dc537b5fd6d498a7fd85a87ac5285c
Parents: e151281
Author: Michael Stack 
Authored: Sun May 7 13:56:09 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hbase/procedure2/ProcedureExecutor.java |  4 +-
 .../hbase/procedure2/ProcedureScheduler.java|  3 +-
 .../master/assignment/AssignProcedure.java  | 38 --
 .../master/assignment/AssignmentManager.java| 17 ++--
 .../hbase/master/assignment/RegionStates.java   |  4 ++
 .../assignment/RegionTransitionProcedure.java   | 42 +++-
 .../master/assignment/UnassignProcedure.java| 22 +++---
 .../assignment/TestAssignmentManager.java   |  3 +-
 8 files changed, 99 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/599b5c64/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index e819ae8..ffb09c9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1640,7 +1640,7 @@ public class ProcedureExecutor {
   int activeCount = activeExecutorCount.incrementAndGet();
   int runningCount = store.setRunningProcedureCount(activeCount);
   if (LOG.isDebugEnabled()) {
-LOG.debug("Run pid=" + procedure.getProcId() +
+LOG.debug("Execute pid=" + procedure.getProcId() +
 " runningCount=" + runningCount + ", activeCount=" + 
activeCount);
   }
   executionStartTime.set(EnvironmentEdgeManager.currentTime());
@@ -1653,7 +1653,7 @@ public class ProcedureExecutor {
 activeCount = activeExecutorCount.decrementAndGet();
 runningCount = store.setRunningProcedureCount(activeCount);
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Done pid=" + procedure.getProcId() +
+  LOG.debug("Leave pid=" + procedure.getProcId() +
   " runningCount=" + runningCount + ", activeCount=" + 
activeCount);
 }
 lastUpdate = EnvironmentEdgeManager.currentTime();

http://git-wip-us.apache.org/repos/asf/hbase/blob/599b5c64/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index b5295e7..a2ae514 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
@@ -93,7 +93,7 @@ public interface ProcedureScheduler {
 
   /**
* Mark the event as not ready.
-   * procedures calling waitEvent() will be suspended.
+   * Procedures calling waitEvent() will be suspended.
* @param event the event to mark as suspended/not ready
*/
   void suspendEvent(ProcedureEvent event);
@@ -125,6 +125,7 @@ public interface ProcedureScheduler {
* List lock queues.
* @return the locks
*/
+  // TODO: This seems to be the wrong place to hang this method.
   List listLocks();
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/599b5c64/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
index 8555925..158155e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
@@ -194,23 +194,21 @@ public class AssignProcedure extends 
RegionTransitionProcedure {
   setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE);
   return true;
 

[38/50] [abbrv] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) b

2017-05-23 Thread stack
Fix CatalogTracker. Make it use Procedures doing clean up of Region
data on split/merge. Without these changes, ITBLL was failing at
larger scale (3-4hours 5B rows) because we were splitting split
Regions.

Added a bunch of doc. on Procedure primitives.

Added new region-based state machine base class. Moved region-based
state machines on to it.

Found bugs in the way procedure locking was doing in a few of the
region-based Procedures. Having them all have same subclass helps here.

Added isSplittable and isMergeable to the Region Interface.

Master would split/merge even though the Regions still had
references. Fixed it so Master asks RegionServer if Region
is splittable.

Messing more w/ logging. Made all procedures log the same and report
the state the same; helps when logging is regular.

Rewrote TestCatalogTracker. Enabled TestMergeTableRegionProcedure.

Added more functionality to MockMasterServices so can use it doing
standalone testing of Procedures (made TestCatalogTracker use it
instead of its own version).


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c422d62
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c422d62
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c422d62

Branch: refs/heads/HBASE-14614
Commit: 5c422d62869d0f2e71eafe6c8dffcd36da7a7b27
Parents: 079e65d
Author: Michael Stack 
Authored: Thu May 11 16:59:27 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|4 +
 .../apache/hadoop/hbase/MetaTableAccessor.java  |7 +-
 .../hadoop/hbase/procedure2/Procedure.java  |  161 +-
 .../hbase/procedure2/ProcedureExecutor.java |   36 +-
 .../hbase/procedure2/StateMachineProcedure.java |6 +-
 .../shaded/protobuf/generated/AdminProtos.java  |  499 +++-
 .../generated/MasterProcedureProtos.java| 2323 --
 .../shaded/protobuf/generated/MasterProtos.java |   32 +
 .../src/main/protobuf/Admin.proto   |4 +
 .../src/main/protobuf/Master.proto  |1 +
 .../src/main/protobuf/MasterProcedure.proto |   22 +
 .../hadoop/hbase/backup/HFileArchiver.java  |   15 +-
 .../hadoop/hbase/master/CatalogJanitor.java |   79 +-
 .../hadoop/hbase/master/TableStateManager.java  |3 +-
 .../master/assignment/AssignProcedure.java  |   61 +-
 .../assignment/GCMergedRegionsProcedure.java|  170 ++
 .../master/assignment/GCRegionProcedure.java|  154 ++
 .../assignment/MergeTableRegionsProcedure.java  |  131 +-
 .../master/assignment/MoveRegionProcedure.java  |   22 +-
 .../master/assignment/RegionStateStore.java |8 +-
 .../hbase/master/assignment/RegionStates.java   |   12 +-
 .../assignment/RegionTransitionProcedure.java   |   21 +-
 .../assignment/SplitTableRegionProcedure.java   |  125 +-
 .../master/assignment/UnassignProcedure.java|   23 +-
 .../hadoop/hbase/master/assignment/Util.java|   60 +
 .../hbase/master/balancer/BaseLoadBalancer.java |2 -
 .../AbstractStateMachineRegionProcedure.java|  118 +
 .../AbstractStateMachineTableProcedure.java |   11 +-
 .../DispatchMergingRegionsProcedure.java|2 +-
 .../procedure/MasterProcedureScheduler.java |   10 +-
 .../master/procedure/ServerCrashProcedure.java  |8 +-
 .../procedure/TableProcedureInterface.java  |3 +-
 .../hadoop/hbase/regionserver/HRegion.java  |6 +-
 .../hbase/regionserver/HRegionFileSystem.java   |3 +-
 .../hbase/regionserver/RSRpcServices.java   |2 +
 .../hadoop/hbase/regionserver/Region.java   |8 +
 .../hadoop/hbase/HBaseTestingUtility.java   |2 +-
 .../hbase/client/TestAsyncRegionAdminApi.java   |3 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  596 ++---
 .../master/assignment/MockMasterServices.java   |  184 +-
 .../TestMergeTableRegionsProcedure.java |   44 +-
 .../TestSplitTableRegionProcedure.java  |   20 +-
 ...ProcedureSchedulerPerformanceEvaluation.java |2 +-
 .../procedure/TestMasterProcedureScheduler.java |   20 +-
 44 files changed, 3850 insertions(+), 1173 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 5b9cbec..d470ffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -168,6 +168,10 @@ public class HRegionInfo implements 
Comparable {
 return 

hbase git commit: HBASE-18077 Update JUnit licensing to use EPL

2017-05-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 2cbdad412 -> dbd72f9b0


HBASE-18077 Update JUnit licensing to use EPL

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dbd72f9b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dbd72f9b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dbd72f9b

Branch: refs/heads/branch-1.1
Commit: dbd72f9b0c187dc1d9296d5f8b58c4a75a7fedb9
Parents: 2cbdad4
Author: Mike Drob 
Authored: Thu May 18 19:16:56 2017 -0700
Committer: Sean Busbey 
Committed: Tue May 23 10:38:13 2017 -0500

--
 .../src/main/resources/META-INF/LICENSE.vm  | 264 ++-
 .../src/main/resources/supplemental-models.xml  |   5 +-
 2 files changed, 266 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dbd72f9b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
--
diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
index 4f98ef5..9f9afb5 100644
--- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
+++ b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
@@ -1551,6 +1551,8 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 #set($bsd3 = [])
 ## gather up CPL 1.0 works
 #set($cpl = [])
+## gather up EPL 1.0 works
+#set($epl = [])
 ## track commons-math
 #set($commons-math-two = false)
 #set($commons-math-three = false)
@@ -1561,7 +1563,9 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 ## Whitelist of licenses that it's safe to not aggregate as above.
 ## Note that this doesn't include ALv2 or the aforementioned aggregate
 ## license mentions.
-#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 2.0', 'Mozilla Public License Version 
1.1' ])
+##
+## See this FAQ link for justifications: 
https://www.apache.org/legal/resolved.html
+#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5' ])
 ## include LICENSE sections for anything not under ASL2.0
 #foreach( ${dep} in ${projects} )
 #if(${debug-print-included-work-info.equalsIgnoreCase("true")})
@@ -1637,6 +1641,9 @@ ${dep.scm.url}
 #if(${dep.licenses[0].name.equals("Common Public License Version 1.0")})
 #set($aggregated = $cpl.add($dep))
 #end
+#if(${dep.licenses[0].name.equals("Eclipse Public License 1.0")})
+#set($aggregated = $epl.add($dep))
+#end
 #if(!${aggregated})
 --
 This product includes ${dep.name} licensed under the ${dep.licenses[0].name}.
@@ -2563,4 +2570,259 @@ Common Public License - v 1.0
 #if($jruby)
 #jruby_license()
 #end
+#if(!(${epl.isEmpty()}))
+
+## print all the EPL 1.0 licensed works
+This product includes the following works licensed under the Eclipse Public 
License 1.0:
+
+#foreach($dep in $epl)
+#if( $dep.licenses[0].comments && !$dep.licenses[0].comments.empty )
+  * ${dep.name}, ${dep.licenses[0].comments}
+#else
+  * ${dep.name}
+#end
+#end
+
+  Eclipse Public License - v 1.0
+
+  THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+  PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+  OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+  1. DEFINITIONS
+
+  "Contribution" means:
+
+  a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+  b) in the case of each subsequent Contributor:
+
+  i) changes to the Program, and
+
+  ii) additions to the Program;
+  where such changes and/or additions to the Program
+  originate from and are distributed by that particular
+  Contributor. A Contribution 'originates' from a
+  Contributor if it was added to the Program by such
+  Contributor itself or anyone acting on such
+  Contributor's behalf. Contributions do not include
+  additions to the Program which: (i) are separate modules
+  of software distributed in conjunction with the Program
+  under their own license agreement, and (ii) are not
+  derivative works of the Program.
+
+  "Contributor" means any person or entity that distributes the Program.
+
+  "Licensed Patents" mean patent claims licensable by a Contributor
+  which are necessarily infringed by the use or sale of its
+  Contribution alone or when combined with the Program.
+
+  

[47/50] [abbrv] hbase git commit: Fix NPE in CJ

2017-05-23 Thread stack
Fix NPE in CJ


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e9c84a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e9c84a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e9c84a5

Branch: refs/heads/HBASE-14614
Commit: 7e9c84a5be9c42c380314e7dfc7394097a0a4177
Parents: 37a9ab6
Author: Michael Stack 
Authored: Wed May 17 09:43:07 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:54 2017 -0700

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e9c84a5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 7d43f2f..dc0c0a6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -346,9 +346,12 @@ public class CatalogJanitor extends ScheduledChore {
 Pair a = checkDaughterInFs(parent, daughters.getFirst());
 Pair b = checkDaughterInFs(parent, 
daughters.getSecond());
 if (hasNoReferences(a) && hasNoReferences(b)) {
+  String daughterA = daughters.getFirst() != null?
+  daughters.getFirst().getShortNameToLog(): "null";
+  String daughterB = daughters.getSecond() != null?
+  daughters.getSecond().getShortNameToLog(): "null";
   LOG.debug("Deleting region " + parent.getShortNameToLog() +
-" because daughters -- " + daughters.getFirst().getShortNameToLog() + 
", " +
-  daughters.getSecond().getShortNameToLog() +
+" because daughters -- " + daughterA + ", " + daughterB +
 " -- no longer hold references");
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
   pe.submitProcedure(new GCRegionProcedure(pe.getEnvironment(), parent));



[35/50] [abbrv] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) b

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index 9f23848..eca963d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.security.User;
 
 /**
  * Base class for all the Table procedures that want to use a 
StateMachineProcedure.
- * It provide some basic helpers like basic locking, sync latch, and basic 
toStringClassDetails().
+ * It provides helpers like basic locking, sync latch, and 
toStringClassDetails().
  */
 @InterfaceAudience.Private
 public abstract class AbstractStateMachineTableProcedure
@@ -52,9 +52,10 @@ public abstract class 
AbstractStateMachineTableProcedure
 
   protected AbstractStateMachineTableProcedure(final MasterProcedureEnv env,
   final ProcedurePrepareLatch latch) {
-this.user = env.getRequestUser();
-this.setOwner(user);
-
+if (env != null) {
+  this.user = env.getRequestUser();
+  this.setOwner(user);
+}
 // used for compatibility with clients without procedures
 // they need a sync TableExistsException, TableNotFoundException, 
TableNotDisabledException, ...
 this.syncLatch = latch;
@@ -110,4 +111,4 @@ public abstract class 
AbstractStateMachineTableProcedure
   throw new TableNotFoundException(getTableName());
 }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
index 1478fc7..15ed429 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
@@ -275,7 +275,7 @@ public class DispatchMergingRegionsProcedure
 
   @Override
   public TableOperationType getTableOperationType() {
-return TableOperationType.MERGE;
+return TableOperationType.REGION_MERGE;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index bcb0004..61e984c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -572,11 +572,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 return false;
   // region operations are using the shared-lock on the table
   // and then they will grab an xlock on the region.
-  case SPLIT:
-  case MERGE:
-  case ASSIGN:
-  case UNASSIGN:
+  case REGION_SPLIT:
+  case REGION_MERGE:
+  case REGION_ASSIGN:
+  case REGION_UNASSIGN:
   case REGION_EDIT:
+  case REGION_GC:
+  case MERGED_REGIONS_GC:
 return false;
   default:
 break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 9e00579..3bd2c9e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -413,14 +413,8 @@ implements ServerProcedureInterface {
   final HRegionInfo hri = it.next();
   RegionTransitionProcedure rtp = 
am.getRegionStates().getRegionTransitionProcedure(hri);
   if (rtp == null) 

[50/50] [abbrv] hbase git commit: Rebase fixup

2017-05-23 Thread stack
Rebase fixup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f24362c1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f24362c1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f24362c1

Branch: refs/heads/HBASE-14614
Commit: f24362c15d9849a9495ccf1cf5b0719bffba7f99
Parents: 12162ea
Author: Michael Stack 
Authored: Tue May 23 00:32:51 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:37:25 2017 -0700

--
 .../hbase/regionserver/RSRpcServices.java   | 76 ++--
 1 file changed, 70 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f24362c1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 6bc8f89..ed19dc9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -27,8 +25,17 @@ import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -120,8 +127,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -199,7 +204,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.ScanMet
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -217,6 +221,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Implements the regionserver RPC services.
  */
@@ -3376,4 +3382,62 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
   throw new ServiceException(e);
 }
   }
+
+  @Override
+  public ExecuteProceduresResponse executeProcedures(RpcController controller,
+   ExecuteProceduresRequest request) throws ServiceException {
+ExecuteProceduresResponse.Builder builder = 
ExecuteProceduresResponse.newBuilder();
+if (request.getOpenRegionCount() > 0) {
+  for (OpenRegionRequest req: request.getOpenRegionList()) {
+builder.addOpenRegion(openRegion(controller, req));
+  }
+ }
+ if (request.getCloseRegionCount() > 0) {
+   for (CloseRegionRequest req: request.getCloseRegionList()) {
+ builder.addCloseRegion(closeRegion(controller, req));
+   }
+ }
+ return 

[49/50] [abbrv] hbase git commit: LOG, dont throw exception, if already SPLIT

2017-05-23 Thread stack
LOG, don't throw exception, if already SPLIT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37a9ab6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37a9ab6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37a9ab6f

Branch: refs/heads/HBASE-14614
Commit: 37a9ab6f1c665ed0762f7d6101b177df1b4c3e4a
Parents: 2bf1b4f
Author: Michael Stack 
Authored: Tue May 16 22:14:06 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:54 2017 -0700

--
 .../hbase/master/assignment/SplitTableRegionProcedure.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37a9ab6f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 7ebe769..2b5f46b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -345,8 +345,8 @@ public class SplitTableRegionProcedure
   // Protect against the case where concurrent SPLIT requests came in. 
Check a SPLIT
   // did not just run.
   if (parentHRI.isSplit() || parentHRI.isOffline()) {
-setFailure(new IOException("Split " + 
parentHRI.getRegionNameAsString() + " FAILED because " +
-"offline/split already."));
+LOG.info("Split of " + parentHRI.getShortNameToLog() +
+" skipped because already offline/split.");
 return false;
   }
 



[43/50] [abbrv] hbase git commit: Fix broke unit test. Use nice facility I found in procedure testing utility in a few other tests... in place of checking for symptom

2017-05-23 Thread stack
Fix broken unit test. Use a nice facility I found in the procedure testing utility in 
a few other tests... in place of checking for the symptom


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2e780735
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2e780735
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2e780735

Branch: refs/heads/HBASE-14614
Commit: 2e780735860302c815cc598f8b330aafa90d2ac7
Parents: 18eaefb
Author: Michael Stack 
Authored: Fri May 12 17:07:27 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hadoop/hbase/master/CatalogJanitor.java |  8 --
 .../hadoop/hbase/master/TestCatalogJanitor.java | 28 
 .../TestSplitTransactionOnCluster.java  |  3 ++-
 3 files changed, 19 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2e780735/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 2331d8f..7d43f2f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.GCMergedRegionsProcedure;
 import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -54,6 +55,8 @@ import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Triple;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * A janitor for the catalog tables.  Scans the hbase:meta catalog
  * table on a period looking for unused regions to garbage collect.
@@ -66,6 +69,7 @@ public class CatalogJanitor extends ScheduledChore {
   private final AtomicBoolean enabled = new AtomicBoolean(true);
   private final MasterServices services;
   private final Connection connection;
+  // PID of the last Procedure launched herein. Keep around for Tests.
 
   CatalogJanitor(final MasterServices services) {
 super("CatalogJanitor-" + services.getServerName().toShortString(), 
services,
@@ -215,8 +219,8 @@ public class CatalogJanitor extends ScheduledChore {
   + regionB.getShortNameToLog()
   + " from fs because merged region no longer holds references");
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
-  pe.submitProcedure(new 
GCMergedRegionsProcedure(pe.getEnvironment(),mergedRegion,
-  regionA, regionB));
+  pe.submitProcedure(new GCMergedRegionsProcedure(pe.getEnvironment(),
+  mergedRegion, regionA, regionB));
   return true;
 }
 return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e780735/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 5c082a4..59e8fb3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.Reference;
 import 
org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
 import org.apache.hadoop.hbase.master.assignment.MockMasterServices;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
@@ -135,14 +136,14 @@ public class TestCatalogJanitor {
 // Add a parentdir for kicks so can check it gets removed by the 
catalogjanitor.
 fs.mkdirs(parentdir);
 assertFalse(this.janitor.cleanParent(parent, r));
+
ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
+assertTrue(fs.exists(parentdir));
 // Remove 

[40/50] [abbrv] hbase git commit: archive is for hfiles only and the cleaner is removing my pv2 files

2017-05-23 Thread stack
archive is for hfiles only and the cleaner is removing my pv2 files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/18eaefb6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/18eaefb6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/18eaefb6

Branch: refs/heads/HBASE-14614
Commit: 18eaefb64c02d2b644e6bc9711221edb1be2e737
Parents: de32b5e
Author: Michael Stack 
Authored: Fri May 12 16:25:55 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hadoop/hbase/procedure2/store/wal/WALProcedureStore.java  | 7 ---
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 1 +
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/18eaefb6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index b64fd54..1791cae 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -66,6 +66,7 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Evolving
 public class WALProcedureStore extends ProcedureStoreBase {
   private static final Log LOG = LogFactory.getLog(WALProcedureStore.class);
+  public static final String LOG_PREFIX = "pv2-";
 
   public interface LeaseRecovery {
 void recoverFileLease(FileSystem fs, Path path) throws IOException;
@@ -1145,7 +1146,7 @@ public class WALProcedureStore extends ProcedureStoreBase 
{
   }
 
   protected Path getLogFilePath(final long logId) throws IOException {
-return new Path(walDir, String.format("state-%020d.log", logId));
+return new Path(walDir, String.format(LOG_PREFIX + "%020d.log", logId));
   }
 
   private static long getLogIdFromName(final String name) {
@@ -1158,7 +1159,7 @@ public class WALProcedureStore extends ProcedureStoreBase 
{
 @Override
 public boolean accept(Path path) {
   String name = path.getName();
-  return name.startsWith("state-") && name.endsWith(".log");
+  return name.startsWith(LOG_PREFIX) && name.endsWith(".log");
 }
   };
 
@@ -1248,7 +1249,7 @@ public class WALProcedureStore extends ProcedureStoreBase 
{
   return null;
 }
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Opening state-log: " + logFile);
+  LOG.debug("Opening Pv2 " + logFile);
 }
 try {
   log.open();

http://git-wip-us.apache.org/repos/asf/hbase/blob/18eaefb6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 65e2de9..83f5a1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1163,6 +1163,7 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
 final Path walDir = new Path(FSUtils.getWALRootDir(this.conf),
 MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
+// TODO: No cleaner currently!
 final Path walArchiveDir = new 
Path(HFileArchiveUtil.getArchivePath(this.conf),
 MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
 



[45/50] [abbrv] hbase git commit: MoveRegionProcedure was not passing its Region to super class. NPEs when locking.

2017-05-23 Thread stack
MoveRegionProcedure was not passing its Region to super class. NPEs when
locking.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff42f1b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff42f1b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff42f1b4

Branch: refs/heads/HBASE-14614
Commit: ff42f1b4b8e7eba40cf83f24922def08207c7840
Parents: 5c422d6
Author: Michael Stack 
Authored: Thu May 11 20:22:33 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java| 2 +-
 .../apache/hadoop/hbase/master/assignment/AssignmentManager.java | 4 +---
 .../hadoop/hbase/master/assignment/MoveRegionProcedure.java  | 3 ++-
 .../master/procedure/AbstractStateMachineTableProcedure.java | 3 +++
 4 files changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff42f1b4/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index f065a98..fa3df04 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1669,7 +1669,7 @@ public class ProcedureExecutor {
   }
 }
   } catch (Throwable t) {
-LOG.warn("Worker terminating because", t);
+LOG.warn("Worker terminating UNNATURALLY " + this.activeProcedure, t);
   } finally {
 LOG.debug("Worker terminated.");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ff42f1b4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index e567d2d..eceb624 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -720,9 +720,7 @@ public class AssignmentManager implements ServerListener {
   }
 
   public MoveRegionProcedure createMoveRegionProcedure(final RegionPlan plan) {
-MoveRegionProcedure proc = new MoveRegionProcedure(plan);
-proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName());
-return proc;
+return new MoveRegionProcedure(getProcedureEnvironment(), plan);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ff42f1b4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index 6cc04e4..f998af8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -52,7 +52,8 @@ public class MoveRegionProcedure extends 
AbstractStateMachineRegionProcedurehttp://git-wip-us.apache.org/repos/asf/hbase/blob/ff42f1b4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index eca963d..1417159 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -50,6 +50,9 @@ public abstract class 
AbstractStateMachineTableProcedure
 this(env, null);
   }
 
+  /**
+   * @param env Uses this to set Procedure Owner at least.
+   */
   protected AbstractStateMachineTableProcedure(final MasterProcedureEnv env,
   final ProcedurePrepareLatch latch) {
 if (env != null) {



[41/50] [abbrv] hbase git commit: Archive dir setting

2017-05-23 Thread stack
Archive dir setting


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de32b5e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de32b5e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de32b5e7

Branch: refs/heads/HBASE-14614
Commit: de32b5e7a9dc3b8c4dcb2c13f7c4b471bd3e7d56
Parents: d523dec
Author: Michael Stack 
Authored: Fri May 12 15:56:10 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/de32b5e7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index d1fe35e..65e2de9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1163,8 +1163,8 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
 final Path walDir = new Path(FSUtils.getWALRootDir(this.conf),
 MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
-final Path walArchiveDir = new Path(new 
Path(FSUtils.getWALRootDir(this.conf),
-HConstants.HFILE_ARCHIVE_DIRECTORY), 
MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
+final Path walArchiveDir = new 
Path(HFileArchiveUtil.getArchivePath(this.conf),
+MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
 
 final FileSystem walFs = walDir.getFileSystem(conf);
 



[44/50] [abbrv] hbase git commit: Wait a second before killing a regionserver if state is not what is expected. Also, stop active expire of a RS from setting state on regions to offline.... let Server

2017-05-23 Thread stack
Wait a second before killing a regionserver if state is not what is expected. 
Also, stop active expire of a RS from setting state on regions to offline... 
let ServerCrashProcedure do this. It's messing us up when a Region is set 
OFFLINE all of a sudden


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/079e65dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/079e65dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/079e65dd

Branch: refs/heads/HBASE-14614
Commit: 079e65dd6cb5169a152c22f278d9b527f4c4a648
Parents: 599b5c6
Author: Michael Stack 
Authored: Mon May 8 19:28:06 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../master/assignment/AssignmentManager.java| 21 
 .../hbase/master/assignment/RegionStates.java   |  9 -
 .../master/procedure/DisableTableProcedure.java |  2 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |  6 +-
 .../hbase/client/TestAsyncRegionAdminApi.java   |  7 ++-
 .../TestFavoredStochasticLoadBalancer.java  |  7 ---
 .../TestSimpleRegionNormalizerOnCluster.java|  3 ++-
 7 files changed, 39 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/079e65dd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index e13a052..e567d2d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -83,6 +83,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
@@ -831,6 +832,7 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
+  // FYI: regionNode is sometimes synchronized by the caller but not always.
   private boolean reportTransition(final RegionStateNode regionNode,
   final ServerStateNode serverNode, final TransitionCode state, final long 
seqId)
   throws UnexpectedStateException {
@@ -988,18 +990,15 @@ public class AssignmentManager implements ServerListener {
 }
   }
 
-  public void checkOnlineRegionsReport(final ServerStateNode serverNode,
-  final Set regionNames) {
+  void checkOnlineRegionsReport(final ServerStateNode serverNode, final 
Set regionNames) {
 final ServerName serverName = serverNode.getServerName();
 try {
   for (byte[] regionName: regionNames) {
 if (!isRunning()) return;
-
 final RegionStateNode regionNode = 
regionStates.getRegionNodeFromName(regionName);
 if (regionNode == null) {
   throw new UnexpectedStateException("Not online: " + 
Bytes.toStringBinary(regionName));
 }
-
 synchronized (regionNode) {
   if (regionNode.isInState(State.OPENING, State.OPEN)) {
 if (!regionNode.getRegionLocation().equals(serverName)) {
@@ -1017,9 +1016,14 @@ public class AssignmentManager implements ServerListener 
{
   }
 }
   } else if (!regionNode.isInState(State.CLOSING, State.SPLITTING)) {
-// TODO: We end up killing the RS if we get a report while we 
already
-// transitioned to close or split. we should have a 
timeout/timestamp to compare
-throw new UnexpectedStateException(regionNode.toString() + " 
reported unexpected OPEN");
+long diff = regionNode.getLastUpdate() - 
EnvironmentEdgeManager.currentTime();
+if (diff > 1000/*One Second... make configurable if an issue*/) {
+  // So, we can get report that a region is CLOSED or SPLIT 
because a heartbeat
+  // came in at about same time as a region transition. Make sure 
there is some
+  // elapsed time between killing remote server.
+  throw new UnexpectedStateException(regionNode.toString() +
+" reported an unexpected OPEN; time since last update=" + 
diff);
+}
   }

[17/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
deleted file mode 100644
index 929cd4e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.lang.Thread.UncaughtExceptionHandler;
-import java.util.concurrent.Executors;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Server;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-/**
- * Base class used bulk assigning and unassigning regions.
- * Encapsulates a fixed size thread pool of executors to run 
assignment/unassignment.
- * Implement {@link #populatePool(java.util.concurrent.ExecutorService)} and
- * {@link #waitUntilDone(long)}.  The default implementation of
- * the {@link #getUncaughtExceptionHandler()} is to abort the hosting
- * Server.
- */
-@InterfaceAudience.Private
-public abstract class BulkAssigner {
-  protected final Server server;
-
-  /**
-   * @param server An instance of Server
-   */
-  public BulkAssigner(final Server server) {
-this.server = server;
-  }
-
-  /**
-   * @return What to use for a thread prefix when executor runs.
-   */
-  protected String getThreadNamePrefix() {
-return this.server.getServerName() + "-" + this.getClass().getName(); 
-  }
-
-  protected UncaughtExceptionHandler getUncaughtExceptionHandler() {
-return new UncaughtExceptionHandler() {
-  @Override
-  public void uncaughtException(Thread t, Throwable e) {
-// Abort if exception of any kind.
-server.abort("Uncaught exception in " + t.getName(), e);
-  }
-};
-  }
-
-  protected int getThreadCount() {
-return this.server.getConfiguration().
-  getInt("hbase.bulk.assignment.threadpool.size", 20);
-  }
-
-  protected long getTimeoutOnRIT() {
-return this.server.getConfiguration().
-  getLong("hbase.bulk.assignment.waiton.empty.rit", 5 * 60 * 1000);
-  }
-
-  protected abstract void populatePool(
-  final java.util.concurrent.ExecutorService pool) throws IOException;
-
-  public boolean bulkAssign() throws InterruptedException, IOException {
-return bulkAssign(true);
-  }
-
-  /**
-   * Run the bulk assign.
-   * 
-   * @param sync
-   *  Whether to assign synchronously.
-   * @throws InterruptedException
-   * @return True if done.
-   * @throws IOException
-   */
-  public boolean bulkAssign(boolean sync) throws InterruptedException,
-  IOException {
-boolean result = false;
-ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
-builder.setDaemon(true);
-builder.setNameFormat(getThreadNamePrefix() + "-%1$d");
-builder.setUncaughtExceptionHandler(getUncaughtExceptionHandler());
-int threadCount = getThreadCount();
-java.util.concurrent.ExecutorService pool =
-  Executors.newFixedThreadPool(threadCount, builder.build());
-try {
-  populatePool(pool);
-  // How long to wait on empty regions-in-transition.  If we timeout, the
-  // RIT monitor should do fixup.
-  if (sync) result = waitUntilDone(getTimeoutOnRIT());
-} finally {
-  // We're done with the pool.  It'll exit when its done all in queue.
-  pool.shutdown();
-}
-return result;
-  }
-
-  /**
-   * Wait until bulk assign is done.
-   * @param timeout How long to wait.
-   * @throws InterruptedException
-   * @return True if the condition we were waiting on happened.
-   */
-  protected abstract boolean waitUntilDone(final long timeout)
-  throws InterruptedException;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
--
diff --git 

[27/50] [abbrv] hbase git commit: Don't drop old master proc wal files... need to debug

2017-05-23 Thread stack
Don't drop old master proc wal files... need to debug


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f2a5623d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f2a5623d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f2a5623d

Branch: refs/heads/HBASE-14614
Commit: f2a5623d63fa34e8751f1c459f1d7193d3136238
Parents: db1dcf3
Author: Michael Stack 
Authored: Sun Apr 30 21:16:05 2017 -1000
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hbase/procedure2/store/wal/ProcedureWALFile.java  | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f2a5623d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 012ddeb..585762b 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
@@ -157,7 +158,18 @@ public class ProcedureWALFile implements 
Comparable {
 
   public void removeFile() throws IOException {
 close();
-fs.delete(logFile, false);
+// TODO: FIX THIS. MAKE THIS ARCHIVE FORMAL.
+Path archiveDir =
+new Path(logFile.getParent().getParent(), 
HConstants.HFILE_ARCHIVE_DIRECTORY);
+try {
+  fs.mkdirs(archiveDir);
+} catch (IOException ioe) {
+  LOG.warn("Making " + archiveDir, ioe);
+}
+Path archivedFile = new Path(archiveDir, logFile.getName());
+LOG.info("ARCHIVED WAL (FIX) " + logFile + " to " + archivedFile);
+fs.rename(logFile, archivedFile);
+// fs.delete(logFile, false);
   }
 
   public void setProcIds(long minId, long maxId) {



[25/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
new file mode 100644
index 000..8d5ff3c
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -0,0 +1,375 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.io.IOException;
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
+import 
org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedContainerWithTimestamp;
+import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+
+import com.google.common.collect.ArrayListMultimap;
+
+/**
+ * A procedure dispatcher that aggregates and sends after elapsed time or 
after we hit
+ * count threshold. Creates its own threadpool to run RPCs with timeout.
+ * 
+ * Each server queue has a dispatch buffer
+ * Once the dispatch buffer reaches a threshold-size/time we send
+ * 
+ * Call {@link #start()} and then {@link #submitTask(Callable)}. When done,
+ * call {@link #stop()}.
+ */
+@InterfaceAudience.Private
+public abstract class RemoteProcedureDispatcher {
+  private static final Log LOG = 
LogFactory.getLog(RemoteProcedureDispatcher.class);
+
+  public static final String THREAD_POOL_SIZE_CONF_KEY =
+  "hbase.procedure.remote.dispatcher.threadpool.size";
+  private static final int DEFAULT_THREAD_POOL_SIZE = 128;
+
+  public static final String DISPATCH_DELAY_CONF_KEY =
+  "hbase.procedure.remote.dispatcher.delay.msec";
+  private static final int DEFAULT_DISPATCH_DELAY = 150;
+
+  public static final String DISPATCH_MAX_QUEUE_SIZE_CONF_KEY =
+  "hbase.procedure.remote.dispatcher.max.queue.size";
+  private static final int DEFAULT_MAX_QUEUE_SIZE = 32;
+
+  private final AtomicBoolean running = new AtomicBoolean(false);
+  private final ConcurrentHashMap nodeMap =
+  new ConcurrentHashMap();
+
+  private final int operationDelay;
+  private final int queueMaxSize;
+  private final int corePoolSize;
+
+  private TimeoutExecutorThread timeoutExecutor;
+  private ThreadPoolExecutor threadPool;
+
+  protected RemoteProcedureDispatcher(Configuration conf) {
+this.corePoolSize = conf.getInt(THREAD_POOL_SIZE_CONF_KEY, 
DEFAULT_THREAD_POOL_SIZE);
+this.operationDelay = conf.getInt(DISPATCH_DELAY_CONF_KEY, 
DEFAULT_DISPATCH_DELAY);
+this.queueMaxSize = conf.getInt(DISPATCH_MAX_QUEUE_SIZE_CONF_KEY, 
DEFAULT_MAX_QUEUE_SIZE);
+  }
+
+  public boolean start() {
+if (running.getAndSet(true)) {
+  LOG.warn("Already running");
+  return false;
+}
+
+LOG.info("Starting procedure remote dispatcher; threads=" + 
this.corePoolSize +
+  ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + 
this.operationDelay);
+
+// Create the timeout executor
+timeoutExecutor = new TimeoutExecutorThread();
+timeoutExecutor.start();
+
+// Create the thread pool that will execute 

[20/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index b886f5c..299b55e 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -8822,1348 +8822,6 @@ public final class RegionServerStatusProtos {
 
   }
 
-  public interface SplitTableRegionRequestOrBuilder extends
-  // 
@@protoc_insertion_point(interface_extends:hbase.pb.SplitTableRegionRequest)
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-boolean hasRegionInfo();
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo 
getRegionInfo();
-/**
- * required .hbase.pb.RegionInfo region_info = 1;
- */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder
 getRegionInfoOrBuilder();
-
-/**
- * required bytes split_row = 2;
- */
-boolean hasSplitRow();
-/**
- * required bytes split_row = 2;
- */
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString 
getSplitRow();
-
-/**
- * optional uint64 nonce_group = 3 [default = 0];
- */
-boolean hasNonceGroup();
-/**
- * optional uint64 nonce_group = 3 [default = 0];
- */
-long getNonceGroup();
-
-/**
- * optional uint64 nonce = 4 [default = 0];
- */
-boolean hasNonce();
-/**
- * optional uint64 nonce = 4 [default = 0];
- */
-long getNonce();
-  }
-  /**
-   * 
-   **
-   * Splits the specified region.
-   * 
-   *
-   * Protobuf type {@code hbase.pb.SplitTableRegionRequest}
-   */
-  public  static final class SplitTableRegionRequest extends
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
-  // 
@@protoc_insertion_point(message_implements:hbase.pb.SplitTableRegionRequest)
-  SplitTableRegionRequestOrBuilder {
-// Use SplitTableRegionRequest.newBuilder() to construct.
-private 
SplitTableRegionRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
-  super(builder);
-}
-private SplitTableRegionRequest() {
-  splitRow_ = 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
-  nonceGroup_ = 0L;
-  nonce_ = 0L;
-}
-
-@java.lang.Override
-public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-getUnknownFields() {
-  return this.unknownFields;
-}
-private SplitTableRegionRequest(
-org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
-
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
-throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
-  this();
-  int mutable_bitField0_ = 0;
-  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
-  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-  try {
-boolean done = false;
-while (!done) {
-  int tag = input.readTag();
-  switch (tag) {
-case 0:
-  done = true;
-  break;
-default: {
-  if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
-done = true;
-  }
-  break;
-}
-case 10: {
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder
 subBuilder = null;
-  if (((bitField0_ & 0x0001) == 0x0001)) {
-subBuilder = regionInfo_.toBuilder();
-  }
-  regionInfo_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER,
 extensionRegistry);
-  if (subBuilder != null) {
-subBuilder.mergeFrom(regionInfo_);
-regionInfo_ = subBuilder.buildPartial();
-  }
-  bitField0_ |= 0x0001;
-  break;
-}
-case 18: {
-  bitField0_ |= 0x0002;
-  splitRow_ = input.readBytes();
-  break;
-}
- 

[09/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
new file mode 100644
index 000..e7157d0
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -0,0 +1,723 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RejectedExecutionHandler;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.conf.ConfigurationManager;
+import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
+import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
+import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.StealJobQueue;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Compact region on request and then run split if appropriate
+ */
+@InterfaceAudience.Private
+public class CompactSplit implements CompactionRequestor, 
PropagatingConfigurationObserver {
+  private static final Log LOG = LogFactory.getLog(CompactSplit.class);
+
+  // Configuration key for the large compaction threads.
+  public final static String LARGE_COMPACTION_THREADS =
+  "hbase.regionserver.thread.compaction.large";
+  public final static int LARGE_COMPACTION_THREADS_DEFAULT = 1;
+
+  // Configuration key for the small compaction threads.
+  public final static String SMALL_COMPACTION_THREADS =
+  "hbase.regionserver.thread.compaction.small";
+  public final static int SMALL_COMPACTION_THREADS_DEFAULT = 1;
+
+  // Configuration key for split threads
+  public final static String SPLIT_THREADS = "hbase.regionserver.thread.split";
+  public final static int SPLIT_THREADS_DEFAULT = 1;
+
+  // Configuration keys for merge threads
+  public final static String MERGE_THREADS = "hbase.regionserver.thread.merge";
+  public final static int MERGE_THREADS_DEFAULT = 1;
+
+  public static final String REGION_SERVER_REGION_SPLIT_LIMIT =
+  "hbase.regionserver.regionSplitLimit";
+  public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000;
+
+  private final HRegionServer server;
+  private final Configuration conf;
+
+  private final ThreadPoolExecutor longCompactions;
+  private final ThreadPoolExecutor shortCompactions;
+  private final ThreadPoolExecutor splits;
+  private final ThreadPoolExecutor mergePool;
+
+  private volatile ThroughputController compactionThroughputController;
+
+  /**
+   * Splitting should not take place if the total number of regions exceed 
this.
+   * This is not a hard limit to the number of regions but it is a guideline to
+   * stop splitting after number of online regions is greater than this.
+   */
+  private int regionSplitLimit;
+
+  /** 

[34/50] [abbrv] hbase git commit: Trying to find who sets server and regionState to null around servercrashprocedure add DEBUG. Ditto for why we do a suspend though we have not done dispatch (on a ret

2017-05-23 Thread stack
Trying to find who sets server and regionState to null around
ServerCrashProcedure; add DEBUG logging. Ditto for why we do a suspend
though we have not done dispatch (on a retry).

Add to MasterServices the ability to wait on the Master being up -- makes
it so we can mock the Master and start to implement standalone split testing.
Start in on a standalone split-region test in TestAM.

Fix bug where a Split can fail because it comes in in the middle of
a Move (by holding lock for duration of a Move).

+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -61,6 +61,15 @@ public class MoveRegionProcedure
   }


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/924bb80a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/924bb80a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/924bb80a

Branch: refs/heads/HBASE-14614
Commit: 924bb80addbf9726f0d95e3f27162eb9dd1ab97e
Parents: f2a5623
Author: Michael Stack 
Authored: Fri May 5 20:26:00 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  6 +-
 .../hbase/procedure2/ProcedureExecutor.java | 10 +--
 .../org/apache/hadoop/hbase/master/HMaster.java |  1 +
 .../hadoop/hbase/master/MasterServices.java |  9 +++
 .../master/assignment/AssignProcedure.java  |  7 +--
 .../master/assignment/AssignmentManager.java| 10 +--
 .../master/assignment/MoveRegionProcedure.java  |  9 +++
 .../hbase/master/assignment/RegionStates.java   |  3 +
 .../assignment/RegionTransitionProcedure.java   | 11 +++-
 .../assignment/SplitTableRegionProcedure.java   |  6 +-
 .../master/assignment/UnassignProcedure.java|  3 -
 .../master/procedure/MasterProcedureEnv.java|  2 +-
 .../master/procedure/ProcedureSyncWait.java |  4 +-
 .../master/procedure/ServerCrashProcedure.java  | 18 ++
 .../hbase/master/MockNoopMasterServices.java|  7 +++
 .../master/assignment/MockMasterServices.java   |  8 ++-
 .../assignment/TestAssignmentManager.java   | 66 +++-
 17 files changed, 133 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/924bb80a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 0184d5d..5527076 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -288,12 +288,14 @@ public abstract class Procedure implements 
Comparable {
   sb.append(getParentProcId());
 }
 
+/**
+ * Enable later when this is being used.
 if (hasOwner()) {
   sb.append(", owner=");
   sb.append(getOwner());
-}
+}*/
 
-sb.append(", procState=");
+sb.append(", state=");
 toStringState(sb);
 
 if (hasException()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/924bb80a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index b1db2dc..d36be64 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1162,7 +1162,7 @@ public class ProcedureExecutor {
   if (proc.isSuccess()) {
 // update metrics on finishing the procedure
 proc.updateMetricsOnFinish(getEnvironment(), proc.elapsedTime(), true);
-LOG.info("Finished " + proc + " in " + 
StringUtils.humanTimeDiff(proc.elapsedTime()));
+LOG.info("Finish " + proc + " in " + 
StringUtils.humanTimeDiff(proc.elapsedTime()));
 // Finalize the procedure state
 if (proc.getProcId() == rootProcId) {
   procedureFinished(proc);
@@ -1372,7 +1372,7 @@ public class ProcedureExecutor {
   subprocs = null;
 }
   } catch (ProcedureSuspendedException e) {
-LOG.info("Suspended " + procedure);
+LOG.info("Suspend " + procedure);
 suspended = true;
   } catch (ProcedureYieldException e) {
 if (LOG.isTraceEnabled()) {
@@ -1519,7 +1519,7 @@ public class ProcedureExecutor {
 
 // If this procedure is the last child awake the parent procedure
 if 

[06/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
index 0084d44..8a216c5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -55,12 +56,13 @@ public class TestMasterMetrics {
 KeeperException, InterruptedException {
   super(conf, cp);
 }
-
+/*
 @Override
 protected void tryRegionServerReport(
 long reportStartTime, long reportEndTime) {
   // do nothing
 }
+*/
   }
 
   @BeforeClass
@@ -81,7 +83,7 @@ public class TestMasterMetrics {
 }
   }
 
-  @Test(timeout = 30)
+  @Ignore @Test(timeout = 30)
   public void testClusterRequests() throws Exception {
 
 // sending fake request to master to see how metric value has changed
@@ -114,7 +116,7 @@ public class TestMasterMetrics {
 master.stopMaster();
   }
 
-  @Test
+  @Ignore @Test
   public void testDefaultMasterMetrics() throws Exception {
 MetricsMasterSource masterSource = 
master.getMasterMetrics().getMetricsSource();
 metricsHelper.assertGauge( "numRegionServers", 2, masterSource);

http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 6c737e9..737d145 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -191,18 +192,20 @@ public class TestMasterOperationsForRegionReplicas {
   for (int i = 1; i < numSlaves; i++) { //restore the cluster
 TEST_UTIL.getMiniHBaseCluster().startRegionServer();
   }
-
-  //check on alter table
+/* DISABLED! FOR NOW
+  // Check on alter table
   ADMIN.disableTable(tableName);
   assert(ADMIN.isTableDisabled(tableName));
   //increase the replica
   desc.setRegionReplication(numReplica + 1);
   ADMIN.modifyTable(tableName, desc);
   ADMIN.enableTable(tableName);
+  LOG.info(ADMIN.getTableDescriptor(tableName).toString());
   assert(ADMIN.isTableEnabled(tableName));
   List regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
   
.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
-  assert(regions.size() == numRegions * (numReplica + 1));
+  assertTrue("regions.size=" + regions.size() + ", numRegions=" + 
numRegions + ", numReplica=" + numReplica,
+  regions.size() == numRegions * (numReplica + 1));
 
   //decrease the replica(earlier, table was modified to have a replica 
count of numReplica + 1)
   ADMIN.disableTable(tableName);
@@ -229,6 +232,7 @@ public class TestMasterOperationsForRegionReplicas {
   assert(defaultReplicas.size() == numRegions);
   Collection counts = new HashSet<>(defaultReplicas.values());
   assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
+  */
 } finally {
   ADMIN.disableTable(tableName);
   ADMIN.deleteTable(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
index b59e6ff..23efdb2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
@@ -18,15 +18,12 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import static org.junit.Assert.*;

[23/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
index d7bbd05..9780e4f 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
@@ -1734,100 +1734,201 @@ public final class MasterProcedureProtos {
   }
 
   /**
-   * Protobuf enum {@code hbase.pb.MergeTableRegionsState}
+   * Protobuf enum {@code hbase.pb.DispatchMergingRegionsState}
*/
-  public enum MergeTableRegionsState
+  public enum DispatchMergingRegionsState
   implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
 /**
- * MERGE_TABLE_REGIONS_PREPARE = 1;
+ * DISPATCH_MERGING_REGIONS_PREPARE = 1;
  */
-MERGE_TABLE_REGIONS_PREPARE(1),
+DISPATCH_MERGING_REGIONS_PREPARE(1),
 /**
- * MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2;
+ * DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;
  */
-MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS(2),
+DISPATCH_MERGING_REGIONS_PRE_OPERATION(2),
 /**
- * MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3;
+ * DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;
  */
-MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION(3),
+DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS(3),
 /**
- * MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4;
+ * DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;
  */
-MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE(4),
+DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS(4),
 /**
- * MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5;
+ * DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;
  */
-MERGE_TABLE_REGIONS_CLOSE_REGIONS(5),
+DISPATCH_MERGING_REGIONS_POST_OPERATION(5),
+;
+
 /**
- * MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6;
+ * DISPATCH_MERGING_REGIONS_PREPARE = 1;
  */
-MERGE_TABLE_REGIONS_CREATE_MERGED_REGION(6),
+public static final int DISPATCH_MERGING_REGIONS_PREPARE_VALUE = 1;
 /**
- * MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7;
+ * DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;
  */
-MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION(7),
+public static final int DISPATCH_MERGING_REGIONS_PRE_OPERATION_VALUE = 2;
 /**
- * MERGE_TABLE_REGIONS_UPDATE_META = 8;
+ * DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;
  */
-MERGE_TABLE_REGIONS_UPDATE_META(8),
+public static final int 
DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS_VALUE = 3;
 /**
- * MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9;
+ * DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;
  */
-MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION(9),
+public static final int DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS_VALUE = 4;
 /**
- * MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10;
+ * DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;
  */
-MERGE_TABLE_REGIONS_OPEN_MERGED_REGION(10),
+public static final int DISPATCH_MERGING_REGIONS_POST_OPERATION_VALUE = 5;
+
+
+public final int getNumber() {
+  return value;
+}
+
 /**
- * MERGE_TABLE_REGIONS_POST_OPERATION = 11;
+ * @deprecated Use {@link #forNumber(int)} instead.
  */
-MERGE_TABLE_REGIONS_POST_OPERATION(11),
-;
+@java.lang.Deprecated
+public static DispatchMergingRegionsState valueOf(int value) {
+  return forNumber(value);
+}
+
+public static DispatchMergingRegionsState forNumber(int value) {
+  switch (value) {
+case 1: return DISPATCH_MERGING_REGIONS_PREPARE;
+case 2: return DISPATCH_MERGING_REGIONS_PRE_OPERATION;
+case 3: return DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS;
+case 4: return DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS;
+case 5: return DISPATCH_MERGING_REGIONS_POST_OPERATION;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+DispatchMergingRegionsState> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public DispatchMergingRegionsState findValueByNumber(int number) {
+  return DispatchMergingRegionsState.forNumber(number);
+}
+  };
+
+public final 

[10/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 2703947..9c2efa6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -19,55 +19,42 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
-import java.util.Set;
-import java.util.concurrent.locks.Lock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MasterWalManager;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.assignment.RegionTransitionProcedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * Handle crashed server. This is a port to ProcedureV2 of what used to be 
euphemistically called
  * ServerShutdownHandler.
  *
- * The procedure flow varies dependent on whether meta is assigned, if we 
are
- * doing distributed log replay versus distributed log splitting, and if we 
are to split logs at
- * all.
- *
- * This procedure asks that all crashed servers get processed equally; we 
yield after the
- * completion of each successful flow step. We do this so that we do not 
'deadlock' waiting on
- * a region assignment so we can replay edits which could happen if a region 
moved there are edits
- * on two servers for replay.
+ * The procedure flow varies dependent on whether meta is assigned and if 
we are to split logs.
  *
- * TODO: ASSIGN and WAIT_ON_ASSIGN (at least) are not idempotent. Revisit 
when assign is pv2.
- * TODO: We do not have special handling for system tables.
+ * We come in here after ServerManager has noticed a server has expired. 
Procedures
+ * queued on the rpc should have been notified about fail and should be 
concurrently
+ * getting themselves ready to assign elsewhere.
  */
 public class ServerCrashProcedure
 extends StateMachineProcedure
@@ -75,36 +62,6 @@ implements ServerProcedureInterface {
   private static final Log LOG = LogFactory.getLog(ServerCrashProcedure.class);
 
   /**
-   * Configuration key to set how long to wait in ms doing a quick check on 
meta state.
-   */
-  public static final String KEY_SHORT_WAIT_ON_META =
-  "hbase.master.servercrash.short.wait.on.meta.ms";
-
-  public static final int DEFAULT_SHORT_WAIT_ON_META = 1000;
-
-  /**
-   * Configuration key to set how many retries to cycle before we give up on 
meta.
-   * Each attempt will wait at least {@link #KEY_SHORT_WAIT_ON_META} 
milliseconds.
-   */
-  public static final String KEY_RETRIES_ON_META =
-  "hbase.master.servercrash.meta.retries";
-
-  public static final int DEFAULT_RETRIES_ON_META = 10;
-
-  /**
-   * Configuration key to set how long to wait in ms on regions in 

[28/50] [abbrv] hbase git commit: Rebase with master fixup

2017-05-23 Thread stack
Rebase with master fixup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bd672032
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bd672032
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bd672032

Branch: refs/heads/HBASE-14614
Commit: bd672032fc8efe1555eeeb35fbf94e798fb96296
Parents: 924bb80
Author: Michael Stack 
Authored: Fri May 5 21:58:22 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bd672032/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 79eb7d2..a1b33f7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -49,6 +49,7 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Put;
@@ -66,8 +67,10 @@ import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 



[08/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
new file mode 100644
index 000..e95932b
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
@@ -0,0 +1,108 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.security.PrivilegedAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.security.User;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Handles processing region merges. Put in a queue, owned by HRegionServer.
+ */
+// UNUSED: REMOVE!!!
+@InterfaceAudience.Private
+class RegionMergeRequest implements Runnable {
+  private static final Log LOG = LogFactory.getLog(RegionMergeRequest.class);
+  private final HRegionInfo region_a;
+  private final HRegionInfo region_b;
+  private final HRegionServer server;
+  private final boolean forcible;
+  private final User user;
+
+  RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible,
+  long masterSystemTime, User user) {
+Preconditions.checkNotNull(hrs);
+this.region_a = a.getRegionInfo();
+this.region_b = b.getRegionInfo();
+this.server = hrs;
+this.forcible = forcible;
+this.user = user;
+  }
+
+  @Override
+  public String toString() {
+return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible="
++ forcible;
+  }
+
+  private void doMerge() {
+boolean success = false;
+//server.metricsRegionServer.incrMergeRequest();
+
+if (user != null && user.getUGI() != null) {
+  user.getUGI().doAs (new PrivilegedAction() {
+@Override
+public Void run() {
+  requestRegionMerge();
+  return null;
+}
+  });
+} else {
+  requestRegionMerge();
+}
+  }
+
+  private void requestRegionMerge() {
+final TableName table = region_a.getTable();
+if (!table.equals(region_b.getTable())) {
+  LOG.error("Can't merge regions from two different tables: " + region_a + 
", " + region_b);
+  return;
+}
+
+// TODO: fake merged region for compat with the report protocol
+final HRegionInfo merged = new HRegionInfo(table);
+
+// Send the split request to the master. the master will do the validation 
on the split-key.
+// The parent region will be unassigned and the two new regions will be 
assigned.
+// hri_a and hri_b objects may not reflect the regions that will be 
created, those objectes
+// are created just to pass the information to the 
reportRegionStateTransition().
+if (!server.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, 
merged, region_a, region_b)) {
+  LOG.error("Unable to ask master to merge: " + region_a + ", " + 
region_b);
+}
+  }
+
+  @Override
+  public void run() {
+if (this.server.isStopping() || this.server.isStopped()) {
+  LOG.debug("Skipping merge because server is stopping="
+  + this.server.isStopping() + " or stopped=" + 
this.server.isStopped());
+  return;
+}
+
+doMerge();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java

[16/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
deleted file mode 100644
index dcbf5a4..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ /dev/null
@@ -1,1170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * Region state accountant. It holds the states of all regions in the memory.
- * In normal scenario, it should match the meta table and the true region 
states.
- *
- * This map is used by AssignmentManager to track region states.
- */
-@InterfaceAudience.Private
-public class RegionStates {
-  private static final Log LOG = LogFactory.getLog(RegionStates.class);
-
-  public final static RegionStateStampComparator REGION_STATE_COMPARATOR =
-new RegionStateStampComparator();
-
-  // This comparator sorts the RegionStates by time stamp then Region name.
-  // Comparing by timestamp alone can lead us to discard different 
RegionStates that happen
-  // to share a timestamp.
-  private static class RegionStateStampComparator implements 
Comparator {
-@Override
-public int compare(RegionState l, RegionState r) {
-  return Long.compare(l.getStamp(), r.getStamp()) == 0 ?
-  Bytes.compareTo(l.getRegion().getRegionName(), 
r.getRegion().getRegionName()) :
-  Long.compare(l.getStamp(), r.getStamp());
-}
-  }
-
-  /**
-   * Regions currently in transition.
-   */
-  final HashMap regionsInTransition = new HashMap<>();
-
-  /**
-   * Region encoded name to state map.
-   * All the regions should be in this map.
-   */
-  private final Map regionStates = new HashMap<>();
-
-  /**
-   * Holds mapping of table -> region state
-   */
-  private final Map> 
regionStatesTableIndex = new HashMap<>();
-
-  /**
-   * Server to regions assignment map.
-   * Contains the set of regions currently assigned to a given server.
-   */
-  private final Map serverHoldings = new 
HashMap<>();
-
-  /**
-   * Maintains the mapping from the default region to the replica regions.
-   */
-  private final Map 
defaultReplicaToOtherReplicas = new HashMap<>();
-
-  /**
-   * Region to server assignment map.
-   * Contains the server a given region is currently assigned to.
-   */
-  private final TreeMap regionAssignments = new 
TreeMap<>();
-
-  /**
-   * Encoded region name to server assignment map for re-assignment
-   * purpose. Contains the server a given region is last known assigned
-   * to, which has not completed log 

[18/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
deleted file mode 100644
index 69ebd97..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ /dev/null
@@ -1,3053 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionStateListener;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-import org.apache.hadoop.hbase.ipc.FailedServerException;
-import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.quotas.QuotaExceededException;
-import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
-import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.KeyLocker;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.RetryCounter;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import 

[01/50] [abbrv] hbase git commit: Making more logging trace-level [Forced Update!]

2017-05-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14614 052fab32a -> f24362c15 (forced update)


Making more logging trace-level


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/007d0d2b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/007d0d2b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/007d0d2b

Branch: refs/heads/HBASE-14614
Commit: 007d0d2bc41ac4e6033944e20ed8c443a904c4d6
Parents: f101abd
Author: Michael Stack 
Authored: Fri May 12 10:28:39 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java| 6 --
 .../hadoop/hbase/procedure2/store/wal/WALProcedureStore.java   | 4 ++--
 .../org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java | 4 ++--
 3 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/007d0d2b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 585762b..2221cfc 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -167,8 +167,10 @@ public class ProcedureWALFile implements 
Comparable {
   LOG.warn("Making " + archiveDir, ioe);
 }
 Path archivedFile = new Path(archiveDir, logFile.getName());
-LOG.info("ARCHIVED WAL (FIX) " + logFile + " to " + archivedFile);
-fs.rename(logFile, archivedFile);
+LOG.info("ARCHIVED WAL (TODO: FILES ARE NOT PURGED FROM ARCHIVE!) " + 
logFile + " to " + archivedFile);
+if (!fs.rename(logFile, archivedFile)) {
+  LOG.warn("Failed archive of " + logFile);
+}
 // fs.delete(logFile, false);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/007d0d2b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 7eeb2df..300e023 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -1079,8 +1079,8 @@ public class WALProcedureStore extends ProcedureStoreBase 
{
   private void removeAllLogs(long lastLogId) {
 if (logs.size() <= 1) return;
 
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Remove all state logs with ID less than " + lastLogId);
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Remove all state logs with ID less than " + lastLogId);
 }
 
 boolean removed = false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/007d0d2b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
index 69cd233..a6a5c17 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
@@ -88,8 +88,8 @@ public class RegionServerTracker extends ZooKeeperListener {
   int magicLen = ProtobufUtil.lengthOfPBMagic();
   ProtobufUtil.mergeFrom(rsInfoBuilder, data, magicLen, 
data.length - magicLen);
 }
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added tracking of RS " + nodePath);
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Added tracking of RS " + nodePath);
 }
   } catch (KeeperException e) {
 LOG.warn("Get Rs info port from ephemeral node", e);



[21/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index 2435564..1ccf488 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -2210,7 +2210,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_num = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getReqNumFieldBuilder() {
 if (reqNumBuilder_ == null) {
   reqNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2328,7 +2328,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota req_size = 2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getReqSizeFieldBuilder() {
 if (reqSizeBuilder_ == null) {
   reqSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2446,7 +2446,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_num = 3;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getWriteNumFieldBuilder() {
 if (writeNumBuilder_ == null) {
   writeNumBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2564,7 +2564,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota write_size = 4;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
 
   getWriteSizeFieldBuilder() {
 if (writeSizeBuilder_ == null) {
   writeSizeBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2682,7 +2682,7 @@ public final class QuotaProtos {
* optional .hbase.pb.TimedQuota read_num = 5;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota, 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder,
 

[24/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 711b9c8..1ffdc52 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -9414,53 +9414,70 @@ public final class AdminProtos {
 
   }
 
-  public interface CloseRegionForSplitOrMergeRequestOrBuilder extends
-  // 
@@protoc_insertion_point(interface_extends:hbase.pb.CloseRegionForSplitOrMergeRequest)
+  public interface FlushRegionRequestOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:hbase.pb.FlushRegionRequest)
   org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
 /**
- * repeated .hbase.pb.RegionSpecifier region = 1;
+ * required .hbase.pb.RegionSpecifier region = 1;
  */
-
java.util.List
 
-getRegionList();
+boolean hasRegion();
 /**
- * repeated .hbase.pb.RegionSpecifier region = 1;
+ * required .hbase.pb.RegionSpecifier region = 1;
  */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier 
getRegion(int index);
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier 
getRegion();
 /**
- * repeated .hbase.pb.RegionSpecifier region = 1;
+ * required .hbase.pb.RegionSpecifier region = 1;
  */
-int getRegionCount();
+
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder
 getRegionOrBuilder();
+
 /**
- * repeated .hbase.pb.RegionSpecifier region = 1;
+ * optional uint64 if_older_than_ts = 2;
  */
-java.util.List
 
-getRegionOrBuilderList();
+boolean hasIfOlderThanTs();
 /**
- * repeated .hbase.pb.RegionSpecifier region = 1;
+ * optional uint64 if_older_than_ts = 2;
  */
-
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder
 getRegionOrBuilder(
-int index);
+long getIfOlderThanTs();
+
+/**
+ * 
+ * whether to write a marker to WAL even if not flushed
+ * 
+ *
+ * optional bool write_flush_wal_marker = 3;
+ */
+boolean hasWriteFlushWalMarker();
+/**
+ * 
+ * whether to write a marker to WAL even if not flushed
+ * 
+ *
+ * optional bool write_flush_wal_marker = 3;
+ */
+boolean getWriteFlushWalMarker();
   }
   /**
* 
**
-   * Closes the specified region(s) for
-   * split or merge
+   * Flushes the MemStore of the specified region.
+   * p
+   * This method is synchronous.
* 
*
-   * Protobuf type {@code hbase.pb.CloseRegionForSplitOrMergeRequest}
+   * Protobuf type {@code hbase.pb.FlushRegionRequest}
*/
-  public  static final class CloseRegionForSplitOrMergeRequest extends
+  public  static final class FlushRegionRequest extends
   org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
-  // 
@@protoc_insertion_point(message_implements:hbase.pb.CloseRegionForSplitOrMergeRequest)
-  CloseRegionForSplitOrMergeRequestOrBuilder {
-// Use CloseRegionForSplitOrMergeRequest.newBuilder() to construct.
-private 
CloseRegionForSplitOrMergeRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.FlushRegionRequest)
+  FlushRegionRequestOrBuilder {
+// Use FlushRegionRequest.newBuilder() to construct.
+private 
FlushRegionRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
   super(builder);
 }
-private CloseRegionForSplitOrMergeRequest() {
-  region_ = java.util.Collections.emptyList();
+private FlushRegionRequest() {
+  ifOlderThanTs_ = 0L;
+  writeFlushWalMarker_ = false;
 }
 
 @java.lang.Override
@@ -9468,7 +9485,7 @@ public final class AdminProtos {
 getUnknownFields() {
   return this.unknownFields;
 }
-private CloseRegionForSplitOrMergeRequest(
+private FlushRegionRequest(
 org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
 throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
@@ -9492,12 +9509,26 @@ public final class AdminProtos {
   break;
 }
 case 10: {
-  if (!((mutable_bitField0_ & 0x0001) 

[22/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 798932e..d5846ce 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -842,7 +842,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -960,7 +960,7 @@ public final class MasterProtos {
* required .hbase.pb.ColumnFamilySchema column_families = 
2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
 
   getColumnFamiliesFieldBuilder() {
 if (columnFamiliesBuilder_ == null) {
   columnFamiliesBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -2257,7 +2257,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3622,7 +3622,7 @@ public final class MasterProtos {
* required .hbase.pb.TableName table_name = 1;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>
 
   getTableNameFieldBuilder() {
 if (tableNameBuilder_ == null) {
   tableNameBuilder_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -3740,7 +3740,7 @@ public final class MasterProtos {
* required .hbase.pb.ColumnFamilySchema column_families = 
2;
*/
   private 
org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>
+  

[05/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
new file mode 100644
index 000..003dfdd
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import static org.junit.Assert.assertEquals;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestRegionStates {
+  private static final Log LOG = LogFactory.getLog(TestRegionStates.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static ThreadPoolExecutor threadPool;
+  private static ExecutorCompletionService executorService;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+threadPool = Threads.getBoundedCachedThreadPool(32, 60L, TimeUnit.SECONDS,
+  Threads.newDaemonThreadFactory("ProcedureDispatcher",
+new UncaughtExceptionHandler() {
+  @Override
+  public void uncaughtException(Thread t, Throwable e) {
+LOG.warn("Failed thread " + t.getName(), e);
+  }
+}));
+executorService = new ExecutorCompletionService(threadPool);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+threadPool.shutdown();
+  }
+
+  @Before
+  public void testSetup() {
+  }
+
+  @After
+  public void testTearDown() throws Exception {
+while (true) {
+  Future f = executorService.poll();
+  if (f == null) break;
+  f.get();
+}
+  }
+
+  private static void waitExecutorService(final int count) throws Exception {
+for (int i = 0; i < count; ++i) {
+  executorService.take().get();
+}
+  }
+
+  // ==
+  //  Regions related
+  // ==
+
+  @Test
+  public void testRegionDoubleCreation() throws Exception {
+// NOTE: HRegionInfo sort by table first, so we are relying on that
+final TableName TABLE_NAME_A = TableName.valueOf("testOrderedByTableA");
+final TableName TABLE_NAME_B = TableName.valueOf("testOrderedByTableB");
+final TableName TABLE_NAME_C = TableName.valueOf("testOrderedByTableC");
+final RegionStates stateMap = new RegionStates();
+final int NRUNS = 1000;
+final int NSMALL_RUNS = 3;
+
+// add some regions for table B
+for (int i = 0; i < NRUNS; ++i) {
+  addRegionNode(stateMap, TABLE_NAME_B, i);
+}
+// re-add the regions for table B
+for (int i = 0; i < NRUNS; ++i) {
+  addRegionNode(stateMap, TABLE_NAME_B, i);
+}
+waitExecutorService(NRUNS * 2);
+
+// add two other tables A and C that will be placed before and after table 
B (sort order)
+for (int i = 0; i < NSMALL_RUNS; ++i) {
+ 

[15/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
new file mode 100644
index 000..f1c1a40
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -0,0 +1,1792 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.RegionException;
+import org.apache.hadoop.hbase.RegionStateListener;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.favored.FavoredNodeLoadBalancer;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
+import org.apache.hadoop.hbase.master.AssignmentListener;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
+import org.apache.hadoop.hbase.master.NoSuchProcedureException;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
+// TODO: why are they here?
+import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.quotas.QuotaExceededException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import 

[02/50] [abbrv] hbase git commit: Fix archiving of pv2 WAL files

2017-05-23 Thread stack
Fix archiving of pv2 WAL files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/53b865aa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/53b865aa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/53b865aa

Branch: refs/heads/HBASE-14614
Commit: 53b865aa9922143e2e95af09a73814a9e7d60047
Parents: 007d0d2
Author: Michael Stack 
Authored: Fri May 12 13:02:32 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../procedure2/store/wal/ProcedureWALFile.java  | 28 ++--
 .../procedure2/store/wal/WALProcedureStore.java | 28 +---
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 ++-
 .../assignment/TestAssignmentManager.java   |  2 +-
 4 files changed, 36 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/53b865aa/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
index 2221cfc..42abe8f 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
@@ -156,22 +155,23 @@ public class ProcedureWALFile implements 
Comparable {
 this.logSize += size;
   }
 
-  public void removeFile() throws IOException {
+  public void removeFile(final Path walArchiveDir) throws IOException {
 close();
-// TODO: FIX THIS. MAKE THIS ARCHIVE FORMAL.
-Path archiveDir =
-new Path(logFile.getParent().getParent(), 
HConstants.HFILE_ARCHIVE_DIRECTORY);
-try {
-  fs.mkdirs(archiveDir);
-} catch (IOException ioe) {
-  LOG.warn("Making " + archiveDir, ioe);
+boolean archived = false;
+if (walArchiveDir != null) {
+  Path archivedFile = new Path(walArchiveDir, logFile.getName());
+  LOG.info("ARCHIVED (TODO: FILES ARE NOT PURGED FROM ARCHIVE!) " + 
logFile + " to " + walArchiveDir);
+  if (!fs.rename(logFile, archivedFile)) {
+LOG.warn("Failed archive of " + logFile + ", deleting");
+  } else {
+archived = true;
+  }
 }
-Path archivedFile = new Path(archiveDir, logFile.getName());
-LOG.info("ARCHIVED WAL (TODO: FILES ARE NOT PURGED FROM ARCHIVE!) " + 
logFile + " to " + archivedFile);
-if (!fs.rename(logFile, archivedFile)) {
-  LOG.warn("Failed archive of " + logFile);
+if (!archived) {
+  if (!fs.delete(logFile, false)) {
+LOG.warn("Failed delete of " + logFile);
+  }
 }
-// fs.delete(logFile, false);
   }
 
   public void setProcIds(long minId, long maxId) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/53b865aa/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 300e023..df818fe 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -124,6 +124,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   private final Configuration conf;
   private final FileSystem fs;
   private final Path walDir;
+  private final Path walArchiveDir;
 
   private final AtomicReference syncException = new 
AtomicReference<>();
   private final AtomicBoolean loading = new AtomicBoolean(true);
@@ -185,9 +186,15 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
   public WALProcedureStore(final Configuration conf, final FileSystem fs, 
final Path walDir,
   final LeaseRecovery leaseRecovery) {
+this(conf, fs, walDir, null, leaseRecovery);
+  }
+
+  public WALProcedureStore(final Configuration conf, final FileSystem fs, 
final Path walDir,
+  final 

[31/50] [abbrv] hbase git commit: Debugging around the ask if a region is splittable -- doesn't seem to be working

2017-05-23 Thread stack
Debugging around the ask if a region is splittable -- doesn't seem to be working


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fa191d9a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fa191d9a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fa191d9a

Branch: refs/heads/HBASE-14614
Commit: fa191d9a5d62a17884ab4189ca898e1519193b32
Parents: edad94e
Author: Michael Stack 
Authored: Sun May 14 22:45:05 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hbase/master/assignment/SplitTableRegionProcedure.java   | 1 +
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fa191d9a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 4ed1931..6815e9f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -365,6 +365,7 @@ public class SplitTableRegionProcedure
 GetRegionInfoResponse response =
 Util.getRegionInfoResponse(env, node.getRegionLocation(), 
node.getRegionInfo());
 splittable = response.hasSplittable() && response.getSplittable();
+LOG.info("REMOVE splittable " + splittable + " " + this + " " + node);
   } catch (IOException e) {
 splittableCheckIOE = e;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/fa191d9a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f49e7bf..ac0867c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1392,7 +1392,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 
   @Override
   public boolean isSplittable() {
-return isAvailable() && !hasReferences();
+boolean result = isAvailable() && !hasReferences();
+LOG.info("ASKED IF SPLITTABLE " + result, new Throwable("LOGGING"));
+return result;
   }
 
   @Override



[11/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
deleted file mode 100644
index 3600fe0..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
+++ /dev/null
@@ -1,906 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaMutationAnnotation;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.CatalogJanitor;
-import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * The procedure to Merge a region in a table.
- */
-@InterfaceAudience.Private
-public class MergeTableRegionsProcedure
-extends AbstractStateMachineTableProcedure {
-  private static final Log LOG = 
LogFactory.getLog(MergeTableRegionsProcedure.class);
-
-  private Boolean traceEnabled;
-  private AssignmentManager assignmentManager;
-  private int timeout;
-  private ServerName regionLocation;
-  private String regionsToMergeListFullName;
-  private String regionsToMergeListEncodedName;
-
-  private HRegionInfo [] regionsToMerge;
-  private HRegionInfo mergedRegionInfo;
-  private boolean forcible;
-
-  public MergeTableRegionsProcedure() {
-this.traceEnabled = isTraceEnabled();
-this.assignmentManager = null;
-this.timeout = -1;
-this.regionLocation = null;
-this.regionsToMergeListFullName = null;
-this.regionsToMergeListEncodedName = null;
-  }
-
-  public MergeTableRegionsProcedure(
-  final MasterProcedureEnv env,
-  final HRegionInfo[] regionsToMerge,
-  final boolean forcible) throws IOException {
-

[29/50] [abbrv] hbase git commit: Fix failing hbase-procedure tests

2017-05-23 Thread stack
Fix failing hbase-procedure tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c99a03b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c99a03b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c99a03b

Branch: refs/heads/HBASE-14614
Commit: 2c99a03b4164f02aa67dfb278aa12c177c9eb33e
Parents: bd67203
Author: Michael Stack 
Authored: Sat May 6 10:45:58 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../java/org/apache/hadoop/hbase/procedure2/Procedure.java | 2 +-
 .../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java  | 6 --
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c99a03b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 5527076..9c47957 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -351,7 +351,7 @@ public abstract class Procedure implements 
Comparable {
* @param builder the string builder to use to append the proc specific 
information
*/
   protected void toStringClassDetails(StringBuilder builder) {
-builder.append(getClass().getSimpleName());
+builder.append(getClass().getName());
   }
 
   // ==

http://git-wip-us.apache.org/repos/asf/hbase/blob/2c99a03b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index d36be64..e819ae8 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1403,11 +1403,13 @@ public class ProcedureExecutor {
 reExecute = true;
 LOG.info("Short-circuit to rexecute for pid=" + 
procedure.getProcId());
   } else {
-// yield the current procedure, and make the subprocedure runnable
+// Yield the current procedure, and make the subprocedure runnable
+// subprocs may come back 'null'.
 subprocs = initializeChildren(procStack, procedure, subprocs);
 LOG.info("Initialized subprocedures=" +
+  (subprocs == null? null:
 Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-collect(Collectors.toList()).toString());
+collect(Collectors.toList()).toString()));
   }
 } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
   if (LOG.isTraceEnabled()) {



[26/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)
Move to a new AssignmentManager, one that describes Assignment using
a State Machine built on top of ProcedureV2 facility.

Includes four patches from Matteo's repository and then fix up to get it 
all to
pass, filling in some missing functionality, fix of findbugs, fixing bugs, 
etc..

This doc. keeps state on where we are at w/ the new AM:

https://docs.google.com/document/d/1eVKa7FHdeoJ1-9o8yZcOTAQbv0u0bblBlCCzVSIn69g/edit#heading=h.vfdoxqut9lqn
Includes list of tests disabled by this patch with reasons why.

I applied the two patches in one go because applying each independently puts
hbase in a non-working state.

1. HBASE-14616 Procedure v2 - Replace the old AM with the new AM
The basis comes from Matteo's repo here:
  
https://github.com/matteobertozzi/hbase/commit/689227fcbfe8e6588433dbcdabf4526e3d478b2e

Patch replaces old AM with the new under subpackage master.assignment.
Mostly just updating classes to use new AM -- import changes -- rather
than the old. It also removes old AM and supporting classes.
See below for more detail.

2. HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)
Adds running of remote procedure. Adds batching of remote calls.
Adds support for assign/unassign in procedures. Adds version info
reporting in rpc. Adds start of an AMv2.

3. and 4. are fixes around merge and split.

This work mostly comes from:

https://github.com/matteobertozzi/hbase/commit/3622cba4e331d2fc7bfc1932abb4c9cbf5802efa

Reporting of remote RS version is from here:

https://github.com/matteobertozzi/hbase/commit/ddb4df3964e8298c88c0210e83493aa91ac0942d.patch

And remote dispatch of procedures is from:

https://github.com/matteobertozzi/hbase/commit/186b9e7c4dae61a79509a6c3aad7f80ec61345e5

The split merge patches from here are also melded in:

https://github.com/matteobertozzi/hbase/commit/9a3a95a2c2974842a4849d1ad867e70764e7f707
and 
https://github.com/matteobertozzi/hbase/commit/d6289307a02a777299f65238238a2a8af3253067

Adds testing util for new AM and new sets of tests.

Does a bunch of fixup on logging so its possible to follow a procedures'
narrative by grepping procedure id. We spewed loads of log too on big
transitions such as master fail; fixed.

Details:

M hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
 Takes List of regionstates on construction rather than a Set.
 NOTE! This is a change in a public class.

M hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
 Add utility getShortNameToLog

M 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
M 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
 Add support for dispatching assign, split and merge processes.

M hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
 Purge old overlapping states: PENDING_OPEN, PENDING_CLOSE, etc.

A 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 Dispatch remote procedures every 150ms or 32 items -- whichever
 happens first (configurable). Runs a timeout thread. This facility is
 not on yet; will come in as part of a later fix. Currently works a
 region at a time. This class carries notion of a remote procedure and of a 
buffer full of these.
 "hbase.procedure.remote.dispatcher.threadpool.size" with default = 128
 "hbase.procedure.remote.dispatcher.delay.msec" with default = 150ms
 "hbase.procedure.remote.dispatcher.max.queue.size" with default = 32

M 
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 Add in support for merge. Remove no-longer used methods.

M hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
 Add execute procedures call ExecuteProcedures.

M hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
 Add assign and unassign state support for procedures.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
 Adds getting RS version out of RPC
 Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000)

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
 Remove periodic metrics chore. This is done over in new AM now.
 Replace AM with the new.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
 Have AMv2 handle assigning meta.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 Extract version number of the server making rpc.

A 

[14/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
new file mode 100644
index 000..177f397
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -0,0 +1,737 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaMutationAnnotation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.master.CatalogJanitor;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.RegionState;
+import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * The procedure to Merge a region in a table.
+ */
+@InterfaceAudience.Private
+public class MergeTableRegionsProcedure
+extends AbstractStateMachineTableProcedure {
+  private static final Log LOG = 
LogFactory.getLog(MergeTableRegionsProcedure.class);
+
+  private Boolean traceEnabled;
+
+  private ServerName regionLocation;
+  private String regionsToMergeListFullName;
+
+  private HRegionInfo[] regionsToMerge;
+  private HRegionInfo mergedRegion;
+  private boolean forcible;
+
+  public MergeTableRegionsProcedure() {
+// Required by the Procedure framework to create the procedure on replay
+  }
+
+  public MergeTableRegionsProcedure(final MasterProcedureEnv env,
+  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB) 
throws IOException {
+this(env, regionToMergeA, regionToMergeB, false);
+  }
+
+  public MergeTableRegionsProcedure(final MasterProcedureEnv env,
+  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB,
+  final boolean forcible) throws MergeRegionException {
+this(env, new HRegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
+  }
+
+  public MergeTableRegionsProcedure(final MasterProcedureEnv env,
+  final HRegionInfo[] 

[37/50] [abbrv] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) b

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/5c422d62/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
index 9780e4f..5c72331 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
@@ -2533,6 +2533,204 @@ public final class MasterProcedureProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.MoveRegionState)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.GCRegionState}
+   */
+  public enum GCRegionState
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * GC_REGION_PREPARE = 1;
+ */
+GC_REGION_PREPARE(1),
+/**
+ * GC_REGION_ARCHIVE = 2;
+ */
+GC_REGION_ARCHIVE(2),
+/**
+ * GC_REGION_PURGE_METADATA = 3;
+ */
+GC_REGION_PURGE_METADATA(3),
+;
+
+/**
+ * GC_REGION_PREPARE = 1;
+ */
+public static final int GC_REGION_PREPARE_VALUE = 1;
+/**
+ * GC_REGION_ARCHIVE = 2;
+ */
+public static final int GC_REGION_ARCHIVE_VALUE = 2;
+/**
+ * GC_REGION_PURGE_METADATA = 3;
+ */
+public static final int GC_REGION_PURGE_METADATA_VALUE = 3;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static GCRegionState valueOf(int value) {
+  return forNumber(value);
+}
+
+public static GCRegionState forNumber(int value) {
+  switch (value) {
+case 1: return GC_REGION_PREPARE;
+case 2: return GC_REGION_ARCHIVE;
+case 3: return GC_REGION_PURGE_METADATA;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+GCRegionState> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public GCRegionState findValueByNumber(int number) {
+  return GCRegionState.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(20);
+}
+
+private static final GCRegionState[] VALUES = values();
+
+public static GCRegionState valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int value;
+
+private GCRegionState(int value) {
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.GCRegionState)
+  }
+
+  /**
+   * Protobuf enum {@code hbase.pb.GCMergedRegionsState}
+   */
+  public enum GCMergedRegionsState
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * GC_MERGED_REGIONS_PREPARE = 1;
+ */
+GC_MERGED_REGIONS_PREPARE(1),
+/**
+ * GC_MERGED_REGIONS_PURGE = 2;
+ */
+GC_MERGED_REGIONS_PURGE(2),
+/**
+ * GC_REGION_EDIT_METADATA = 3;
+ */
+GC_REGION_EDIT_METADATA(3),
+;
+
+/**
+ * GC_MERGED_REGIONS_PREPARE = 1;
+ */
+public static final int GC_MERGED_REGIONS_PREPARE_VALUE = 1;
+/**
+ * GC_MERGED_REGIONS_PURGE = 2;
+ */
+public static final int GC_MERGED_REGIONS_PURGE_VALUE = 2;
+/**
+ * GC_REGION_EDIT_METADATA = 3;
+ */
+public static final int GC_REGION_EDIT_METADATA_VALUE = 3;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+

[19/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
index 454e3bc..4d5953c 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java
@@ -357,7 +357,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -373,7 +373,7 @@ public final class SnapshotProtos {
 getNameBytes() {
   java.lang.Object ref = name_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 name_ = b;
@@ -407,7 +407,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -427,7 +427,7 @@ public final class SnapshotProtos {
 getTableBytes() {
   java.lang.Object ref = table_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 table_ = b;
@@ -499,7 +499,7 @@ public final class SnapshotProtos {
   if (ref instanceof java.lang.String) {
 return (java.lang.String) ref;
   } else {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
 (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 
ref;
 java.lang.String s = bs.toStringUtf8();
 if (bs.isValidUtf8()) {
@@ -515,7 +515,7 @@ public final class SnapshotProtos {
 getOwnerBytes() {
   java.lang.Object ref = owner_;
   if (ref instanceof java.lang.String) {
-org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
 (java.lang.String) ref);
 owner_ = b;
@@ -1047,7 +1047,7 @@ public final class SnapshotProtos {
   getNameBytes() {
 java.lang.Object ref = name_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   name_ = b;
@@ -1135,7 +1135,7 @@ public final class SnapshotProtos {
   getTableBytes() {
 java.lang.Object ref = table_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   table_ = b;
@@ -1323,7 +1323,7 @@ public final class SnapshotProtos {
   getOwnerBytes() {
 java.lang.Object ref = owner_;
 if (ref instanceof String) {
-  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
   
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
   (java.lang.String) ref);
   owner_ = b;
@@ 

[12/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index b53ce45..4d45af3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -21,12 +21,9 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
@@ -34,17 +31,11 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
 
 @InterfaceAudience.Private
 public class DisableTableProcedure
@@ -116,12 +107,8 @@ public class DisableTableProcedure
 setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE);
 break;
   case DISABLE_TABLE_MARK_REGIONS_OFFLINE:
-if (markRegionsOffline(env, tableName, true) ==
-MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
-  
setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE);
-} else {
-  LOG.trace("Retrying later to disable the missing regions");
-}
+
addChildProcedure(env.getAssignmentManager().createUnassignProcedures(tableName));
+setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE);
 break;
   case DISABLE_TABLE_SET_DISABLED_TABLE_STATE:
 setTableStateToDisabled(env, tableName);
@@ -290,83 +277,6 @@ public class DisableTableProcedure
   }
 
   /**
-   * Mark regions of the table offline with retries
-   * @param env MasterProcedureEnv
-   * @param tableName the target table
-   * @param retryRequired whether to retry if the first run failed
-   * @return whether the operation is fully completed or being interrupted.
-   * @throws IOException
-   */
-  protected static MarkRegionOfflineOpResult markRegionsOffline(
-  final MasterProcedureEnv env,
-  final TableName tableName,
-  final Boolean retryRequired) throws IOException {
-// Dev consideration: add a config to control max number of retry. For 
now, it is hard coded.
-int maxTry = (retryRequired ? 10 : 1);
-MarkRegionOfflineOpResult operationResult =
-MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED;
-do {
-  try {
-operationResult = markRegionsOffline(env, tableName);
-if (operationResult == 
MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
-  break;
-}
-maxTry--;
-  } catch (Exception e) {
-LOG.warn("Received exception while marking regions online. tries left: 
" + maxTry, e);
-maxTry--;
-if (maxTry > 0) {
-  continue; // we still have some retry left, try again.
-}
-throw e;
-  }
-} while (maxTry > 0);
-
-if (operationResult != 
MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
-  LOG.warn("Some or all regions of the Table '" + tableName + "' were 
still online");
-}
-
-return operationResult;
-  }
-
-  /**
-   * Mark regions of the table offline
-   * @param env MasterProcedureEnv
-   * @param tableName the target table
-   * @return whether the operation is fully completed or being interrupted.
-   * @throws IOException
-   */
-  private static MarkRegionOfflineOpResult markRegionsOffline(
-  final MasterProcedureEnv env,
-  final TableName tableName) throws IOException {
-// Get list of online 

[13/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
new file mode 100644
index 000..cd8b858
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -0,0 +1,348 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.assignment;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
+import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+
+/**
+ * Base class for the Assign and Unassign Procedure.
+ * There can only be one RegionTransitionProcedure per region running at a time
+ * since each procedure takes a lock on the region (see 
MasterProcedureScheduler).
+ *
+ * This procedure is asynchronous and responds to external events.
+ * The AssignmentManager will notify this procedure when the RS completes
+ * the operation and reports the transitioned state
+ * (see the Assign and Unassign class for more details).
+ * Procedures move from the REGION_TRANSITION_QUEUE state when they are
+ * first submitted, to the REGION_TRANSITION_DISPATCH state when the request
+ * to remote server is done. They end in the REGION_TRANSITION_FINISH state.
+ * the 
+ */
+@InterfaceAudience.Private
+public abstract class RegionTransitionProcedure
+extends Procedure
+implements TableProcedureInterface,
+  RemoteProcedure {
+  private static final Log LOG = 
LogFactory.getLog(RegionTransitionProcedure.class);
+
+  protected final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  private RegionTransitionState transitionState =
+  RegionTransitionState.REGION_TRANSITION_QUEUE;
+  private HRegionInfo regionInfo;
+  private volatile boolean lock = false;
+  // Server we assign or unassign from -- the target.
+  protected volatile ServerName server;
+
+  public RegionTransitionProcedure() {
+// Required by the Procedure framework to create the procedure on replay
+super();
+  }
+
+  public RegionTransitionProcedure(final HRegionInfo regionInfo) {
+this.regionInfo = regionInfo;
+  }
+
+  public HRegionInfo getRegionInfo() {
+return regionInfo;
+  }
+
+  protected void setRegionInfo(final HRegionInfo regionInfo) {
+// Setter is for deserialization.
+this.regionInfo = regionInfo;
+  }
+
+  @Override
+  public TableName getTableName() {
+HRegionInfo hri = getRegionInfo();
+return hri != null? hri.getTable(): null;
+  }
+
+  public boolean isMeta() {
+return TableName.isMetaTableName(getTableName());
+  }
+
+  @Override
+  public void toStringClassDetails(final StringBuilder sb) {
+sb.append(getClass().getSimpleName());
+sb.append(" table=");
+sb.append(getTableName());
+sb.append(", region=");
+

[07/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java
index 3eb65a5..7c0aa74 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java
@@ -35,18 +35,19 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
-import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.favored.FavoredNodeLoadBalancer;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
-import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import 
org.apache.hadoop.hbase.master.balancer.LoadOnlyFavoredStochasticBalancer;
@@ -55,9 +56,7 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -66,6 +65,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 
 @Category({ClientTests.class, MediumTests.class})
 public class TestTableFavoredNodes {
@@ -76,7 +78,6 @@ public class TestTableFavoredNodes {
   private final static int WAIT_TIMEOUT = 6;
   private final static int SLAVES = 8;
   private FavoredNodesManager fnm;
-  private RegionStates regionStates;
   private Admin admin;
 
   private final byte[][] splitKeys = new byte[][] {Bytes.toBytes(1), 
Bytes.toBytes(9)};
@@ -101,8 +102,8 @@ public class TestTableFavoredNodes {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-TEST_UTIL.cleanupTestDir();
 TEST_UTIL.shutdownMiniCluster();
+TEST_UTIL.cleanupTestDir();
   }
 
   @Before
@@ -111,8 +112,6 @@ public class TestTableFavoredNodes {
 admin = TEST_UTIL.getAdmin();
 admin.setBalancerRunning(false, true);
 admin.enableCatalogJanitor(false);
-regionStates =
-  
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
   }
 
   /*
@@ -165,8 +164,9 @@ public class TestTableFavoredNodes {
   @Test
   public void testSplitTable() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
-TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys);
+Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("f"), splitKeys);
 TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
+final int numberOfRegions = admin.getTableRegions(t.getName()).size();
 
 checkIfFavoredNodeInformationIsCorrect(tableName);
 
@@ -176,13 +176,14 @@ public class TestTableFavoredNodes {
 List parentFN = fnm.getFavoredNodes(parent);
 assertNotNull("FN should not be null for region: " + parent, parentFN);
 
+LOG.info("SPLITTING TABLE");
 admin.split(tableName, splitPoint);
 
 TEST_UTIL.waitUntilNoRegionsInTransition(WAIT_TIMEOUT);
-waitUntilTableRegionCountReached(tableName, NUM_REGIONS + 1);
+LOG.info("FINISHED WAITING ON RIT");
+waitUntilTableRegionCountReached(tableName, numberOfRegions + 1);
 
-// All regions should have favored nodes
-checkIfFavoredNodeInformationIsCorrect(tableName);
+// All regions should have favored nodes
checkIfFavoredNodeInformationIsCorrect(tableName);
 
 // Get the daughters of parent.
 HRegionInfo daughter1 = locator.getRegionLocation(parent.getStartKey(), 
true).getRegionInfo();
@@ -210,7 +211,10 @@ public class 

[30/50] [abbrv] hbase git commit: Exceptions are not logging identifying details like region; makes it hard to debug who the problem is

2017-05-23 Thread stack
Exceptions are not logging identifying details like region; makes it hard to 
debug who the problem is


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c94c44d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c94c44d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c94c44d9

Branch: refs/heads/HBASE-14614
Commit: c94c44d9c4e82b785f1e4847dcc65706aa19e2f3
Parents: 2c99a03
Author: Michael Stack 
Authored: Sat May 6 11:22:57 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../master/assignment/AssignmentManager.java  | 18 --
 .../hbase/master/assignment/RegionStates.java |  2 +-
 2 files changed, 9 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c94c44d9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 2986868..ed55235 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -983,7 +983,7 @@ public class AssignmentManager implements ServerListener {
   }
 } catch (UnexpectedStateException e) {
   final ServerName serverName = serverNode.getServerName();
-  LOG.warn("Killing " + serverName + ": " + e.getMessage());
+  LOG.warn("KILLING " + serverName + ": " + e.getMessage());
   killRegionServer(serverNode);
 }
   }
@@ -1003,25 +1003,23 @@ public class AssignmentManager implements 
ServerListener {
 synchronized (regionNode) {
   if (regionNode.isInState(State.OPENING, State.OPEN)) {
 if (!regionNode.getRegionLocation().equals(serverName)) {
-  throw new UnexpectedStateException(
-"Reported OPEN on server=" + serverName +
-" but state found says server=" + 
regionNode.getRegionLocation());
+  throw new UnexpectedStateException(regionNode.toString() +
+"reported OPEN on server=" + serverName +
+" but state has otherwise.");
 } else if (regionNode.isInState(State.OPENING)) {
   try {
 if (!reportTransition(regionNode, serverNode, 
TransitionCode.OPENED, 0)) {
-  LOG.warn("Reported OPEN on server=" + serverName +
-" but state found says " + regionNode + " and NO procedure 
is running");
+  LOG.warn(regionNode.toString() + " reported OPEN on server=" 
+ serverName +
+" but state has otherwise AND NO procedure is running");
 }
   } catch (UnexpectedStateException e) {
-LOG.warn("Unexpected exception while trying to report " + 
regionNode +
-  " as open: " + e.getMessage(), e);
+LOG.warn(regionNode.toString() + " reported unexpteced OPEN: " 
+ e.getMessage(), e);
   }
 }
   } else if (!regionNode.isInState(State.CLOSING, State.SPLITTING)) {
 // TODO: We end up killing the RS if we get a report while we 
already
 // transitioned to close or split. we should have a 
timeout/timestamp to compare
-throw new UnexpectedStateException(
-"Reported OPEN but state found says " + regionNode.getState());
+throw new UnexpectedStateException(regionNode.toString() + " 
reported unexpected OPEN");
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c94c44d9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index aa2627c..1c852c9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -173,7 +173,7 @@ public class RegionStates {
 public ServerName setRegionLocation(final ServerName serverName) {
   ServerName lastRegionLocation = this.regionLocation;
   if (serverName == null) {
-LOG.debug("REMOVE tracking when we are set to null", new 
Throwable("DEBUG"));
+LOG.debug("REMOVE tracking when we are set to null " 

[39/50] [abbrv] hbase git commit: Need to check server when doing ServerCrashProcedure; we had it but I removed it a few patches back... makes for SCPs stamping on each other, failing ongoing assigns

2017-05-23 Thread stack
Need to check server when doing ServerCrashProcedure; we had it but I removed 
it a few patches back... makes for SCPs stamping on each other, failing ongoing 
assigns


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/edad94e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/edad94e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/edad94e0

Branch: refs/heads/HBASE-14614
Commit: edad94e0f3771f7daa5f74e19842ce955f2d9f6a
Parents: 2e78073
Author: Michael Stack 
Authored: Sat May 13 21:37:52 2017 -0700
Committer: Michael Stack 
Committed: Tue May 23 08:36:53 2017 -0700

--
 .../hbase/master/assignment/AssignProcedure.java  |  8 
 .../master/assignment/RegionTransitionProcedure.java  |  6 ++
 .../hbase/master/assignment/UnassignProcedure.java|  5 +
 .../hbase/master/procedure/ServerCrashException.java  |  7 +--
 .../hbase/master/procedure/ServerCrashProcedure.java  | 14 --
 5 files changed, 36 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/edad94e0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
index 36f6f08..42ece16 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
@@ -327,4 +327,12 @@ public class AssignProcedure extends 
RegionTransitionProcedure {
 super.toStringClassDetails(sb);
 if (this.targetServer != null) sb.append(", 
target=").append(this.targetServer);
   }
+
+  @Override
+  public ServerName getServer(final MasterProcedureEnv env) {
+RegionStateNode node =
+
env.getAssignmentManager().getRegionStates().getRegionNode(this.getRegionInfo());
+if (node == null) return null;
+return node.getRegionLocation();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/edad94e0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 6dc809b..49124ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -372,4 +372,10 @@ public abstract class RegionTransitionProcedure
 // the client does not know about this procedure.
 return false;
   }
+
+  /**
+   * Used by ServerCrashProcedure to see if this Assign/Unassign needs 
processing.
+   * @return ServerName the Assign or Unassign is going against.
+   */
+  public abstract ServerName getServer(final MasterProcedureEnv env);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/edad94e0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index a82a2f5..126718a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -239,4 +239,9 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 super.toStringClassDetails(sb);
 sb.append(", server=").append(this.destinationServer);
   }
+
+  @Override
+  public ServerName getServer(final MasterProcedureEnv env) {
+return this.destinationServer;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/edad94e0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashException.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashException.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashException.java
index dd1874b..26aba9e 100644
--- 

[04/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
index c5c6484..8872c63 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -30,18 +31,19 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
-  @Rule
-  public TestName name = new TestName();
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
+  @Rule public TestName name = new TestName();
 
   @Test(timeout=6)
   public void testModifyTable() throws Exception {
@@ -208,8 +210,7 @@ public class TestModifyTableProcedure extends 
TestTableDDLProcedureBase {
   new ModifyTableProcedure(procExec.getEnvironment(), htd));
 
 // Restart the executor and execute the step twice
-int numberOfSteps = ModifyTableState.values().length;
-MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId, numberOfSteps);
+MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId);
 
 // Validate descriptor
 HTableDescriptor currentHtd = 
UTIL.getAdmin().getTableDescriptor(tableName);
@@ -246,8 +247,7 @@ public class TestModifyTableProcedure extends 
TestTableDDLProcedureBase {
   new ModifyTableProcedure(procExec.getEnvironment(), htd));
 
 // Restart the executor and execute the step twice
-int numberOfSteps = ModifyTableState.values().length;
-MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId, numberOfSteps);
+MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, 
procId);
 
 // Validate descriptor
 HTableDescriptor currentHtd = 
UTIL.getAdmin().getTableDescriptor(tableName);
@@ -282,7 +282,7 @@ public class TestModifyTableProcedure extends 
TestTableDDLProcedureBase {
 long procId = procExec.submitProcedure(
   new ModifyTableProcedure(procExec.getEnvironment(), htd));
 
-int numberOfSteps = 1; // failing at pre operation
+int numberOfSteps = 0; // failing at pre operation
 MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, 
procId, numberOfSteps);
 
 // cf2 should not be present
@@ -315,7 +315,7 @@ public class TestModifyTableProcedure extends 
TestTableDDLProcedureBase {
   new ModifyTableProcedure(procExec.getEnvironment(), htd));
 
 // Restart the executor and rollback the step twice
-int numberOfSteps = 1; // failing at pre operation
+int numberOfSteps = 0; // failing at pre operation
 MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, 
procId, numberOfSteps);
 
 // cf2 should not be present

http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
index e6e90ef..47b1248 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import java.util.Random;
+import static 

[03/50] [abbrv] hbase git commit: HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi) Move to a new AssignmentManager, one that describes Assignment using a State Machine built on top

2017-05-23 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/db1dcf3e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 81846df..a64d102 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -39,7 +39,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -66,23 +66,26 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.RegionStates;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.master.NoSuchProcedureException;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import 
org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -98,11 +101,11 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 /**
  * The below tests are testing split region against a running cluster
@@ -110,8 +113,9 @@ import org.junit.rules.TestName;
 @Category({RegionServerTests.class, LargeTests.class})
 @SuppressWarnings("deprecation")
 public class TestSplitTransactionOnCluster {
-  private static final Log LOG =
-LogFactory.getLog(TestSplitTransactionOnCluster.class);
+  private static final Log LOG = 
LogFactory.getLog(TestSplitTransactionOnCluster.class);
+  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
   private Admin admin = null;
   private MiniHBaseCluster cluster = null;
   private static final int NB_SERVERS = 3;
@@ -150,8 +154,11 @@ public class TestSplitTransactionOnCluster {
   throws IOException, InterruptedException {
 assertEquals(1, regions.size());
 HRegionInfo hri = regions.get(0).getRegionInfo();
-cluster.getMaster().getAssignmentManager()
-  .waitOnRegionToClearRegionsInTransition(hri, 60);
+try {
+ 

hbase git commit: HBASE-18077 Update JUnit licensing to use EPL

2017-05-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 71bf5afa3 -> 6ee7a4932


HBASE-18077 Update JUnit licensing to use EPL

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ee7a493
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ee7a493
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ee7a493

Branch: refs/heads/branch-1.2
Commit: 6ee7a4932ab0a24956168d6482c30712a247a17a
Parents: 71bf5af
Author: Mike Drob 
Authored: Thu May 18 19:16:56 2017 -0700
Committer: Sean Busbey 
Committed: Tue May 23 10:33:29 2017 -0500

--
 .../src/main/resources/META-INF/LICENSE.vm  | 264 ++-
 .../src/main/resources/supplemental-models.xml  |   5 +-
 2 files changed, 266 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6ee7a493/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
--
diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
index f403c89..9f9afb5 100644
--- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
+++ b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
@@ -1551,6 +1551,8 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 #set($bsd3 = [])
 ## gather up CPL 1.0 works
 #set($cpl = [])
+## gather up EPL 1.0 works
+#set($epl = [])
 ## track commons-math
 #set($commons-math-two = false)
 #set($commons-math-three = false)
@@ -1561,7 +1563,9 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 ## Whitelist of licenses that it's safe to not aggregate as above.
 ## Note that this doesn't include ALv2 or the aforementioned aggregate
 ## license mentions.
-#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 2.0' ])
+##
+## See this FAQ link for justifications: 
https://www.apache.org/legal/resolved.html
+#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5' ])
 ## include LICENSE sections for anything not under ASL2.0
 #foreach( ${dep} in ${projects} )
 #if(${debug-print-included-work-info.equalsIgnoreCase("true")})
@@ -1637,6 +1641,9 @@ ${dep.scm.url}
 #if(${dep.licenses[0].name.equals("Common Public License Version 1.0")})
 #set($aggregated = $cpl.add($dep))
 #end
+#if(${dep.licenses[0].name.equals("Eclipse Public License 1.0")})
+#set($aggregated = $epl.add($dep))
+#end
 #if(!${aggregated})
 --
 This product includes ${dep.name} licensed under the ${dep.licenses[0].name}.
@@ -2563,4 +2570,259 @@ Common Public License - v 1.0
 #if($jruby)
 #jruby_license()
 #end
+#if(!(${epl.isEmpty()}))
+
+## print all the EPL 1.0 licensed works
+This product includes the following works licensed under the Eclipse Public 
License 1.0:
+
+#foreach($dep in $epl)
+#if( $dep.licenses[0].comments && !$dep.licenses[0].comments.empty )
+  * ${dep.name}, ${dep.licenses[0].comments}
+#else
+  * ${dep.name}
+#end
+#end
+
+  Eclipse Public License - v 1.0
+
+  THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+  PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+  OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+  1. DEFINITIONS
+
+  "Contribution" means:
+
+  a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+  b) in the case of each subsequent Contributor:
+
+  i) changes to the Program, and
+
+  ii) additions to the Program;
+  where such changes and/or additions to the Program
+  originate from and are distributed by that particular
+  Contributor. A Contribution 'originates' from a
+  Contributor if it was added to the Program by such
+  Contributor itself or anyone acting on such
+  Contributor's behalf. Contributions do not include
+  additions to the Program which: (i) are separate modules
+  of software distributed in conjunction with the Program
+  under their own license agreement, and (ii) are not
+  derivative works of the Program.
+
+  "Contributor" means any person or entity that distributes the Program.
+
+  "Licensed Patents" mean patent claims licensable by a Contributor
+  which are necessarily infringed by the use or sale of its
+  Contribution alone or when combined with the Program.
+
+  "Program" means the Contributions 

hbase git commit: HBASE-18077 Update JUnit licensing to use EPL

2017-05-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 9d21e89b0 -> d1b1eab10


HBASE-18077 Update JUnit licensing to use EPL

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d1b1eab1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d1b1eab1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d1b1eab1

Branch: refs/heads/branch-1.3
Commit: d1b1eab1036c3f69d06d573f52028aea00cdac5b
Parents: 9d21e89
Author: Mike Drob 
Authored: Thu May 18 19:16:56 2017 -0700
Committer: Sean Busbey 
Committed: Tue May 23 10:22:27 2017 -0500

--
 .../src/main/resources/META-INF/LICENSE.vm  | 264 ++-
 .../src/main/resources/supplemental-models.xml  |   5 +-
 2 files changed, 266 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d1b1eab1/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
--
diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
index f403c89..9f9afb5 100644
--- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
+++ b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
@@ -1551,6 +1551,8 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 #set($bsd3 = [])
 ## gather up CPL 1.0 works
 #set($cpl = [])
+## gather up EPL 1.0 works
+#set($epl = [])
 ## track commons-math
 #set($commons-math-two = false)
 #set($commons-math-three = false)
@@ -1561,7 +1563,9 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 ## Whitelist of licenses that it's safe to not aggregate as above.
 ## Note that this doesn't include ALv2 or the aforementioned aggregate
 ## license mentions.
-#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 2.0' ])
+##
+## See this FAQ link for justifications: 
https://www.apache.org/legal/resolved.html
+#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5' ])
 ## include LICENSE sections for anything not under ASL2.0
 #foreach( ${dep} in ${projects} )
 #if(${debug-print-included-work-info.equalsIgnoreCase("true")})
@@ -1637,6 +1641,9 @@ ${dep.scm.url}
 #if(${dep.licenses[0].name.equals("Common Public License Version 1.0")})
 #set($aggregated = $cpl.add($dep))
 #end
+#if(${dep.licenses[0].name.equals("Eclipse Public License 1.0")})
+#set($aggregated = $epl.add($dep))
+#end
 #if(!${aggregated})
 --
 This product includes ${dep.name} licensed under the ${dep.licenses[0].name}.
@@ -2563,4 +2570,259 @@ Common Public License - v 1.0
 #if($jruby)
 #jruby_license()
 #end
+#if(!(${epl.isEmpty()}))
+
+## print all the EPL 1.0 licensed works
+This product includes the following works licensed under the Eclipse Public 
License 1.0:
+
+#foreach($dep in $epl)
+#if( $dep.licenses[0].comments && !$dep.licenses[0].comments.empty )
+  * ${dep.name}, ${dep.licenses[0].comments}
+#else
+  * ${dep.name}
+#end
+#end
+
+  Eclipse Public License - v 1.0
+
+  THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+  PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+  OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+  1. DEFINITIONS
+
+  "Contribution" means:
+
+  a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+  b) in the case of each subsequent Contributor:
+
+  i) changes to the Program, and
+
+  ii) additions to the Program;
+  where such changes and/or additions to the Program
+  originate from and are distributed by that particular
+  Contributor. A Contribution 'originates' from a
+  Contributor if it was added to the Program by such
+  Contributor itself or anyone acting on such
+  Contributor's behalf. Contributions do not include
+  additions to the Program which: (i) are separate modules
+  of software distributed in conjunction with the Program
+  under their own license agreement, and (ii) are not
+  derivative works of the Program.
+
+  "Contributor" means any person or entity that distributes the Program.
+
+  "Licensed Patents" mean patent claims licensable by a Contributor
+  which are necessarily infringed by the use or sale of its
+  Contribution alone or when combined with the Program.
+
+  "Program" means the Contributions 

hbase git commit: HBASE-18077 Update JUnit licensing to use EPL

2017-05-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4249a1f3d -> abf03da41


HBASE-18077 Update JUnit licensing to use EPL

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/abf03da4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/abf03da4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/abf03da4

Branch: refs/heads/branch-1
Commit: abf03da41df57faf8e6426a58822ddf414ba9561
Parents: 4249a1f
Author: Mike Drob 
Authored: Thu May 18 19:16:56 2017 -0700
Committer: Sean Busbey 
Committed: Tue May 23 10:20:44 2017 -0500

--
 .../src/main/resources/META-INF/LICENSE.vm  | 264 ++-
 .../src/main/resources/supplemental-models.xml  |   5 +-
 2 files changed, 266 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/abf03da4/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
--
diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
index f403c89..9f9afb5 100644
--- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
+++ b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
@@ -1551,6 +1551,8 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 #set($bsd3 = [])
 ## gather up CPL 1.0 works
 #set($cpl = [])
+## gather up EPL 1.0 works
+#set($epl = [])
 ## track commons-math
 #set($commons-math-two = false)
 #set($commons-math-three = false)
@@ -1561,7 +1563,9 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 ## Whitelist of licenses that it's safe to not aggregate as above.
 ## Note that this doesn't include ALv2 or the aforementioned aggregate
 ## license mentions.
-#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 2.0' ])
+##
+## See this FAQ link for justifications: 
https://www.apache.org/legal/resolved.html
+#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5' ])
 ## include LICENSE sections for anything not under ASL2.0
 #foreach( ${dep} in ${projects} )
 #if(${debug-print-included-work-info.equalsIgnoreCase("true")})
@@ -1637,6 +1641,9 @@ ${dep.scm.url}
 #if(${dep.licenses[0].name.equals("Common Public License Version 1.0")})
 #set($aggregated = $cpl.add($dep))
 #end
+#if(${dep.licenses[0].name.equals("Eclipse Public License 1.0")})
+#set($aggregated = $epl.add($dep))
+#end
 #if(!${aggregated})
 --
 This product includes ${dep.name} licensed under the ${dep.licenses[0].name}.
@@ -2563,4 +2570,259 @@ Common Public License - v 1.0
 #if($jruby)
 #jruby_license()
 #end
+#if(!(${epl.isEmpty()}))
+
+## print all the EPL 1.0 licensed works
+This product includes the following works licensed under the Eclipse Public 
License 1.0:
+
+#foreach($dep in $epl)
+#if( $dep.licenses[0].comments && !$dep.licenses[0].comments.empty )
+  * ${dep.name}, ${dep.licenses[0].comments}
+#else
+  * ${dep.name}
+#end
+#end
+
+  Eclipse Public License - v 1.0
+
+  THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+  PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+  OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+  1. DEFINITIONS
+
+  "Contribution" means:
+
+  a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+  b) in the case of each subsequent Contributor:
+
+  i) changes to the Program, and
+
+  ii) additions to the Program;
+  where such changes and/or additions to the Program
+  originate from and are distributed by that particular
+  Contributor. A Contribution 'originates' from a
+  Contributor if it was added to the Program by such
+  Contributor itself or anyone acting on such
+  Contributor's behalf. Contributions do not include
+  additions to the Program which: (i) are separate modules
+  of software distributed in conjunction with the Program
+  under their own license agreement, and (ii) are not
+  derivative works of the Program.
+
+  "Contributor" means any person or entity that distributes the Program.
+
+  "Licensed Patents" mean patent claims licensable by a Contributor
+  which are necessarily infringed by the use or sale of its
+  Contribution alone or when combined with the Program.
+
+  "Program" means the Contributions 

hbase git commit: HBASE-18077 Update JUnit licensing to use EPL

2017-05-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/master 1d0295f4e -> 9e7b0c1a4


HBASE-18077 Update JUnit licensing to use EPL

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e7b0c1a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e7b0c1a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e7b0c1a

Branch: refs/heads/master
Commit: 9e7b0c1a4f24feeecb55498d7926596af9fc284a
Parents: 1d0295f
Author: Mike Drob 
Authored: Thu May 18 19:16:56 2017 -0700
Committer: Sean Busbey 
Committed: Tue May 23 10:12:15 2017 -0500

--
 .../src/main/resources/META-INF/LICENSE.vm  | 262 ++-
 .../src/main/resources/supplemental-models.xml  |   5 +-
 2 files changed, 264 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e7b0c1a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
--
diff --git a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm 
b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
index a9f0c81..29dd9d5 100644
--- a/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
+++ b/hbase-resource-bundle/src/main/resources/META-INF/LICENSE.vm
@@ -1551,6 +1551,8 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 #set($bsd3 = [])
 ## gather up CPL 1.0 works
 #set($cpl = [])
+## gather up EPL 1.0 works
+#set($epl = [])
 ## track commons-math
 #set($commons-math-two = false)
 #set($commons-math-three = false)
@@ -1563,7 +1565,7 @@ You can redistribute it and/or modify it under either the 
terms of the GPL
 ## license mentions.
 ##
 ## See this FAQ link for justifications: 
https://www.apache.org/legal/resolved.html
-#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5', 'Eclipse Public 
License 1.0'])
+#set($non_aggregate_fine = [ 'Public Domain', 'New BSD license', 'BSD 
license', 'Mozilla Public License Version 1.1', 'Mozilla Public License Version 
2.0', 'Creative Commons Attribution License, Version 2.5' ])
 ## include LICENSE sections for anything not under ASL2.0
 #foreach( ${dep} in ${projects} )
 ## if there are no licenses we'll fail the build later, so
@@ -1647,6 +1649,9 @@ ${dep.scm.url}
 #if(${dep.licenses[0].name.equals("Common Public License Version 1.0")})
 #set($aggregated = $cpl.add($dep))
 #end
+#if(${dep.licenses[0].name.equals("Eclipse Public License 1.0")})
+#set($aggregated = $epl.add($dep))
+#end
 #if(!${aggregated})
 --
 This product includes ${dep.name} licensed under the ${dep.licenses[0].name}.
@@ -2573,4 +2578,259 @@ Common Public License - v 1.0
 #if($jruby)
 #jruby_license()
 #end
+#if(!(${epl.isEmpty()}))
+
+## print all the EPL 1.0 licensed works
+This product includes the following works licensed under the Eclipse Public 
License 1.0:
+
+#foreach($dep in $epl)
+#if( $dep.licenses[0].comments && !$dep.licenses[0].comments.empty )
+  * ${dep.name}, ${dep.licenses[0].comments}
+#else
+  * ${dep.name}
+#end
+#end
+
+  Eclipse Public License - v 1.0
+
+  THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
+  PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
+  OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+  1. DEFINITIONS
+
+  "Contribution" means:
+
+  a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+  b) in the case of each subsequent Contributor:
+
+  i) changes to the Program, and
+
+  ii) additions to the Program;
+  where such changes and/or additions to the Program
+  originate from and are distributed by that particular
+  Contributor. A Contribution 'originates' from a
+  Contributor if it was added to the Program by such
+  Contributor itself or anyone acting on such
+  Contributor's behalf. Contributions do not include
+  additions to the Program which: (i) are separate modules
+  of software distributed in conjunction with the Program
+  under their own license agreement, and (ii) are not
+  derivative works of the Program.
+
+  "Contributor" means any person or entity that distributes the Program.
+
+  "Licensed Patents" mean patent claims licensable by a Contributor
+  which are necessarily infringed by the use or sale of its
+  Contribution alone or when combined with the Program.
+
+  "Program" means the Contributions distributed in accordance with
+  

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index 85f90d1..85d7b38 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class ConnectionImplementation.MasterServiceState
+static class ConnectionImplementation.MasterServiceState
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 State of the MasterService connection/setup.
 
@@ -222,7 +222,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 connection
-Connection connection
+Connection connection
 
 
 
@@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 stub
-org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 stub
+org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 stub
 
 
 
@@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 userCount
-int userCount
+int userCount
 
 
 
@@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MasterServiceState
-MasterServiceState(Connectionconnection)
+MasterServiceState(Connectionconnection)
 
 
 
@@ -274,7 +274,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toStringin 
classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
@@ -287,7 +287,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getStub
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjectgetStub()
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">ObjectgetStub()
 
 
 
@@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 clearStub
-voidclearStub()
+voidclearStub()
 
 
 
@@ -305,7 +305,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isMasterRunning
-booleanisMasterRunning()
+booleanisMasterRunning()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index 0a8e5ea..846a73b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class ConnectionImplementation.MasterServiceStubMaker
+private final class ConnectionImplementation.MasterServiceStubMaker
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Class to make a MasterServiceStubMaker stub.
 
@@ -197,7 +197,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MasterServiceStubMaker
-privateMasterServiceStubMaker()
+privateMasterServiceStubMaker()
 
 
 
@@ -214,7 +214,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isMasterRunning
-privatevoidisMasterRunning(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterfacestub)

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
index c2fd82e..099dbd7 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MetricsMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -145,6 +145,10 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 masterProcSource
 
 
+private MetricsMasterQuotaSource
+masterQuotaSource
+
+
 private MetricsMasterSource
 masterSource
 
@@ -185,13 +189,47 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 getMetricsProcSource()
 
 
+MetricsMasterQuotaSource
+getMetricsQuotaSource()
+
+
 MetricsMasterSource
 getMetricsSource()
 
-
+
+void
+incrementQuotaObserverTime(longexecutionTime)
+Sets the execution time of a period of the 
QuotaObserverChore.
+
+
+
 void
 incrementRequests(longinc)
 
+
+void
+setNumNamespacesInSpaceQuotaViolation(longnumNamespacesInViolation)
+Sets the number of namespaces in violation of a space 
quota.
+
+
+
+void
+setNumRegionSizeReports(longnumRegionReports)
+Sets the number of region size reports the master currently 
has in memory.
+
+
+
+void
+setNumSpaceQuotas(longnumSpaceQuotas)
+Sets the number of space quotas defined.
+
+
+
+void
+setNumTableInSpaceQuotaViolation(longnumTablesInViolation)
+Sets the number of table in violation of a space 
quota.
+
+
 
 
 
@@ -235,12 +273,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 masterProcSource
 privateMetricsMasterProcSource masterProcSource
 
 
+
+
+
+
+
+masterQuotaSource
+privateMetricsMasterQuotaSource masterQuotaSource
+
+
 
 
 
@@ -255,7 +302,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MetricsMaster
-publicMetricsMaster(MetricsMasterWrappermasterWrapper)
+publicMetricsMaster(MetricsMasterWrappermasterWrapper)
 
 
 
@@ -272,7 +319,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMetricsSource
-publicMetricsMasterSourcegetMetricsSource()
+publicMetricsMasterSourcegetMetricsSource()
 
 
 
@@ -281,22 +328,103 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getMetricsProcSource
-publicMetricsMasterProcSourcegetMetricsProcSource()
+publicMetricsMasterProcSourcegetMetricsProcSource()
+
+
+
+
+
+
+
+getMetricsQuotaSource
+publicMetricsMasterQuotaSourcegetMetricsQuotaSource()
 
 
 
 
 
-
+
 
 incrementRequests
-publicvoidincrementRequests(longinc)
+publicvoidincrementRequests(longinc)
 
 Parameters:
 inc - How much to add to requests.
 
 
 
+
+
+
+
+
+setNumSpaceQuotas
+publicvoidsetNumSpaceQuotas(longnumSpaceQuotas)
+Sets the number of space quotas defined.
+
+See Also:
+MetricsMasterQuotaSource.updateNumSpaceQuotas(long)
+
+
+
+
+
+
+
+
+setNumTableInSpaceQuotaViolation
+publicvoidsetNumTableInSpaceQuotaViolation(longnumTablesInViolation)
+Sets the number of table in violation of a space 
quota.
+
+See Also:
+MetricsMasterQuotaSource.updateNumTablesInSpaceQuotaViolation(long)
+
+
+
+
+
+
+
+
+setNumNamespacesInSpaceQuotaViolation
+publicvoidsetNumNamespacesInSpaceQuotaViolation(longnumNamespacesInViolation)
+Sets the number of namespaces in violation of a space 
quota.
+
+See Also:
+MetricsMasterQuotaSource.updateNumNamespacesInSpaceQuotaViolation(long)
+
+
+
+
+
+
+
+
+setNumRegionSizeReports
+publicvoidsetNumRegionSizeReports(longnumRegionReports)
+Sets the number of region size reports the master currently 
has in memory.
+
+See Also:
+MetricsMasterQuotaSource.updateNumCurrentSpaceQuotaRegionSizeReports(long)
+
+
+
+
+
+
+
+
+incrementQuotaObserverTime
+publicvoidincrementQuotaObserverTime(longexecutionTime)
+Sets the execution time of a period of the 
QuotaObserverChore.
+
+Parameters:
+executionTime - The execution time in milliseconds.
+See Also:
+MetricsMasterQuotaSource.incrementSpaceQuotaObserverChoreTime(long)
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.html 
b/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.html
index 79f6ed7..6059339 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.html
+++ 

hbase-site git commit: INFRA-10751 Empty commit

2017-05-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dab57116f -> 0d37a65d5


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/0d37a65d
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/0d37a65d
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/0d37a65d

Branch: refs/heads/asf-site
Commit: 0d37a65d51dbef3f6fce5f05ebd4320d718b3d11
Parents: dab5711
Author: jenkins 
Authored: Tue May 23 15:01:28 2017 +
Committer: jenkins 
Committed: Tue May 23 15:01:28 2017 +

--

--




[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
index 3e22b3c..afeafa2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class BackupCommands.CreateCommand
+public static class BackupCommands.CreateCommand
 extends BackupCommands.Command
 
 
@@ -245,7 +245,7 @@ extends 
 
 CreateCommand
-CreateCommand(org.apache.hadoop.conf.Configurationconf,
+CreateCommand(org.apache.hadoop.conf.Configurationconf,
   org.apache.commons.cli.CommandLinecmdline)
 
 
@@ -263,7 +263,7 @@ extends 
 
 requiresNoActiveSession
-protectedbooleanrequiresNoActiveSession()
+protectedbooleanrequiresNoActiveSession()
 Description copied from 
class:BackupCommands.Command
 The command can't be run if active backup session is in 
progress
 
@@ -280,7 +280,7 @@ extends 
 
 execute
-publicvoidexecute()
+publicvoidexecute()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Overrides:
@@ -296,7 +296,7 @@ extends 
 
 verifyPath
-privatebooleanverifyPath(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringpath)
+privatebooleanverifyPath(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringpath)
 
 
 
@@ -305,7 +305,7 @@ extends 
 
 getTablesForSet
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetTablesForSet(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetTablesForSet(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
org.apache.hadoop.conf.Configurationconf)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -320,7 +320,7 @@ extends 
 
 printUsage
-protectedvoidprintUsage()
+protectedvoidprintUsage()
 
 Specified by:
 printUsagein
 classBackupCommands.Command

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DeleteCommand.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DeleteCommand.html
 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DeleteCommand.html
index 0d98fbd..cb2c25b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DeleteCommand.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DeleteCommand.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class BackupCommands.DeleteCommand
+private static class BackupCommands.DeleteCommand
 extends BackupCommands.Command
 
 
@@ -236,7 +236,7 @@ extends 
 
 DeleteCommand
-DeleteCommand(org.apache.hadoop.conf.Configurationconf,
+DeleteCommand(org.apache.hadoop.conf.Configurationconf,
   org.apache.commons.cli.CommandLinecmdline)
 
 
@@ -254,7 +254,7 @@ extends 
 
 requiresNoActiveSession
-protectedbooleanrequiresNoActiveSession()
+protectedbooleanrequiresNoActiveSession()
 Description copied from 
class:BackupCommands.Command
 The command can't be run if active backup session is in 
progress
 
@@ -271,7 +271,7 @@ extends 
 
 execute
-publicvoidexecute()
+publicvoidexecute()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Overrides:
@@ -287,7 +287,7 @@ extends 
 
 printUsage
-protectedvoidprintUsage()
+protectedvoidprintUsage()
 
 Specified by:
 printUsagein
 classBackupCommands.Command

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DescribeCommand.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.DescribeCommand.html
 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/apidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
index 7231169..61b88c6 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
@@ -31,468 +31,483 @@
 023import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
 024import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
 025import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-026import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
-027import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
-028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
-029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
-030import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
-031import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
-032import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
-033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
-036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
-037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
-038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
-039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-040
-041/**
-042 * A short-circuit connection that can 
bypass the RPC layer (serialization, deserialization,
-043 * networking, etc..) when talking to a 
local master
-044 */
-045@InterfaceAudience.Public
-046public class ShortCircuitMasterConnection 
implements MasterKeepAliveConnection {
-047
-048  private final 
MasterService.BlockingInterface stub;
-049
-050  public 
ShortCircuitMasterConnection(MasterService.BlockingInterface stub) {
-051this.stub = stub;
-052  }
+026import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
+027import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
+028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
+029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
+030import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+031import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+032import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
+033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
+034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
+035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
+036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
+037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
+039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
+040import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+041import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+042import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
+043import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
+044
+045/**
+046 * A short-circuit connection that can 
bypass the RPC layer 

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.html 
b/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.html
index d2e7265..b34b28b 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MetricsMasterWrapperImpl
+public class MetricsMasterWrapperImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements MetricsMasterWrapper
 Impl for exposing HMaster Information through JMX
@@ -173,90 +173,106 @@ implements Method and Description
 
 
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
 title="class or interface in java.util">Map.Entryhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
+convertSnapshot(SpaceQuotaSnapshotsnapshot)
+
+
 long
 getActiveTime()
 Get the hbase master active time
 
 
-
+
 double
 getAverageLoad()
 Get Average Load
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getClusterId()
 Get the Cluster ID
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]
 getCoprocessors()
 Get the co-processors
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getDeadRegionServers()
 Get the dead region servers
 
 
-
+
 boolean
 getIsActiveMaster()
 Whether this master is the active master
 
 
-
+
 long
 getMergePlanCount()
 Get the number of region merge plans executed.
 
 
-
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
 title="class or interface in java.util">Map.Entryhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
+getNamespaceSpaceUtilization()
+Gets the space usage and limit for each namespace.
+
+
+
 int
 getNumDeadRegionServers()
 Get the number of dead region servers
 
 
-
+
 int
 getNumRegionServers()
 Get the number of live region servers
 
 
-
+
 long
 getNumWALFiles()
 Get the number of master WAL files.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getRegionServers()
 Get the live region servers
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getServerName()
 Get ServerName
 
 
-
+
 long
 getSplitPlanCount()
 Get the number of region split plans executed.
 
 
-
+
 long
 getStartTime()
 Get hbase master start time
 
 
-
+
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true;
 title="class or interface in java.util">Map.Entryhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
+getTableSpaceUtilization()
+Gets the space usage and limit for each table.
+
+
+
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
deleted file mode 100644
index 9cc7e38..000
--- 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
+++ /dev/null
@@ -1,331 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-NettyRpcServer.CallWriteListener (Apache HBase 2.0.0-SNAPSHOT 
API)
-
-
-
-
-
-var methods = {"i0":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.ipc
-Class 
NettyRpcServer.CallWriteListener
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.ipc.NettyRpcServer.CallWriteListener
-
-
-
-
-
-
-
-All Implemented Interfaces:
-io.netty.channel.ChannelFutureListener, 
io.netty.util.concurrent.GenericFutureListenerio.netty.channel.ChannelFuture,
 http://docs.oracle.com/javase/8/docs/api/java/util/EventListener.html?is-external=true;
 title="class or interface in java.util">EventListener
-
-
-Enclosing class:
-NettyRpcServer
-
-
-
-private class NettyRpcServer.CallWriteListener
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-implements io.netty.channel.ChannelFutureListener
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private NettyServerCall
-call
-
-
-
-
-
-
-Fields inherited from 
interfaceio.netty.channel.ChannelFutureListener
-CLOSE, CLOSE_ON_FAILURE, FIRE_EXCEPTION_ON_FAILURE
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-CallWriteListener(NettyServerCallcall)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-operationComplete(io.netty.channel.ChannelFuturefuture)
-
-
-
-
-
-
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Field Detail
-
-
-
-
-
-call
-privateNettyServerCall call
-
-
-
-
-

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index aeb7adf..91644d7 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -452,6 +452,12 @@
 
 AbstractStateMachineTableProcedure(MasterProcedureEnv,
 ProcedurePrepareLatch) - Constructor for class 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure
 
+AbstractViolationPolicyEnforcement - Class in 
org.apache.hadoop.hbase.quotas.policies
+
+Abstract implementation for SpaceViolationPolicyEnforcement.
+
+AbstractViolationPolicyEnforcement()
 - Constructor for class org.apache.hadoop.hbase.quotas.policies.AbstractViolationPolicyEnforcement
+
 accept(Path)
 - Method in class org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager.NewestLogFilter
 
 accept(Void,
 Throwable) - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer
@@ -829,6 +835,17 @@
 
 activeOperations
 - Variable in class org.apache.hadoop.hbase.regionserver.throttle.PressureAwareThroughputController
 
+activePolicies
 - Variable in class org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement
+
+ActivePolicyEnforcement - Class in org.apache.hadoop.hbase.quotas
+
+A class to ease dealing with tables that have and do not 
have violation policies
+ being enforced.
+
+ActivePolicyEnforcement(MapTableName,
 SpaceViolationPolicyEnforcement, MapTableName, SpaceQuotaSnapshot, 
RegionServerServices) - Constructor for class 
org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement
+
+ActivePolicyEnforcement(MapTableName,
 SpaceViolationPolicyEnforcement, MapTableName, SpaceQuotaSnapshot, 
RegionServerServices, SpaceViolationPolicyEnforcementFactory) - 
Constructor for class org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement
+
 activeReadHandlerCount
 - Variable in class org.apache.hadoop.hbase.ipc.RWQueueRpcExecutor
 
 activeScanHandlerCount
 - Variable in class org.apache.hadoop.hbase.ipc.RWQueueRpcExecutor
@@ -1936,6 +1953,10 @@
 
 addNamespaceQuota(Connection,
 String, QuotaProtos.Quotas) - Static method in class 
org.apache.hadoop.hbase.quotas.QuotaUtil
 
+addNamespaceQuotaTable(TableName)
 - Method in class org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas
+
+Adds a table with a namespace quota.
+
 addNewMetricIfAbsent(String,
 T, ClassT) - Method in class 
org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry
 
 addNewTableEntryInMap(MapString,
 ListPairbyte[], ListString, byte[], String, 
String) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSink
@@ -2105,6 +2126,8 @@
 
 addRegionServer(Configuration,
 int, User) - Method in class org.apache.hadoop.hbase.LocalHBaseCluster
 
+addRegionSize(HRegionInfo,
 long, long) - Method in class org.apache.hadoop.hbase.quotas.MasterQuotaManager
+
 addRegionSorted(int[],
 int) - Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster
 
 addRegionsToMeta(MasterProcedureEnv)
 - Method in class org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure
@@ -2314,6 +2337,8 @@
 
 addShutdownHook(Thread,
 int) - Method in class org.apache.hadoop.hbase.util.ShutdownHookManager.ShutdownHookManagerV2
 
+addSingleFile(FileSystem,
 String) - Method in class 
org.apache.hadoop.hbase.quotas.policies.DefaultViolationPolicyEnforcement
+
 addSize(RpcCallContext,
 Result, Object) - Method in class 
org.apache.hadoop.hbase.regionserver.RSRpcServices
 
 Method to account for the size of retained cells and 
retained data blocks.
@@ -2384,6 +2409,10 @@
 
 addTableQuota(Connection,
 TableName, QuotaProtos.Quotas) - Static method in class 
org.apache.hadoop.hbase.quotas.QuotaUtil
 
+addTableQuotaTable(TableName)
 - Method in class org.apache.hadoop.hbase.quotas.QuotaObserverChore.TablesWithQuotas
+
+Adds a table with a table quota.
+
 addTables(TableName[])
 - Method in class org.apache.hadoop.hbase.backup.BackupInfo
 
 addTableToMeta(MasterProcedureEnv,
 HTableDescriptor, ListHRegionInfo) - Static method in class 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure
@@ -2746,6 +2775,8 @@
 
 allChannels
 - Variable in class org.apache.hadoop.hbase.ipc.NettyRpcServer
 
+allChannels
 - Variable in class org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder
+
 allCompactedFilesCached
 - Variable in class org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.State
 
 allFiles
 - Variable in class org.apache.hadoop.hbase.master.MasterMobCompactionThread.CompactionRunner
@@ -2983,7 +3014,7 @@
 
 Perform one or more append operations on a row.
 
-append(Region,
 OperationQuota, ClientProtos.MutationProto, CellScanner, long) - 
Method in class org.apache.hadoop.hbase.regionserver.RSRpcServices
+append(Region,
 OperationQuota, ClientProtos.MutationProto, CellScanner, long, 
ActivePolicyEnforcement) - Method in class 

[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
index 3dc0a70..b9ec622 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class IncrementalTableBackupClient
+public class IncrementalTableBackupClient
 extends TableBackupClient
 Incremental backup implementation.
  See the execute
 method.
@@ -125,6 +125,21 @@ extends 
 
 
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.backup.impl.TableBackupClient
+TableBackupClient.Stage
+
+
+
 
 
 
@@ -147,7 +162,7 @@ extends TableBackupClient
-BACKUP_CLIENT_IMPL_CLASS,
 backupId,
 backupInfo,
 backupManager,
 conf,
 conn,
 newTimestamps,
 tableList
+BACKUP_CLIENT_IMPL_CLASS,
 BACKUP_TEST_MODE_STAGE,
 backupId,
 backupInfo,
 backupManager,
 conf,
 conn,
 newTimestamps,
 tableList
 
 
 
@@ -160,10 +175,16 @@ extends 
 Constructors
 
-Constructor and Description
+Modifier
+Constructor and Description
 
 
-IncrementalTableBackupClient(Connectionconn,
+protected 
+IncrementalTableBackupClient()
+
+
+
+IncrementalTableBackupClient(Connectionconn,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringbackupId,
 BackupRequestrequest)
 
@@ -243,7 +264,7 @@ extends TableBackupClient
-addManifest,
 beginBackup,
 cleanupAndRestoreBackupSystem,
 cleanupDistCpLog,
 cleanupExportSnapshotLog,
 cleanupTargetDir,
 completeBackup,
 deleteBackupTableSnapshot,
 deleteSnapshots,
 failBackup,
 getMessage,
 init,
 obtainBackupMetaDataStr, restoreBackupTable,
 snapshotBackupTable,
 snapshotExists
+addManifest,
 beginBackup,
 cleanupAndRestoreBackupSystem,
 cleanupDistCpLog,
 cleanupExportSnapshotLog,
 cleanupTargetDir,
 completeBackup,
 deleteBackupTableSnapshot,
 deleteSnapshots,
 failBackup,
 failStageIf,
 getMessage,
 getTestStage,
 init,
 obtainBackupMetaDataStr,
 restoreBackupTable,
 snapshotBackupTable,
 snapshotExists
 
 
 
@@ -272,7 +293,7 @@ extends 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -283,13 +304,22 @@ extends 
+
+
+
+
+IncrementalTableBackupClient
+protectedIncrementalTableBackupClient()
+
+
 
 
 
 
 
 IncrementalTableBackupClient
-publicIncrementalTableBackupClient(Connectionconn,
+publicIncrementalTableBackupClient(Connectionconn,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringbackupId,
 BackupRequestrequest)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -313,7 +343,7 @@ extends 
 
 filterMissingFiles
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringfilterMissingFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringincrBackupFileList)
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringfilterMissingFiles(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringincrBackupFileList)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -327,7 +357,7 @@ extends 
 
 isActiveWalPath
-protectedbooleanisActiveWalPath(org.apache.hadoop.fs.Pathp)
+protectedbooleanisActiveWalPath(org.apache.hadoop.fs.Pathp)
 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.html
new file mode 100644
index 000..b31ef0d
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/ActivePolicyEnforcement.html
@@ -0,0 +1,475 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ActivePolicyEnforcement (Apache HBase 2.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.quotas
+Class 
ActivePolicyEnforcement
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement
+
+
+
+
+
+
+
+
+@InterfaceAudience.Private
+ @InterfaceStability.Evolving
+public class ActivePolicyEnforcement
+extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+A class to ease dealing with tables that have and do not 
have violation policies
+ being enforced. This class is immutable, expect for 
locallyCachedPolicies.
+
+ The locallyCachedPolicies are mutable given the current 
activePolicies
+ and snapshots. It is expected that when a new instance of this 
class is
+ instantiated, we also want to invalidate those previously cached policies (as 
they
+ may now be invalidate if we received new quota usage information).
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceViolationPolicyEnforcement
+activePolicies
+
+
+private SpaceViolationPolicyEnforcementFactory
+factory
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceViolationPolicyEnforcement
+locallyCachedPolicies
+
+
+private RegionServerServices
+rss
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceQuotaSnapshot
+snapshots
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+ActivePolicyEnforcement(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceViolationPolicyEnforcementactivePolicies,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceQuotaSnapshotsnapshots,
+   RegionServerServicesrss)
+
+
+ActivePolicyEnforcement(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceViolationPolicyEnforcementactivePolicies,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceQuotaSnapshotsnapshots,
+   RegionServerServicesrss,
+   SpaceViolationPolicyEnforcementFactoryfactory)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,SpaceViolationPolicyEnforcement
+getLocallyCachedPolicies()
+Returns an unmodifiable version of the policy enforcements 
that were cached because they are
+ not in violation of their quota.
+
+
+

[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index a6ce1bc..c98e496 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class ConnectionImplementation
+class ConnectionImplementation
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ClusterConnection, http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable
 Main implementation of Connection 
and ClusterConnection interfaces.
@@ -857,7 +857,7 @@ implements 
 
 RETRIES_BY_SERVER_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
 
 See Also:
 Constant
 Field Values
@@ -870,7 +870,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -879,7 +879,7 @@ implements 
 
 RESOLVE_HOSTNAME_ON_FAIL_KEY
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
 
 See Also:
 Constant
 Field Values
@@ -892,7 +892,7 @@ implements 
 
 hostnamesCanChange
-private finalboolean hostnamesCanChange
+private finalboolean hostnamesCanChange
 
 
 
@@ -901,7 +901,7 @@ implements 
 
 pause
-private finallong pause
+private finallong pause
 
 
 
@@ -910,7 +910,7 @@ implements 
 
 pauseForCQTBE
-private finallong pauseForCQTBE
+private finallong pauseForCQTBE
 
 
 
@@ -919,7 +919,7 @@ implements 
 
 useMetaReplicas
-privateboolean useMetaReplicas
+privateboolean useMetaReplicas
 
 
 
@@ -928,7 +928,7 @@ implements 
 
 metaReplicaCallTimeoutScanInMicroSecond
-private finalint metaReplicaCallTimeoutScanInMicroSecond
+private finalint metaReplicaCallTimeoutScanInMicroSecond
 
 
 
@@ -937,7 +937,7 @@ implements 
 
 numTries
-private finalint numTries
+private finalint numTries
 
 
 
@@ -946,7 +946,7 @@ implements 
 
 rpcTimeout
-finalint rpcTimeout
+finalint rpcTimeout
 
 
 
@@ -955,7 +955,7 @@ implements 
 
 nonceGenerator
-private static volatileNonceGenerator nonceGenerator
+private static volatileNonceGenerator nonceGenerator
 Global nonceGenerator shared per client.Currently there's 
no reason to limit its scope.
  Once it's set under nonceGeneratorCreateLock, it is never unset or 
changed.
 
@@ -966,7 +966,7 @@ implements 
 
 nonceGeneratorCreateLock
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
 The nonce generator lock. Only taken when creating 
Connection, which gets a private copy.
 
 
@@ -976,7 +976,7 @@ implements 
 
 asyncProcess
-private finalAsyncProcess asyncProcess
+private finalAsyncProcess asyncProcess
 
 
 
@@ -985,7 +985,7 @@ implements 
 
 stats
-private finalServerStatisticTracker stats
+private finalServerStatisticTracker stats
 
 
 
@@ -994,7 +994,7 @@ implements 
 
 closed
-private volatileboolean closed
+private volatileboolean closed
 
 
 
@@ -1003,7 +1003,7 @@ implements 
 
 aborted
-private volatileboolean aborted
+private volatileboolean aborted
 
 
 
@@ -1012,7 +1012,7 @@ implements 
 
 clusterStatusListener
-ClusterStatusListener clusterStatusListener
+ClusterStatusListener clusterStatusListener
 
 
 
@@ -1021,7 +1021,7 @@ implements 
 
 metaRegionLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object metaRegionLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object metaRegionLock
 
 
 
@@ -1030,7 +1030,7 @@ implements 
 
 masterAndZKLock
-private 

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
deleted file mode 100644
index 0f15520..000
--- a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html
+++ /dev/null
@@ -1,338 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-NettyRpcServer.MessageEncoder (Apache HBase 2.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = {"i0":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.ipc
-Class 
NettyRpcServer.MessageEncoder
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-io.netty.channel.ChannelHandlerAdapter
-
-
-io.netty.channel.ChannelOutboundHandlerAdapter
-
-
-org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageEncoder
-
-
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-io.netty.channel.ChannelHandler, 
io.netty.channel.ChannelOutboundHandler
-
-
-Enclosing class:
-NettyRpcServer
-
-
-
-private class NettyRpcServer.MessageEncoder
-extends io.netty.channel.ChannelOutboundHandlerAdapter
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from 
interfaceio.netty.channel.ChannelHandler
-io.netty.channel.ChannelHandler.Sharable
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Modifier
-Constructor and Description
-
-
-private 
-MessageEncoder()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-write(io.netty.channel.ChannelHandlerContextctx,
- http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectmsg,
- io.netty.channel.ChannelPromisepromise)
-
-
-
-
-
-
-Methods inherited from 
classio.netty.channel.ChannelOutboundHandlerAdapter
-bind, close, connect, deregister, disconnect, flush, read
-
-
-
-
-
-Methods inherited from 
classio.netty.channel.ChannelHandlerAdapter
-exceptionCaught, handlerAdded, handlerRemoved, isSharable
-
-
-
-
-
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class 

  1   2   >