hbase git commit: HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we use the empty FilterList) to branch-1

2017-08-17 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6a1a97422 -> deeda60c6


HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we 
use the empty FilterList) to branch-1

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/deeda60c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/deeda60c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/deeda60c

Branch: refs/heads/branch-1
Commit: deeda60c69f564e3a54d85380deace5aebfd6d94
Parents: 6a1a974
Author: Pankaj Kumar 
Authored: Thu Aug 17 15:58:17 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 17 18:31:57 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java |   5 +
 .../org/apache/hadoop/hbase/client/Query.java   |  35 
 .../org/apache/hadoop/hbase/client/Scan.java|  36 +---
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   9 +-
 .../apache/hadoop/hbase/client/TestScan.java|  32 
 .../hbase/protobuf/generated/ClientProtos.java  | 185 +++
 hbase-protocol/src/main/protobuf/Client.proto   |   1 +
 .../hadoop/hbase/regionserver/HRegion.java  |   3 +
 .../hadoop/hbase/client/TestFromClientSide.java |  44 +
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   1 +
 10 files changed, 285 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/deeda60c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 3286d57..f8c34bd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,6 +110,7 @@ public class Get extends Query
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.checkExistenceOnly = get.isCheckExistenceOnly();
+this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
 this.closestRowBefore = get.isClosestRowBefore();
 Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
@@ -242,6 +243,10 @@ public class Get extends Query
 return this;
   }
 
+  public Get setLoadColumnFamiliesOnDemand(boolean value) {
+return (Get) super.setLoadColumnFamiliesOnDemand(value);
+  }
+
   /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF

http://git-wip-us.apache.org/repos/asf/hbase/blob/deeda60c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 99d5a6a..7d3b628 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -42,6 +42,7 @@ public abstract class Query extends OperationWithAttributes {
   protected int targetReplicaId = -1;
   protected Consistency consistency = Consistency.STRONG;
  protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+  protected Boolean loadColumnFamiliesOnDemand = null;
 
   /**
* @return Filter
@@ -177,6 +178,40 @@ public abstract class Query extends 
OperationWithAttributes {
   IsolationLevel.fromBytes(attr);
   }
 
+  /**
+   * Set the value indicating whether loading CFs on demand should be allowed 
(cluster
+   * default is false). On-demand CF loading doesn't load column families 
until necessary, e.g.
+   * if you filter on one column, the other column family data will be loaded 
only for the rows
+   * that are included in result, not all rows like in normal case.
+   * With column-specific filters, like SingleColumnValueFilter 
w/filterIfMissing == true,
+   * this can deliver huge perf gains when there's a cf with lots of data; 
however, it can
+   * also lead to some inconsistent results, as follows:
+   * - if someone does a concurrent update to both column families in question 
you may get a row
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { 
video => "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my 
dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 
1 },
+   *   { video => "my dog" } }.
+   * - if there's a concurrent split and you have more than 2 column families, 
some rows may be
+   *   missing some column families.

hbase git commit: HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we use the empty FilterList) to branch-1

2017-08-17 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 1fdc24764 -> a348d4913


HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we 
use the empty FilterList) to branch-1

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a348d491
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a348d491
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a348d491

Branch: refs/heads/branch-1.3
Commit: a348d4913d22c22218badc072632eb87d9df89a3
Parents: 1fdc247
Author: Pankaj Kumar 
Authored: Wed Aug 16 23:40:42 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 17 18:29:39 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java |   5 +
 .../org/apache/hadoop/hbase/client/Query.java   |  35 
 .../org/apache/hadoop/hbase/client/Scan.java|  36 +---
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   9 +-
 .../apache/hadoop/hbase/client/TestScan.java|  32 
 .../hbase/protobuf/generated/ClientProtos.java  | 181 ---
 hbase-protocol/src/main/protobuf/Client.proto   |   1 +
 .../hadoop/hbase/regionserver/HRegion.java  |   3 +
 .../hadoop/hbase/client/TestFromClientSide.java |  45 +
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   1 +
 10 files changed, 284 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a348d491/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 88da0b0..72ab0ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,6 +110,7 @@ public class Get extends Query
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.checkExistenceOnly = get.isCheckExistenceOnly();
+this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
 this.closestRowBefore = get.isClosestRowBefore();
 Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
@@ -241,6 +242,10 @@ public class Get extends Query
 return this;
   }
 
+  public Get setLoadColumnFamiliesOnDemand(boolean value) {
+return (Get) super.setLoadColumnFamiliesOnDemand(value);
+  }
+
   /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF

http://git-wip-us.apache.org/repos/asf/hbase/blob/a348d491/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 99d5a6a..7d3b628 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -42,6 +42,7 @@ public abstract class Query extends OperationWithAttributes {
   protected int targetReplicaId = -1;
   protected Consistency consistency = Consistency.STRONG;
  protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+  protected Boolean loadColumnFamiliesOnDemand = null;
 
   /**
* @return Filter
@@ -177,6 +178,40 @@ public abstract class Query extends 
OperationWithAttributes {
   IsolationLevel.fromBytes(attr);
   }
 
+  /**
+   * Set the value indicating whether loading CFs on demand should be allowed 
(cluster
+   * default is false). On-demand CF loading doesn't load column families 
until necessary, e.g.
+   * if you filter on one column, the other column family data will be loaded 
only for the rows
+   * that are included in result, not all rows like in normal case.
+   * With column-specific filters, like SingleColumnValueFilter 
w/filterIfMissing == true,
+   * this can deliver huge perf gains when there's a cf with lots of data; 
however, it can
+   * also lead to some inconsistent results, as follows:
+   * - if someone does a concurrent update to both column families in question 
you may get a row
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { 
video => "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my 
dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 
1 },
+   *   { video => "my dog" } }.
+   * - if there's a concurrent split and you have more than 2 column families, 
some rows may be
+   *   missing some column families.

hbase git commit: HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we use the empty FilterList) to branch-1

2017-08-17 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 4f639e7d8 -> 509c1b63e


HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we 
use the empty FilterList) to branch-1

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/509c1b63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/509c1b63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/509c1b63

Branch: refs/heads/branch-1.2
Commit: 509c1b63ed8e051478e07f2047b9a09aec0b94ef
Parents: 4f639e7
Author: Pankaj Kumar 
Authored: Wed Aug 16 23:57:42 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 17 18:30:57 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java |   5 +
 .../org/apache/hadoop/hbase/client/Query.java   |  35 
 .../org/apache/hadoop/hbase/client/Scan.java|  36 +---
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   9 +-
 .../apache/hadoop/hbase/client/TestScan.java|  32 
 .../hbase/protobuf/generated/ClientProtos.java  | 173 ---
 hbase-protocol/src/main/protobuf/Client.proto   |   1 +
 .../hadoop/hbase/regionserver/HRegion.java  |   3 +
 .../hadoop/hbase/client/TestFromClientSide.java |  45 +
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   1 +
 10 files changed, 280 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/509c1b63/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 88da0b0..72ab0ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,6 +110,7 @@ public class Get extends Query
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.checkExistenceOnly = get.isCheckExistenceOnly();
+this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
 this.closestRowBefore = get.isClosestRowBefore();
 Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
@@ -241,6 +242,10 @@ public class Get extends Query
 return this;
   }
 
+  public Get setLoadColumnFamiliesOnDemand(boolean value) {
+return (Get) super.setLoadColumnFamiliesOnDemand(value);
+  }
+
   /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF

http://git-wip-us.apache.org/repos/asf/hbase/blob/509c1b63/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 53e680d..a49623a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -43,6 +43,7 @@ public abstract class Query extends OperationWithAttributes {
   protected int targetReplicaId = -1;
   protected Consistency consistency = Consistency.STRONG;
  protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+  protected Boolean loadColumnFamiliesOnDemand = null;
 
   /**
* @return Filter
@@ -178,6 +179,40 @@ public abstract class Query extends 
OperationWithAttributes {
   IsolationLevel.fromBytes(attr);
   }
 
+  /**
+   * Set the value indicating whether loading CFs on demand should be allowed 
(cluster
+   * default is false). On-demand CF loading doesn't load column families 
until necessary, e.g.
+   * if you filter on one column, the other column family data will be loaded 
only for the rows
+   * that are included in result, not all rows like in normal case.
+   * With column-specific filters, like SingleColumnValueFilter 
w/filterIfMissing == true,
+   * this can deliver huge perf gains when there's a cf with lots of data; 
however, it can
+   * also lead to some inconsistent results, as follows:
+   * - if someone does a concurrent update to both column families in question 
you may get a row
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { 
video => "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my 
dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 
1 },
+   *   { video => "my dog" } }.
+   * - if there's a concurrent split and you have more than 2 column families, 
some rows may be
+   *   missing some column families.

hbase git commit: HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we use the empty FilterList) to branch-1

2017-08-17 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 ee87edd89 -> 197290a19


HBASE-17617 Backport HBASE-16731 (Inconsistent results from the Get/Scan if we 
use the empty FilterList) to branch-1

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/197290a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/197290a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/197290a1

Branch: refs/heads/branch-1.4
Commit: 197290a1961d788bcd0dce6c33b013b82f7b39be
Parents: ee87edd
Author: Pankaj Kumar 
Authored: Thu Aug 17 16:26:20 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 17 18:31:33 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java |   5 +
 .../org/apache/hadoop/hbase/client/Query.java   |  35 
 .../org/apache/hadoop/hbase/client/Scan.java|  36 +---
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   9 +-
 .../apache/hadoop/hbase/client/TestScan.java|  32 
 .../hbase/protobuf/generated/ClientProtos.java  | 185 +++
 hbase-protocol/src/main/protobuf/Client.proto   |   1 +
 .../hadoop/hbase/regionserver/HRegion.java  |   3 +
 .../hadoop/hbase/client/TestFromClientSide.java |  44 +
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   1 +
 10 files changed, 285 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/197290a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 3286d57..f8c34bd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -110,6 +110,7 @@ public class Get extends Query
 this.storeOffset = get.getRowOffsetPerColumnFamily();
 this.tr = get.getTimeRange();
 this.checkExistenceOnly = get.isCheckExistenceOnly();
+this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
 this.closestRowBefore = get.isClosestRowBefore();
 Map<byte[], NavigableSet<byte[]>> fams = get.getFamilyMap();
 for (Map.Entry<byte[], NavigableSet<byte[]>> entry : fams.entrySet()) {
@@ -242,6 +243,10 @@ public class Get extends Query
 return this;
   }
 
+  public Get setLoadColumnFamiliesOnDemand(boolean value) {
+return (Get) super.setLoadColumnFamiliesOnDemand(value);
+  }
+
   /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF

http://git-wip-us.apache.org/repos/asf/hbase/blob/197290a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 99d5a6a..7d3b628 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -42,6 +42,7 @@ public abstract class Query extends OperationWithAttributes {
   protected int targetReplicaId = -1;
   protected Consistency consistency = Consistency.STRONG;
  protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
+  protected Boolean loadColumnFamiliesOnDemand = null;
 
   /**
* @return Filter
@@ -177,6 +178,40 @@ public abstract class Query extends 
OperationWithAttributes {
   IsolationLevel.fromBytes(attr);
   }
 
+  /**
+   * Set the value indicating whether loading CFs on demand should be allowed 
(cluster
+   * default is false). On-demand CF loading doesn't load column families 
until necessary, e.g.
+   * if you filter on one column, the other column family data will be loaded 
only for the rows
+   * that are included in result, not all rows like in normal case.
+   * With column-specific filters, like SingleColumnValueFilter 
w/filterIfMissing == true,
+   * this can deliver huge perf gains when there's a cf with lots of data; 
however, it can
+   * also lead to some inconsistent results, as follows:
+   * - if someone does a concurrent update to both column families in question 
you may get a row
+   *   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { 
video => "my cat" } }
+   *   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my 
dog" }, concurrent scan
+   *   filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 
1 },
+   *   { video => "my dog" } }.
+   * - if there's a concurrent split and you have more than 2 column families, 
some rows may be
+   *   missing some column families.

[13/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
index b5f351a..de70e5b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
@@ -51,700 +51,703 @@
 043import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan;
 044import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 045import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-046import 
org.apache.hadoop.hbase.master.MasterServices;
-047import 
org.apache.hadoop.hbase.master.RegionPlan;
-048import 
org.apache.hadoop.hbase.util.Pair;
-049
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-052import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-053
-054/**
-055 * An implementation of the {@link 
org.apache.hadoop.hbase.master.LoadBalancer} that
-056 * assigns favored nodes for each region. 
There is a Primary RegionServer that hosts
-057 * the region, and then there is 
Secondary and Tertiary RegionServers. Currently, the
-058 * favored nodes information is used in 
creating HDFS files - the Primary RegionServer
-059 * passes the primary, secondary, 
tertiary node addresses as hints to the
-060 * DistributedFileSystem API for creating 
files on the filesystem. These nodes are
-061 * treated as hints by the HDFS to place 
the blocks of the file. This alleviates the
-062 * problem to do with reading from remote 
nodes (since we can make the Secondary
-063 * RegionServer as the new Primary 
RegionServer) after a region is recovered. This
-064 * should help provide consistent read 
latencies for the regions even when their
-065 * primary region servers die. This 
provides two
-066 * {@link 
org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator}
-067 *
-068 */
-069public class FavoredStochasticBalancer 
extends StochasticLoadBalancer implements
-070FavoredNodesPromoter {
-071
-072  private static final Log LOG = 
LogFactory.getLog(FavoredStochasticBalancer.class);
-073  private FavoredNodesManager fnm;
-074
-075  @Override
-076  public void initialize() throws 
HBaseIOException {
-077configureGenerators();
-078super.initialize();
-079  }
-080
-081  protected void configureGenerators() 
{
-082List 
fnPickers = new ArrayList<>(2);
-083fnPickers.add(new 
FavoredNodeLoadPicker());
-084fnPickers.add(new 
FavoredNodeLocalityPicker());
-085setCandidateGenerators(fnPickers);
-086  }
-087
-088  @Override
-089  public void 
setMasterServices(MasterServices masterServices) {
-090
super.setMasterServices(masterServices);
-091fnm = 
masterServices.getFavoredNodesManager();
-092  }
-093
-094  /*
-095   * Round robin assignment: Segregate 
the regions into two types:
-096   *
-097   * 1. The regions that have favored 
node assignment where at least one of the favored node
-098   * is still alive. In this case, try to 
adhere to the current favored nodes assignment as
-099   * much as possible - i.e., if the 
current primary is gone, then make the secondary or
-100   * tertiary as the new host for the 
region (based on their current load). Note that we don't
-101   * change the favored node assignments 
here (even though one or more favored node is
-102   * currently down). That will be done 
by the admin operations.
-103   *
-104   * 2. The regions that currently don't 
have favored node assignments. Generate favored nodes
-105   * for them and then assign. Generate 
the primary fn in round robin fashion and generate
-106   * secondary and tertiary as per 
favored nodes constraints.
-107   */
-108  @Override
-109  public Map> roundRobinAssignment(List 
regions,
-110  List servers) 
throws HBaseIOException {
-111
-112
metricsBalancer.incrMiscInvocations();
-113
-114Set regionSet = 
Sets.newHashSet(regions);
-115Map> assignmentMap = assignMasterRegions(regions, 
servers);
-116if (assignmentMap != null && 
!assignmentMap.isEmpty()) {
-117  servers = new 
ArrayList<>(servers);
-118  // Guarantee not to put other 
regions on master
-119  servers.remove(masterServerName);
-120  List 
masterRegions = assignmentMap.get(masterServerName);
-121  if (!masterRegions.isEmpty()) {
-122for (HRegionInfo region: 
masterRegions) {
-123  regionSet.remove(region);
-124}
-125  }
-126  

[25/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 6d380af..883a87d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -488,1358 +488,1367 @@
 480synchronized 
(checkIfShouldMoveSystemRegionLock) {
 481  List plans = 
new ArrayList<>();
 482  for (ServerName server : 
getExcludedServersForSystemTable()) {
-483List 
regionsShouldMove = getCarryingSystemTables(server);
-484if 
(!regionsShouldMove.isEmpty()) {
-485  for (HRegionInfo regionInfo 
: regionsShouldMove) {
-486// null value for dest 
forces destination server to be selected by balancer
-487RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
-488if 
(regionInfo.isMetaRegion()) {
-489  // Must move meta 
region first.
-490  moveAsync(plan);
-491} else {
-492  plans.add(plan);
-493}
-494  }
-495}
-496for (RegionPlan plan : plans) 
{
-497  moveAsync(plan);
-498}
-499  }
-500}
-501  } catch (Throwable t) {
-502LOG.error(t);
-503  }
-504}).start();
-505  }
-506
-507  private List 
getCarryingSystemTables(ServerName serverName) {
-508Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
-509if (regions == null) {
-510  return new ArrayList<>();
-511}
-512return regions.stream()
-513
.map(RegionStateNode::getRegionInfo)
-514
.filter(HRegionInfo::isSystemTable)
-515.collect(Collectors.toList());
-516  }
-517
-518  public void assign(final HRegionInfo 
regionInfo) throws IOException {
-519assign(regionInfo, true);
-520  }
-521
-522  public void assign(final HRegionInfo 
regionInfo, final boolean forceNewPlan) throws IOException {
-523AssignProcedure proc = 
createAssignProcedure(regionInfo, forceNewPlan);
-524
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
+483if 
(master.getServerManager().isServerDead(server)) {
+484  // TODO: See HBASE-18494 
and HBASE-18495. Though getExcludedServersForSystemTable()
+485  // considers only online 
servers, the server could be queued for dead server
+486  // processing. As region 
assignments for crashed server is handled by
+487  // ServerCrashProcedure, do 
NOT handle them here. The goal is to handle this through
+488  // regular flow of 
LoadBalancer as a favored node and not to have this special
+489  // handling.
+490  continue;
+491}
+492List 
regionsShouldMove = getCarryingSystemTables(server);
+493if 
(!regionsShouldMove.isEmpty()) {
+494  for (HRegionInfo regionInfo 
: regionsShouldMove) {
+495// null value for dest 
forces destination server to be selected by balancer
+496RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
+497if 
(regionInfo.isMetaRegion()) {
+498  // Must move meta 
region first.
+499  moveAsync(plan);
+500} else {
+501  plans.add(plan);
+502}
+503  }
+504}
+505for (RegionPlan plan : plans) 
{
+506  moveAsync(plan);
+507}
+508  }
+509}
+510  } catch (Throwable t) {
+511LOG.error(t);
+512  }
+513}).start();
+514  }
+515
+516  private List 
getCarryingSystemTables(ServerName serverName) {
+517Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
+518if (regions == null) {
+519  return new ArrayList<>();
+520}
+521return regions.stream()
+522
.map(RegionStateNode::getRegionInfo)
+523
.filter(HRegionInfo::isSystemTable)
+524.collect(Collectors.toList());
 525  }
 526
-527  public void unassign(final HRegionInfo 
regionInfo) throws IOException {
-528unassign(regionInfo, false);
+527  public void assign(final HRegionInfo 
regionInfo) throws IOException {
+528assign(regionInfo, true);
 529  }
 530
-531  public void unassign(final HRegionInfo 
regionInfo, final boolean forceNewPlan)
-532  throws IOException {
-533// TODO: rename this reassign
-534Region

[17/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryIndex;   
//r

[12/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 3d1c07d..61c9b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -124,3648 +124,3649 @@
 116import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 117import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
 118import 
org.apache.hadoop.hbase.master.HMaster;
-119import 
org.apache.hadoop.hbase.master.RegionState.State;
-120import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-121import 
org.apache.hadoop.hbase.mob.MobCacheConfig;
-122import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
-123import 
org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
-124import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-125import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-126import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-127import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-128import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-129import 
org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
-130import 
org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-131import 
org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
-132import 
org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
-133import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-134import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-135import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-136import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-137import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
-138import 
org.apache.hadoop.hbase.security.Superusers;
-139import 
org.apache.hadoop.hbase.security.User;
-140import 
org.apache.hadoop.hbase.security.UserProvider;
-141import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
-142import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-143import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-144import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-145import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor.Builder;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.Region

[07/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/export_control.html
--
diff --git a/export_control.html b/export_control.html
index aab8ad1..bbef738 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 349bfe2..a6c1f15 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Checkstyle Results
 
@@ -273,7 +273,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index 98fc8af..ff61f7c 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Project Dependencies
 
@@ -377,7 +377,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 0dd6d01..2ec76a9 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Reactor Dependency 
Convergence
 
@@ -571,7 +571,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index e1f4619..6571392 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 0b2b992..9da0fc6 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Project Dependency 
Management
 
@@ -736,7 +736,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-annotations/index.html
--
diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
index 1fb4fa7..fecf39e 100644
--- a/hbase-annota

[31/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index ce35234..f8432f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -538,2952 +538,2964 @@
 530}
 531  }
 532
-533  // return the actual infoPort, -1 means 
disable info server.
-534  private int putUpJettyServer() throws 
IOException {
-535if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
-536  return -1;
-537}
-538final int infoPort = 
conf.getInt("hbase.master.info.port.orig",
-539  
HConstants.DEFAULT_MASTER_INFOPORT);
-540// -1 is for disabling info server, 
so no redirecting
-541if (infoPort < 0 || infoServer == 
null) {
-542  return -1;
-543}
-544if(infoPort == infoServer.getPort()) 
{
-545  return infoPort;
-546}
-547final String addr = 
conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-548if 
(!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
-549  String msg =
-550  "Failed to start redirecting 
jetty server. Address " + addr
-551  + " does not belong to this 
host. Correct configuration parameter: "
-552  + 
"hbase.master.info.bindAddress";
-553  LOG.error(msg);
-554  throw new IOException(msg);
-555}
-556
-557// TODO I'm pretty sure we could just 
add another binding to the InfoServer run by
-558// the RegionServer and have it run 
the RedirectServlet instead of standing up
-559// a second entire stack here.
-560masterJettyServer = new Server();
-561final ServerConnector connector = new 
ServerConnector(masterJettyServer);
-562connector.setHost(addr);
-563connector.setPort(infoPort);
-564
masterJettyServer.addConnector(connector);
-565
masterJettyServer.setStopAtShutdown(true);
-566
-567final String redirectHostname = 
shouldUseThisHostnameInstead() ? useThisHostnameInstead : null;
-568
-569final RedirectServlet redirect = new 
RedirectServlet(infoServer, redirectHostname);
-570final WebAppContext context = new 
WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
-571context.addServlet(new 
ServletHolder(redirect), "/*");
-572
context.setServer(masterJettyServer);
-573
-574try {
-575  masterJettyServer.start();
-576} catch (Exception e) {
-577  throw new IOException("Failed to 
start redirecting jetty server", e);
-578}
-579return connector.getLocalPort();
-580  }
-581
-582  @Override
-583  protected TableDescriptors 
getFsTableDescriptors() throws IOException {
-584return 
super.getFsTableDescriptors();
-585  }
-586
-587  /**
-588   * For compatibility, if failed with 
regionserver credentials, try the master one
-589   */
-590  @Override
-591  protected void login(UserProvider user, 
String host) throws IOException {
-592try {
-593  super.login(user, host);
-594} catch (IOException ie) {
-595  
user.login("hbase.master.keytab.file",
-596
"hbase.master.kerberos.principal", host);
-597}
-598  }
-599
-600  /**
-601   * If configured to put regions on 
active master,
-602   * wait till a backup master becomes 
active.
-603   * Otherwise, loop till the server is 
stopped or aborted.
-604   */
-605  @Override
-606  protected void waitForMasterActive(){
-607boolean tablesOnMaster = 
BaseLoadBalancer.tablesOnMaster(conf);
-608while (!(tablesOnMaster && 
activeMaster)
-609&& !isStopped() 
&& !isAborted()) {
-610  sleeper.sleep();
-611}
-612  }
-613
-614  @VisibleForTesting
-615  public MasterRpcServices 
getMasterRpcServices() {
-616return 
(MasterRpcServices)rpcServices;
-617  }
-618
-619  public boolean balanceSwitch(final 
boolean b) throws IOException {
-620return 
getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
-621  }
-622
-623  @Override
-624  protected String getProcessName() {
-625return MASTER;
-626  }
-627
-628  @Override
-629  protected boolean canCreateBaseZNode() 
{
-630return true;
+533  // Main run loop. Calls through to the 
regionserver run loop.
+534  @Override
+535  public void run() {
+536try {
+537  super.run();
+538} finally {
+539  // If on way out, then we are no 
longer active master.
+540  this.activeMaster = false;
+541}
+542  }
+543
+544  // return the actual infoPort, -1 means 
disable info server.
+545  private int putUpJettyServer() throws 
IOException {
+546if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
+547  return -1;
+548}
+549final int infoPort = 
conf.get

[08/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 3d1c07d..61c9b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -124,3648 +124,3649 @@
 116import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 117import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
 118import 
org.apache.hadoop.hbase.master.HMaster;
-119import 
org.apache.hadoop.hbase.master.RegionState.State;
-120import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-121import 
org.apache.hadoop.hbase.mob.MobCacheConfig;
-122import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
-123import 
org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
-124import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-125import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-126import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-127import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-128import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-129import 
org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
-130import 
org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-131import 
org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
-132import 
org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
-133import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-134import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-135import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-136import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-137import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
-138import 
org.apache.hadoop.hbase.security.Superusers;
-139import 
org.apache.hadoop.hbase.security.User;
-140import 
org.apache.hadoop.hbase.security.UserProvider;
-141import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
-142import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-143import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-144import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-145import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor.Builder;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.gene

[21/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.LocalityType.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryInd

[16/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryIndex;   
//regionIndex -> regionIndex of the primary
-146boolean hasRegionReplicas = false;   
//whethe

[20/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   reg

[06/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/dependency-info.html 
b/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
index d1b5a37..4c9ec7e 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
+++ b/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
--
diff --git 
a/hbase-archetypes/hbase-shaded-client-project/dependency-management.html 
b/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
index 0925a3c..edbc920 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
+++ b/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Project Dependency Management
 
@@ -736,7 +736,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/index.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/index.html 
b/hbase-archetypes/hbase-shaded-client-project/index.html
index ceefc0d..e5b166e 100644
--- a/hbase-archetypes/hbase-shaded-client-project/index.html
+++ b/hbase-archetypes/hbase-shaded-client-project/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
About
 
@@ -119,7 +119,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/integration.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/integration.html 
b/hbase-archetypes/hbase-shaded-client-project/integration.html
index 12293fb..a65dd73 100644
--- a/hbase-archetypes/hbase-shaded-client-project/integration.html
+++ b/hbase-archetypes/hbase-shaded-client-project/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
CI Management
 
@@ -126,7 +126,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html 
b/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
index c180dbc..ffd63ea 100644
--- a/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
+++ b/hbase-archetypes/hbase-shaded-client-project/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/hbase-archetypes/hbase-shaded-client-project/license.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/li

[34/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 0e11aef..74e24f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -693,20 +693,20 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ScanType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 2023b7d..3a39079 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,10 +131,10 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteCompare
-org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index 33cb6b6..7cba16c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -248,8 +248,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apac

[15/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
index b5f351a..de70e5b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
@@ -51,700 +51,703 @@
 043import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan;
 044import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 045import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-046import 
org.apache.hadoop.hbase.master.MasterServices;
-047import 
org.apache.hadoop.hbase.master.RegionPlan;
-048import 
org.apache.hadoop.hbase.util.Pair;
-049
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-052import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-053
-054/**
-055 * An implementation of the {@link 
org.apache.hadoop.hbase.master.LoadBalancer} that
-056 * assigns favored nodes for each region. 
There is a Primary RegionServer that hosts
-057 * the region, and then there is 
Secondary and Tertiary RegionServers. Currently, the
-058 * favored nodes information is used in 
creating HDFS files - the Primary RegionServer
-059 * passes the primary, secondary, 
tertiary node addresses as hints to the
-060 * DistributedFileSystem API for creating 
files on the filesystem. These nodes are
-061 * treated as hints by the HDFS to place 
the blocks of the file. This alleviates the
-062 * problem to do with reading from remote 
nodes (since we can make the Secondary
-063 * RegionServer as the new Primary 
RegionServer) after a region is recovered. This
-064 * should help provide consistent read 
latencies for the regions even when their
-065 * primary region servers die. This 
provides two
-066 * {@link 
org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator}
-067 *
-068 */
-069public class FavoredStochasticBalancer 
extends StochasticLoadBalancer implements
-070FavoredNodesPromoter {
-071
-072  private static final Log LOG = 
LogFactory.getLog(FavoredStochasticBalancer.class);
-073  private FavoredNodesManager fnm;
-074
-075  @Override
-076  public void initialize() throws 
HBaseIOException {
-077configureGenerators();
-078super.initialize();
-079  }
-080
-081  protected void configureGenerators() 
{
-082List 
fnPickers = new ArrayList<>(2);
-083fnPickers.add(new 
FavoredNodeLoadPicker());
-084fnPickers.add(new 
FavoredNodeLocalityPicker());
-085setCandidateGenerators(fnPickers);
-086  }
-087
-088  @Override
-089  public void 
setMasterServices(MasterServices masterServices) {
-090
super.setMasterServices(masterServices);
-091fnm = 
masterServices.getFavoredNodesManager();
-092  }
-093
-094  /*
-095   * Round robin assignment: Segregate 
the regions into two types:
-096   *
-097   * 1. The regions that have favored 
node assignment where at least one of the favored node
-098   * is still alive. In this case, try to 
adhere to the current favored nodes assignment as
-099   * much as possible - i.e., if the 
current primary is gone, then make the secondary or
-100   * tertiary as the new host for the 
region (based on their current load). Note that we don't
-101   * change the favored node assignments 
here (even though one or more favored node is
-102   * currently down). That will be done 
by the admin operations.
-103   *
-104   * 2. The regions that currently don't 
have favored node assignments. Generate favored nodes
-105   * for them and then assign. Generate 
the primary fn in round robin fashion and generate
-106   * secondary and tertiary as per 
favored nodes constraints.
-107   */
-108  @Override
-109  public Map> roundRobinAssignment(List 
regions,
-110  List servers) 
throws HBaseIOException {
-111
-112
metricsBalancer.incrMiscInvocations();
-113
-114Set regionSet = 
Sets.newHashSet(regions);
-115Map> assignmentMap = assignMasterRegions(regions, 
servers);
-116if (assignmentMap != null && 
!assignmentMap.isEmpty()) {
-117  servers = new 
ArrayList<>(servers);
-118  // Guarantee not to put other 
regions on master
-119  servers.remove(masterServerName);
-120  List 
masterRegions = assignmentMap.get(masterServerName);
-121  if (!masterRegions.isEmpty()) {
-122for (

[30/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index ce35234..f8432f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -538,2952 +538,2964 @@
 530}
 531  }
 532
-533  // return the actual infoPort, -1 means 
disable info server.
-534  private int putUpJettyServer() throws 
IOException {
-535if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
-536  return -1;
-537}
-538final int infoPort = 
conf.getInt("hbase.master.info.port.orig",
-539  
HConstants.DEFAULT_MASTER_INFOPORT);
-540// -1 is for disabling info server, 
so no redirecting
-541if (infoPort < 0 || infoServer == 
null) {
-542  return -1;
-543}
-544if(infoPort == infoServer.getPort()) 
{
-545  return infoPort;
-546}
-547final String addr = 
conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-548if 
(!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
-549  String msg =
-550  "Failed to start redirecting 
jetty server. Address " + addr
-551  + " does not belong to this 
host. Correct configuration parameter: "
-552  + 
"hbase.master.info.bindAddress";
-553  LOG.error(msg);
-554  throw new IOException(msg);
-555}
-556
-557// TODO I'm pretty sure we could just 
add another binding to the InfoServer run by
-558// the RegionServer and have it run 
the RedirectServlet instead of standing up
-559// a second entire stack here.
-560masterJettyServer = new Server();
-561final ServerConnector connector = new 
ServerConnector(masterJettyServer);
-562connector.setHost(addr);
-563connector.setPort(infoPort);
-564
masterJettyServer.addConnector(connector);
-565
masterJettyServer.setStopAtShutdown(true);
-566
-567final String redirectHostname = 
shouldUseThisHostnameInstead() ? useThisHostnameInstead : null;
-568
-569final RedirectServlet redirect = new 
RedirectServlet(infoServer, redirectHostname);
-570final WebAppContext context = new 
WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
-571context.addServlet(new 
ServletHolder(redirect), "/*");
-572
context.setServer(masterJettyServer);
-573
-574try {
-575  masterJettyServer.start();
-576} catch (Exception e) {
-577  throw new IOException("Failed to 
start redirecting jetty server", e);
-578}
-579return connector.getLocalPort();
-580  }
-581
-582  @Override
-583  protected TableDescriptors 
getFsTableDescriptors() throws IOException {
-584return 
super.getFsTableDescriptors();
-585  }
-586
-587  /**
-588   * For compatibility, if failed with 
regionserver credentials, try the master one
-589   */
-590  @Override
-591  protected void login(UserProvider user, 
String host) throws IOException {
-592try {
-593  super.login(user, host);
-594} catch (IOException ie) {
-595  
user.login("hbase.master.keytab.file",
-596
"hbase.master.kerberos.principal", host);
-597}
-598  }
-599
-600  /**
-601   * If configured to put regions on 
active master,
-602   * wait till a backup master becomes 
active.
-603   * Otherwise, loop till the server is 
stopped or aborted.
-604   */
-605  @Override
-606  protected void waitForMasterActive(){
-607boolean tablesOnMaster = 
BaseLoadBalancer.tablesOnMaster(conf);
-608while (!(tablesOnMaster && 
activeMaster)
-609&& !isStopped() 
&& !isAborted()) {
-610  sleeper.sleep();
-611}
-612  }
-613
-614  @VisibleForTesting
-615  public MasterRpcServices 
getMasterRpcServices() {
-616return 
(MasterRpcServices)rpcServices;
-617  }
-618
-619  public boolean balanceSwitch(final 
boolean b) throws IOException {
-620return 
getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
-621  }
-622
-623  @Override
-624  protected String getProcessName() {
-625return MASTER;
-626  }
-627
-628  @Override
-629  protected boolean canCreateBaseZNode() 
{
-630return true;
+533  // Main run loop. Calls through to the 
regionserver run loop.
+534  @Override
+535  public void run() {
+536try {
+537  super.run();
+538} finally {
+539  // If on way out, then we are no 
longer active master.
+540  this.activeMaster = false;
+541}
+542  }
+543
+544  // return the actual infoPort, -1 means 
disable info server.
+545  private int putUpJettyServer() throws 
IOException {
+546if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
+547  return -1;
+548}
+549final int infoPort = 
conf.getInt("hbase.master.info.port.orig",
+550  
HConstants.DEFAULT_MASTER_INFOPORT);
+

[02/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
index 6bcdd5d..26df960 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestFromClientSide
+public class TestFromClientSide
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Run tests that use the HBase clients; Table.
  Sets up the HBase mini cluster once at start and runs through all client 
tests.
@@ -1080,7 +1080,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -1089,7 +1089,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-protected static final HBaseTestingUtility TEST_UTIL
+protected static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -1098,7 +1098,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ROW
-private static byte[] ROW
+private static byte[] ROW
 
 
 
@@ -1107,7 +1107,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private static byte[] FAMILY
+private static byte[] FAMILY
 
 
 
@@ -1116,7 +1116,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 QUALIFIER
-private static byte[] QUALIFIER
+private static byte[] QUALIFIER
 
 
 
@@ -1125,7 +1125,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 VALUE
-private static byte[] VALUE
+private static byte[] VALUE
 
 
 
@@ -1134,7 +1134,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SLAVES
-protected static int SLAVES
+protected static int SLAVES
 
 
 
@@ -1143,7 +1143,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -1160,7 +1160,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestFromClientSide
-public TestFromClientSide()
+public TestFromClientSide()
 
 
 
@@ -1177,7 +1177,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -1191,7 +1191,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -1205,7 +1205,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUp
-public void setUp()
+public void setUp()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -1219,7 +1219,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDown
-public void tearDown()
+public void tearDown()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -1233,7 +1233,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDuplicateAppend
-public void testDuplicateAppend()
+public void testDuplicateAppend()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 Test append result when there are duplicate rpc 
request.
 
@@ -1248,7 +1248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testKeepDeletedCells
-public void testKeepDeletedCells()
+public void testKeepDeletedCells()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 Basic client side validation of HBASE-4536
 
@@ -1263,7 +1263,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testPurgeFutureDeletes
-public void testPurgeFutureDeletes(

[09/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
index 3d1c07d..61c9b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
@@ -124,3648 +124,3649 @@
 116import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 117import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
 118import 
org.apache.hadoop.hbase.master.HMaster;
-119import 
org.apache.hadoop.hbase.master.RegionState.State;
-120import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-121import 
org.apache.hadoop.hbase.mob.MobCacheConfig;
-122import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
-123import 
org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
-124import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-125import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-126import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-127import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-128import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-129import 
org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
-130import 
org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-131import 
org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
-132import 
org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
-133import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-134import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-135import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-136import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-137import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
-138import 
org.apache.hadoop.hbase.security.Superusers;
-139import 
org.apache.hadoop.hbase.security.User;
-140import 
org.apache.hadoop.hbase.security.UserProvider;
-141import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
-142import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-143import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-144import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-145import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor.Builder;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-167import 
org.apache.hadoop.hbase.sh

[11/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 3d1c07d..61c9b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -124,3648 +124,3649 @@
 116import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 117import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
 118import 
org.apache.hadoop.hbase.master.HMaster;
-119import 
org.apache.hadoop.hbase.master.RegionState.State;
-120import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-121import 
org.apache.hadoop.hbase.mob.MobCacheConfig;
-122import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
-123import 
org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
-124import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-125import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-126import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-127import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-128import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-129import 
org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
-130import 
org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-131import 
org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
-132import 
org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
-133import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-134import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-135import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-136import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-137import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
-138import 
org.apache.hadoop.hbase.security.Superusers;
-139import 
org.apache.hadoop.hbase.security.User;
-140import 
org.apache.hadoop.hbase.security.UserProvider;
-141import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
-142import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-143import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-144import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-145import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor.Builder;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStat

[41/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 51b520f..8ea3013 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1286,7 +1286,7 @@ implements 
 
 UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
-private static final UnassignProcedure[] UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
+private static final UnassignProcedure[] UNASSIGNED_PROCEDURE_FOR_TYPE_INFO
 
 
 
@@ -1295,7 +1295,7 @@ implements 
 
 pendingAssignQueue
-private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList 
pendingAssignQueue
+private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList 
pendingAssignQueue
 
 
 
@@ -1304,7 +1304,7 @@ implements 
 
 assignQueueLock
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
 
 
 
@@ -1313,7 +1313,7 @@ implements 
 
 assignQueueFullCond
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
 
 
 
@@ -1741,7 +1741,7 @@ implements 
 
 getCarryingSystemTables
-private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getCarryingSystemTables(ServerName serverName)
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getCarryingSystemTables(ServerName serverName)
 
 
 
@@ -1750,7 +1750,7 @@ implements 
 
 assign
-public void assign(HRegionInfo regionInfo)
+public void assign(HRegionInfo regionInfo)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1764,7 +1764,7 @@ implements 
 
 assign
-public void assign(HRegionInfo regionInfo,
+public void assign(HRegionInfo regionInfo,
boolean forceNewPlan)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -1779,7 +1779,7 @@ implements 
 
 unassign
-public void unassign(HRegionInfo regionInfo)
+public void unassign(HRegionInfo regionInfo)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1793,7 +1793,7 @@ implements 
 
 unassign
-public void unassign(HRegionInfo regionInfo,
+public void unassign(HRegionInfo regionInfo,
  boolean forceNewPlan)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -1808,7 +1808,7 @@ implements 
 
 moveAsync
-public http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in 
java.util.concurrent">Future moveAsync(RegionPlan regionPlan)
+public http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in 
java.util.concurrent">Future moveAsync(RegionPlan regionPlan)
 
 
 
@@ -1817,7 +1817,7 @@ implements 
 
 waitForAssignment
-public boolean waitForAssignment(HRegionInfo regionInfo)
+public boolean waitForAssignment(HRegionInfo regionInfo)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1831,7 +1831,7 @@ implements 
 
 waitForAssignment
-public boolean waitForAssignment(HRegionInfo regionInfo,
+public boolean waitForAssignment(HRegionInfo regionInfo,
  long timeout)

[26/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 6d380af..883a87d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -488,1358 +488,1367 @@
 480synchronized 
(checkIfShouldMoveSystemRegionLock) {
 481  List plans = 
new ArrayList<>();
 482  for (ServerName server : 
getExcludedServersForSystemTable()) {
-483List 
regionsShouldMove = getCarryingSystemTables(server);
-484if 
(!regionsShouldMove.isEmpty()) {
-485  for (HRegionInfo regionInfo 
: regionsShouldMove) {
-486// null value for dest 
forces destination server to be selected by balancer
-487RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
-488if 
(regionInfo.isMetaRegion()) {
-489  // Must move meta 
region first.
-490  moveAsync(plan);
-491} else {
-492  plans.add(plan);
-493}
-494  }
-495}
-496for (RegionPlan plan : plans) 
{
-497  moveAsync(plan);
-498}
-499  }
-500}
-501  } catch (Throwable t) {
-502LOG.error(t);
-503  }
-504}).start();
-505  }
-506
-507  private List 
getCarryingSystemTables(ServerName serverName) {
-508Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
-509if (regions == null) {
-510  return new ArrayList<>();
-511}
-512return regions.stream()
-513
.map(RegionStateNode::getRegionInfo)
-514
.filter(HRegionInfo::isSystemTable)
-515.collect(Collectors.toList());
-516  }
-517
-518  public void assign(final HRegionInfo 
regionInfo) throws IOException {
-519assign(regionInfo, true);
-520  }
-521
-522  public void assign(final HRegionInfo 
regionInfo, final boolean forceNewPlan) throws IOException {
-523AssignProcedure proc = 
createAssignProcedure(regionInfo, forceNewPlan);
-524
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
+483if 
(master.getServerManager().isServerDead(server)) {
+484  // TODO: See HBASE-18494 
and HBASE-18495. Though getExcludedServersForSystemTable()
+485  // considers only online 
servers, the server could be queued for dead server
+486  // processing. As region 
assignments for crashed server is handled by
+487  // ServerCrashProcedure, do 
NOT handle them here. The goal is to handle this through
+488  // regular flow of 
LoadBalancer as a favored node and not to have this special
+489  // handling.
+490  continue;
+491}
+492List 
regionsShouldMove = getCarryingSystemTables(server);
+493if 
(!regionsShouldMove.isEmpty()) {
+494  for (HRegionInfo regionInfo 
: regionsShouldMove) {
+495// null value for dest 
forces destination server to be selected by balancer
+496RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
+497if 
(regionInfo.isMetaRegion()) {
+498  // Must move meta 
region first.
+499  moveAsync(plan);
+500} else {
+501  plans.add(plan);
+502}
+503  }
+504}
+505for (RegionPlan plan : plans) 
{
+506  moveAsync(plan);
+507}
+508  }
+509}
+510  } catch (Throwable t) {
+511LOG.error(t);
+512  }
+513}).start();
+514  }
+515
+516  private List 
getCarryingSystemTables(ServerName serverName) {
+517Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
+518if (regions == null) {
+519  return new ArrayList<>();
+520}
+521return regions.stream()
+522
.map(RegionStateNode::getRegionInfo)
+523
.filter(HRegionInfo::isSystemTable)
+524.collect(Collectors.toList());
 525  }
 526
-527  public void unassign(final HRegionInfo 
regionInfo) throws IOException {
-528unassign(regionInfo, false);
+527  public void assign(final HRegionInfo 
regionInfo) throws IOException {
+528assign(regionInfo, true);
 529  }
 530
-531  public void unassign(final HRegionInfo 
regi

[38/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index 5bcefd1..5704d34 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":9,"i28":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public abstract class BaseLoadBalancer
+public abstract class BaseLoadBalancer
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements LoadBalancer
 The base class for load balancers. It provides the the 
functions used to by
@@ -177,33 +177,33 @@ implements config 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[]
-DEFAULT_TABLES_ON_MASTER 
-
-
 private static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 EMPTY_REGION_LIST 
 
-
+
 (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true";
 title="class or interface in java.util.function">Predicate
 IDLE_SERVER_PREDICATOR 
 
-
+
 private static 
org.apache.commons.logging.Log
 LOG 
 
-
+
 protected ServerName
 masterServerName 
 
-
+
 protected MetricsBalancer
 metricsBalancer 
 
-
+
 protected static int
 MIN_SERVER_BALANCE 
 
+
+protected boolean
+onlySystemTablesOnMaster 
+
 
 protected float
 overallSlop 
@@ -233,11 +233,7 @@ implements stopped 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
-TABLES_ON_MASTER 
-
-
-protected http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetString>
+protected boolean
 tablesOnMaster 
 
 
@@ -246,7 +242,7 @@ implements LoadBalancer
-BOGUS_SERVER_NAME
+BOGUS_SERVER_NAME,
 SYSTEM_TABLES_ON_MASTER,
 TABLES_ON_MASTER
 
 
 
@@ -285,7 +281,7 @@ implements 
-All Methods Static Methods Instance Methods Concrete Methods 
+All Methods Instance Methods Concrete Methods 
 
 Modifier and Type
 Method and Description
@@ -300,9 +296,10 @@ implements 
 protected http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList>
-assignMasterRegions(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection regions,
-   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List servers)
-Assign the regions that should be on master 
regionserver.
+assignMasterSystemRegions(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection regions,
+ http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List servers)
+If master is configured to carry system tables only, in 
here is
+ where we figure what to assign it.
 
 
 
@@ -330,34 +327,27 @@ implements getRegionAssignmentsByServer(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection regions) 
 
 
-protected stat

hbase-site git commit: INFRA-10751 Empty commit

2017-08-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 33bc9e06b -> f6c74dcd5


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f6c74dcd
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f6c74dcd
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f6c74dcd

Branch: refs/heads/asf-site
Commit: f6c74dcd508432f8d7a893bb93cc54f2cb48ff9c
Parents: 33bc9e0
Author: jenkins 
Authored: Thu Aug 17 15:07:15 2017 +
Committer: jenkins 
Committed: Thu Aug 17 15:07:15 2017 +

--

--




[28/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index 9112346..dc4506d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -43,395 +43,395 @@
 035import 
java.util.concurrent.ConcurrentNavigableMap;
 036import 
java.util.concurrent.ConcurrentSkipListMap;
 037import 
java.util.concurrent.CopyOnWriteArrayList;
-038import java.util.function.Predicate;
-039
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.ClockOutOfSyncException;
-044import 
org.apache.hadoop.hbase.HConstants;
-045import 
org.apache.hadoop.hbase.HRegionInfo;
-046import 
org.apache.hadoop.hbase.NotServingRegionException;
-047import 
org.apache.hadoop.hbase.RegionLoad;
-048import 
org.apache.hadoop.hbase.ServerLoad;
-049import 
org.apache.hadoop.hbase.ServerName;
-050import 
org.apache.hadoop.hbase.YouAreDeadException;
-051import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import 
org.apache.hadoop.hbase.client.ClusterConnection;
-054import 
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-055import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-058import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-059import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-068import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-069import 
org.apache.zookeeper.KeeperException;
-070
-071import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-072
-073/**
-074 * The ServerManager class manages info 
about region servers.
-075 * 

-076 * Maintains lists of online and dead servers. Processes the startups, -077 * shutdowns, and deaths of region servers. -078 *

-079 * Servers are distinguished in two different ways. A given server has a -080 * location, specified by hostname and port, and of which there can only be one -081 * online at any given time. A server instance is specified by the location -082 * (hostname and port) as well as the startcode (timestamp from when the server -083 * was started). This is used to differentiate a restarted instance of a given -084 * server from the original instance. -085 *

-086 * If a sever is known not to be running any more, it is called dead. The dead -087 * server needs to be handled by a ServerShutdownHandler. If the handler is not -088 * enabled yet, the server can't be handled right away so it is queued up. -089 * After the handler is enabled, the server will be submitted to a handler to handle. -090 * However, the handler may be just partially enabled. If so, -091 * the server cannot be fully processed, and be queued up for further processing. -092 * A server is fully processed only after the handler is fully enabled -093 * and has completed the handling. -094 */ -095@InterfaceAudience.Private -096public class ServerManager { -097 public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART = -098 "hbase.master.wait.on.regionservers.maxtostart"; -099 -100 public static final String WAIT_ON_REGIONSERVERS_MINTOSTART = -101 "hbase.master.wait.on.regionservers.mintostart"; -102 -103 public static final String WAIT_ON_REGIONSERVERS_TIMEOUT = -104 "hbase.master.wait.on.regionservers.timeout"; -105 -106 public static final String WAIT_ON_REGIONSERVERS_INTERVAL = -107 "hbase.master.wait.on.regionservers.interval"; -108 -109 private static final Log LOG = LogFactory.getLog(ServerManager.class); -110 -111 // Set if we are to shutdown the cluster. -112 private volatile boolean clusterShutdown = false; -113 -114 /** -115 * The last flushed sequence id for a region. -116 */ -117 private final C


[24/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryIndex;  

[01/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6fa5abd53 -> 33bc9e06b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/testdevapidocs/org/apache/hadoop/hbase/fs/TestBlockReorder.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/fs/TestBlockReorder.html 
b/testdevapidocs/org/apache/hadoop/hbase/fs/TestBlockReorder.html
index 42b105d..3c03714 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/fs/TestBlockReorder.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/fs/TestBlockReorder.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestBlockReorder
+public class TestBlockReorder
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Tests for the hdfs fix from HBASE-6435.
 
@@ -269,7 +269,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -278,7 +278,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -287,7 +287,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cluster
-private org.apache.hadoop.hdfs.MiniDFSCluster cluster
+private org.apache.hadoop.hdfs.MiniDFSCluster cluster
 
 
 
@@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 htu
-private HBaseTestingUtility htu
+private HBaseTestingUtility htu
 
 
 
@@ -305,7 +305,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 dfs
-private org.apache.hadoop.hdfs.DistributedFileSystem dfs
+private org.apache.hadoop.hdfs.DistributedFileSystem dfs
 
 
 
@@ -314,7 +314,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 host1
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host1
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host1
 
 See Also:
 Constant
 Field Values
@@ -327,7 +327,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 host2
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host2
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host2
 
 See Also:
 Constant
 Field Values
@@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 host3
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host3
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String host3
 
 See Also:
 Constant
 Field Values
@@ -353,7 +353,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestBlockReorder
-public TestBlockReorder()
+public TestBlockReorder()
 
 
 
@@ -387,7 +387,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUp
-public void setUp()
+public void setUp()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -401,7 +401,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDownAfterClass
-public void tearDownAfterClass()
+public void tearDownAfterClass()
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -415,7 +415,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testBlockLocationReorder
-public void testBlockLocationReorder()
+public void testBlockLocationReorder()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 Test that we're can add a hook, and that this hook works 
when we try to read the file in HDFS.
 
@@ -430,7 +430,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getHostName
-private http://docs.oracle.com/javase/8/docs/api/java/la

[35/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 2227828..46ecaca 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HRegionServer
+public class HRegionServer
 extends HasThread
 implements RegionServerServices, LastSequenceId, 
ConfigurationObserver
 HRegionServer makes a set of HRegions available to clients. 
It checks in with
@@ -1534,7 +1534,7 @@ implements 
 
 INIT_PAUSE_TIME_MS
-private static final int INIT_PAUSE_TIME_MS
+private static final int INIT_PAUSE_TIME_MS
 
 See Also:
 Constant
 Field Values
@@ -1547,7 +1547,7 @@ implements 
 
 REGION_LOCK_AWAIT_TIME_SEC
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_LOCK_AWAIT_TIME_SEC
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String REGION_LOCK_AWAIT_TIME_SEC
 
 See Also:
 Constant
 Field Values
@@ -1560,7 +1560,7 @@ implements 
 
 DEFAULT_REGION_LOCK_AWAIT_TIME_SEC
-public static final int DEFAULT_REGION_LOCK_AWAIT_TIME_SEC
+public static final int DEFAULT_REGION_LOCK_AWAIT_TIME_SEC
 
 See Also:
 Constant
 Field Values
@@ -1573,7 +1573,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -1582,7 +1582,7 @@ implements 
 
 TEST_SKIP_REPORTING_TRANSITION
-public static boolean TEST_SKIP_REPORTING_TRANSITION
+public static boolean TEST_SKIP_REPORTING_TRANSITION
 For testing only!  Set to true to skip notifying region 
assignment to master .
 
 
@@ -1592,7 +1592,7 @@ implements 
 
 OPEN
-protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String OPEN
+protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String OPEN
 
 See Also:
 Constant
 Field Values
@@ -1605,7 +1605,7 @@ implements 
 
 CLOSE
-protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String CLOSE
+protected static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String CLOSE
 
 See Also:
 Constant
 Field Values
@@ -1618,7 +1618,7 @@ implements 
 
 regionsInTransitionInRS
-protected final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentMapBoolean> regionsInTransitionInRS
+protected final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentMapBoolean> regionsInTransitionInRS
 
 
 
@@ -1627,7 +1627,7 @@ implements 
 
 cacheFlusher
-protected MemStoreFlusher cacheFlusher
+protected MemStoreFlusher cacheFlusher
 
 
 
@@ -1636,7 +1636,7 @@ implements 
 
 hMemManager
-protected HeapMemoryManager hMemManager
+protected HeapMemoryManager hMemManager
 
 
 
@@ -1645,7 +1645,7 @@ implements 
 
 initLatch
-protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true";
 title="class or interface in java.util.concurrent">CountDownLatch initLatch
+protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true";
 title="class or interface in java.util.concurrent">CountDownLatch initLatch
 
 
 
@@ -1654,7 +1654,7 @@ implements 
 
 clusterConnection
-protected ClusterConnection clusterConnection
+protected ClusterConnection clusterConnection
 Cluster connection to be shared by services.
  Initialized at server startup and closed when server shuts down.
  Clients must never close it explicitly.
@@ -1666,7 +1666,7 @@ implements 
 
 metaTableLocator
-protected MetaTableLocator metaTableLocator
+protected MetaTableLocator metaTableLocator
 
 
 
@@ -1675,7 +1675,7 @@ implements 
 
 recoveringRegionWatcher
-private RecoveringRegionWatcher 
recoveringRegionWatcher
+p

[47/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
index 6e29bdd..ec4f659 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
@@ -93,7 +93,7 @@ var activeTableTab = "activeTableTab";
 
 
 org.apache.hadoop.hbase.client
-Interface 
AsyncAdminBuilder
+Interface 
AsyncAdminBuilder
 
 
 
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdminBuilder
+public interface AsyncAdminBuilder
 For creating AsyncAdmin. The implementation 
should have default configurations set before
  returning the builder to user. So users are free to only set the configs they 
care about to
  create a new AsyncAdmin instance.
@@ -129,46 +129,46 @@ public interface Method and Description
 
 
-T
+AsyncAdmin
 build()
 Create a AsyncAdmin 
instance.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 setMaxAttempts(int maxAttempts)
 Set the max attempt times for an admin operation.
 
 
 
-default AsyncAdminBuilder
+default AsyncAdminBuilder
 setMaxRetries(int maxRetries)
 Set the max retry times for an admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 setOperationTimeout(long timeout,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 setRetryPause(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set the base pause time for retrying.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 setRpcTimeout(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each rpc request.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 setStartLogErrorsCnt(int startLogErrorsCnt)
 Set the number of retries that are allowed before we start 
to log.
 
@@ -194,8 +194,8 @@ public interface 
 
 setOperationTimeout
-AsyncAdminBuilder setOperationTimeout(long timeout,
- http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+AsyncAdminBuilder setOperationTimeout(long timeout,
+  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole admin operation. Operation timeout 
and max attempt times(or max retry
  times) are both limitations for retrying, we will stop retrying when we reach 
any of the
  limitations.
@@ -214,8 +214,8 @@ public interface 
 
 setRpcTimeout
-AsyncAdminBuilder setRpcTimeout(long timeout,
-   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+AsyncAdminBuilder setRpcTimeout(long timeout,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each rpc request.
 
 Parameters:
@@ -232,8 +232,8 @@ public interface 
 
 setRetryPause
-AsyncAdminBuilder setRetryPause(long timeout,
-   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+AsyncAdminBuilder setRetryPause(long timeout,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set the base pause time for retrying. We use an exponential 
policy to generate sleep time when
  retrying.
 
@@ -251,7 +251,7 @@ public interface 
 
 setMaxRetries
-default AsyncAdminBuilder setMaxRetries(int maxRetries)
+default AsyncAdminBuilder setMaxRetries(int maxRetries)
 Set the max retry times for an admin operation. Usually it 
is the max attempt times minus 1.
  Operation timeout and max attempt times(or max retry times) are both 
limitations for retrying,
  we will stop retrying when we reach any of t

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
index b5f351a..de70e5b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
@@ -51,700 +51,703 @@
 043import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan;
 044import 
org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 045import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-046import 
org.apache.hadoop.hbase.master.MasterServices;
-047import 
org.apache.hadoop.hbase.master.RegionPlan;
-048import 
org.apache.hadoop.hbase.util.Pair;
-049
-050import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-051import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-052import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-053
-054/**
-055 * An implementation of the {@link 
org.apache.hadoop.hbase.master.LoadBalancer} that
-056 * assigns favored nodes for each region. 
There is a Primary RegionServer that hosts
-057 * the region, and then there is 
Secondary and Tertiary RegionServers. Currently, the
-058 * favored nodes information is used in 
creating HDFS files - the Primary RegionServer
-059 * passes the primary, secondary, 
tertiary node addresses as hints to the
-060 * DistributedFileSystem API for creating 
files on the filesystem. These nodes are
-061 * treated as hints by the HDFS to place 
the blocks of the file. This alleviates the
-062 * problem to do with reading from remote 
nodes (since we can make the Secondary
-063 * RegionServer as the new Primary 
RegionServer) after a region is recovered. This
-064 * should help provide consistent read 
latencies for the regions even when their
-065 * primary region servers die. This 
provides two
-066 * {@link 
org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.CandidateGenerator}
-067 *
-068 */
-069public class FavoredStochasticBalancer 
extends StochasticLoadBalancer implements
-070FavoredNodesPromoter {
-071
-072  private static final Log LOG = 
LogFactory.getLog(FavoredStochasticBalancer.class);
-073  private FavoredNodesManager fnm;
-074
-075  @Override
-076  public void initialize() throws 
HBaseIOException {
-077configureGenerators();
-078super.initialize();
-079  }
-080
-081  protected void configureGenerators() 
{
-082List 
fnPickers = new ArrayList<>(2);
-083fnPickers.add(new 
FavoredNodeLoadPicker());
-084fnPickers.add(new 
FavoredNodeLocalityPicker());
-085setCandidateGenerators(fnPickers);
-086  }
-087
-088  @Override
-089  public void 
setMasterServices(MasterServices masterServices) {
-090
super.setMasterServices(masterServices);
-091fnm = 
masterServices.getFavoredNodesManager();
-092  }
-093
-094  /*
-095   * Round robin assignment: Segregate 
the regions into two types:
-096   *
-097   * 1. The regions that have favored 
node assignment where at least one of the favored node
-098   * is still alive. In this case, try to 
adhere to the current favored nodes assignment as
-099   * much as possible - i.e., if the 
current primary is gone, then make the secondary or
-100   * tertiary as the new host for the 
region (based on their current load). Note that we don't
-101   * change the favored node assignments 
here (even though one or more favored node is
-102   * currently down). That will be done 
by the admin operations.
-103   *
-104   * 2. The regions that currently don't 
have favored node assignments. Generate favored nodes
-105   * for them and then assign. Generate 
the primary fn in round robin fashion and generate
-106   * secondary and tertiary as per 
favored nodes constraints.
-107   */
-108  @Override
-109  public Map> roundRobinAssignment(List 
regions,
-110  List servers) 
throws HBaseIOException {
-111
-112
metricsBalancer.incrMiscInvocations();
-113
-114Set regionSet = 
Sets.newHashSet(regions);
-115Map> assignmentMap = assignMasterRegions(regions, 
servers);
-116if (assignmentMap != null && 
!assignmentMap.isEmpty()) {
-117  servers = new 
ArrayList<>(servers);
-118  // Guarantee not to put other 
regions on master
-119  servers.remove(masterServerName);
-120  List 
masterRegions = assignmentMap.get(masterServerName);
-121  if (!masterRegions.isEmpty())

[10/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 3d1c07d..61c9b54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -124,3648 +124,3649 @@
 116import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 117import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
 118import 
org.apache.hadoop.hbase.master.HMaster;
-119import 
org.apache.hadoop.hbase.master.RegionState.State;
-120import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-121import 
org.apache.hadoop.hbase.mob.MobCacheConfig;
-122import 
org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
-123import 
org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
-124import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-125import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-126import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-127import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-128import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-129import 
org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
-130import 
org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-131import 
org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
-132import 
org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
-133import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-134import 
org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
-135import 
org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-136import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-137import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad;
-138import 
org.apache.hadoop.hbase.security.Superusers;
-139import 
org.apache.hadoop.hbase.security.User;
-140import 
org.apache.hadoop.hbase.security.UserProvider;
-141import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
-142import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-143import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-144import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-145import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor.Builder;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-160import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-161import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
-162import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.genera

[33/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index 8004358..e068c92 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -188,212 +188,193 @@
 180" doesn't match the original one 
" +  Bytes.toStringBinary(this.row));
 181}
 182byte [] family = 
CellUtil.cloneFamily(kv);
-183List list = 
familyMap.get(family);
-184if (list == null) {
-185  list = new ArrayList<>(1);
-186  familyMap.put(family, list);
-187}
-188list.add(kv);
-189return this;
-190  }
-191
-192  /**
-193   * Delete all versions of all columns 
of the specified family.
-194   * 

-195 * Overrides previous calls to deleteColumn and deleteColumns for the -196 * specified family. -197 * @param family family name -198 * @return this for invocation chaining -199 */ -200 public Delete addFamily(final byte [] family) { -201this.addFamily(family, this.ts); -202return this; -203 } -204 -205 /** -206 * Delete all columns of the specified family with a timestamp less than -207 * or equal to the specified timestamp. -208 *

-209 * Overrides previous calls to deleteColumn and deleteColumns for the -210 * specified family. -211 * @param family family name -212 * @param timestamp maximum version timestamp -213 * @return this for invocation chaining -214 */ -215 public Delete addFamily(final byte [] family, final long timestamp) { -216if (timestamp < 0) { -217 throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); +183List list = getCellList(family); +184list.add(kv); +185return this; +186 } +187 +188 /** +189 * Delete all versions of all columns of the specified family. +190 *

+191 * Overrides previous calls to deleteColumn and deleteColumns for the +192 * specified family. +193 * @param family family name +194 * @return this for invocation chaining +195 */ +196 public Delete addFamily(final byte [] family) { +197this.addFamily(family, this.ts); +198return this; +199 } +200 +201 /** +202 * Delete all columns of the specified family with a timestamp less than +203 * or equal to the specified timestamp. +204 *

+205 * Overrides previous calls to deleteColumn and deleteColumns for the +206 * specified family. +207 * @param family family name +208 * @param timestamp maximum version timestamp +209 * @return this for invocation chaining +210 */ +211 public Delete addFamily(final byte [] family, final long timestamp) { +212if (timestamp < 0) { +213 throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); +214} +215List list = getCellList(family); +216if(!list.isEmpty()) { +217 list.clear(); 218} -219List list = familyMap.get(family); -220if(list == null) { -221 list = new ArrayList<>(1); -222 familyMap.put(family, list); -223} else if(!list.isEmpty()) { -224 list.clear(); -225} -226KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); -227list.add(kv); -228return this; -229 } -230 -231 /** -232 * Delete all columns of the specified family with a timestamp equal to -233 * the specified timestamp. -234 * @param family family name -235 * @param timestamp version timestamp -236 * @return this for invocation chaining -237 */ -238 public Delete addFamilyVersion(final byte [] family, final long timestamp) { -239List list = familyMap.get(family); -240if(list == null) { -241 list = new ArrayList<>(1); -242 familyMap.put(family, list); -243} -244list.add(new KeyValue(row, family, null, timestamp, -245 KeyValue.Type.DeleteFamilyVersion)); +219KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); +220list.add(kv); +221return this; +222 } +223 +224 /** +225 * Delete all columns of the specified family with a timestamp equal to +226 * the specified timestamp. 
+227 * @param family family name +228 * @param timestamp version timestamp +229 * @return this for invocation chaining +230 */ +231 public Delete addFamilyVersion(final byte [] family, final long timestamp) { +232List list = getCellList(family); +233list.add(new KeyValue(row, family, null, timestamp, +234 KeyValue.Type.DeleteFamilyVersion)); +235return this; +236 } +237 +238 /** +239 * Delete all versions of the specified column. +240 * @param family family name +241 * @param qualifier column qualifier +242 *


[03/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
index 79f5bcc..a183fb8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAdmin1
+public class TestAdmin1
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Class to test HBaseAdmin.
  Spins up the minicluster once at test start and then takes it down afterward.
@@ -360,7 +360,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -369,7 +369,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -378,7 +378,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 admin
-private org.apache.hadoop.hbase.client.Admin admin
+private org.apache.hadoop.hbase.client.Admin admin
 
 
 
@@ -387,7 +387,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -404,7 +404,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestAdmin1
-public TestAdmin1()
+public TestAdmin1()
 
 
 
@@ -421,7 +421,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -435,7 +435,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -449,7 +449,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setUp
-public void setUp()
+public void setUp()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -463,7 +463,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tearDown
-public void tearDown()
+public void tearDown()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -477,7 +477,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSplitFlushCompactUnknownTable
-public void testSplitFlushCompactUnknownTable()
+public void testSplitFlushCompactUnknownTable()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true";
 title="class or interface in java.lang">InterruptedException
 
 Throws:
@@ -491,7 +491,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDeleteEditUnknownColumnFamilyAndOrTable
-public void testDeleteEditUnknownColumnFamilyAndOrTable()
+public void testDeleteEditUnknownColumnFamilyAndOrTable()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -505,7 +505,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDisableAndEnableTable
-public void testDisableAndEnableTable()
+public void testDisableAndEnableTable()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -519,7 +519,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getStateFromMeta
-private org.apache.hadoop.hbase.client.TableState.State getStateFromMeta(org.apache.hadoop.hbase.TableName table)
+private org.apache.hadoop.hbase.client.TableState.State getStateFromMeta(org.apache.hadoop.hbase.TableName table)
   throws http://docs.oracle.com/javase/8/d

[32/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index ce35234..f8432f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -538,2952 +538,2964 @@
 530}
 531  }
 532
-533  // return the actual infoPort, -1 means 
disable info server.
-534  private int putUpJettyServer() throws 
IOException {
-535if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
-536  return -1;
-537}
-538final int infoPort = 
conf.getInt("hbase.master.info.port.orig",
-539  
HConstants.DEFAULT_MASTER_INFOPORT);
-540// -1 is for disabling info server, 
so no redirecting
-541if (infoPort < 0 || infoServer == 
null) {
-542  return -1;
-543}
-544if(infoPort == infoServer.getPort()) 
{
-545  return infoPort;
-546}
-547final String addr = 
conf.get("hbase.master.info.bindAddress", "0.0.0.0");
-548if 
(!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
-549  String msg =
-550  "Failed to start redirecting 
jetty server. Address " + addr
-551  + " does not belong to this 
host. Correct configuration parameter: "
-552  + 
"hbase.master.info.bindAddress";
-553  LOG.error(msg);
-554  throw new IOException(msg);
-555}
-556
-557// TODO I'm pretty sure we could just 
add another binding to the InfoServer run by
-558// the RegionServer and have it run 
the RedirectServlet instead of standing up
-559// a second entire stack here.
-560masterJettyServer = new Server();
-561final ServerConnector connector = new 
ServerConnector(masterJettyServer);
-562connector.setHost(addr);
-563connector.setPort(infoPort);
-564
masterJettyServer.addConnector(connector);
-565
masterJettyServer.setStopAtShutdown(true);
-566
-567final String redirectHostname = 
shouldUseThisHostnameInstead() ? useThisHostnameInstead : null;
-568
-569final RedirectServlet redirect = new 
RedirectServlet(infoServer, redirectHostname);
-570final WebAppContext context = new 
WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
-571context.addServlet(new 
ServletHolder(redirect), "/*");
-572
context.setServer(masterJettyServer);
-573
-574try {
-575  masterJettyServer.start();
-576} catch (Exception e) {
-577  throw new IOException("Failed to 
start redirecting jetty server", e);
-578}
-579return connector.getLocalPort();
-580  }
-581
-582  @Override
-583  protected TableDescriptors 
getFsTableDescriptors() throws IOException {
-584return 
super.getFsTableDescriptors();
-585  }
-586
-587  /**
-588   * For compatibility, if failed with 
regionserver credentials, try the master one
-589   */
-590  @Override
-591  protected void login(UserProvider user, 
String host) throws IOException {
-592try {
-593  super.login(user, host);
-594} catch (IOException ie) {
-595  
user.login("hbase.master.keytab.file",
-596
"hbase.master.kerberos.principal", host);
-597}
-598  }
-599
-600  /**
-601   * If configured to put regions on 
active master,
-602   * wait till a backup master becomes 
active.
-603   * Otherwise, loop till the server is 
stopped or aborted.
-604   */
-605  @Override
-606  protected void waitForMasterActive(){
-607boolean tablesOnMaster = 
BaseLoadBalancer.tablesOnMaster(conf);
-608while (!(tablesOnMaster && 
activeMaster)
-609&& !isStopped() 
&& !isAborted()) {
-610  sleeper.sleep();
-611}
-612  }
-613
-614  @VisibleForTesting
-615  public MasterRpcServices 
getMasterRpcServices() {
-616return 
(MasterRpcServices)rpcServices;
-617  }
-618
-619  public boolean balanceSwitch(final 
boolean b) throws IOException {
-620return 
getMasterRpcServices().switchBalancer(b, BalanceSwitchMode.ASYNC);
-621  }
-622
-623  @Override
-624  protected String getProcessName() {
-625return MASTER;
-626  }
-627
-628  @Override
-629  protected boolean canCreateBaseZNode() 
{
-630return true;
+533  // Main run loop. Calls through to the 
regionserver run loop.
+534  @Override
+535  public void run() {
+536try {
+537  super.run();
+538} finally {
+539  // If on way out, then we are no 
longer active master.
+540  this.activeMaster = false;
+541}
+542  }
+543
+544  // return the actual infoPort, -1 means 
disable info server.
+545  private int putUpJettyServer() throws 
IOException {
+546if 
(!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
+547  return -1;
+548}
+549

[27/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index 6d380af..883a87d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -488,1358 +488,1367 @@
 480synchronized 
(checkIfShouldMoveSystemRegionLock) {
 481  List plans = 
new ArrayList<>();
 482  for (ServerName server : 
getExcludedServersForSystemTable()) {
-483List 
regionsShouldMove = getCarryingSystemTables(server);
-484if 
(!regionsShouldMove.isEmpty()) {
-485  for (HRegionInfo regionInfo 
: regionsShouldMove) {
-486// null value for dest 
forces destination server to be selected by balancer
-487RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
-488if 
(regionInfo.isMetaRegion()) {
-489  // Must move meta 
region first.
-490  moveAsync(plan);
-491} else {
-492  plans.add(plan);
-493}
-494  }
-495}
-496for (RegionPlan plan : plans) 
{
-497  moveAsync(plan);
-498}
-499  }
-500}
-501  } catch (Throwable t) {
-502LOG.error(t);
-503  }
-504}).start();
-505  }
-506
-507  private List 
getCarryingSystemTables(ServerName serverName) {
-508Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
-509if (regions == null) {
-510  return new ArrayList<>();
-511}
-512return regions.stream()
-513
.map(RegionStateNode::getRegionInfo)
-514
.filter(HRegionInfo::isSystemTable)
-515.collect(Collectors.toList());
-516  }
-517
-518  public void assign(final HRegionInfo 
regionInfo) throws IOException {
-519assign(regionInfo, true);
-520  }
-521
-522  public void assign(final HRegionInfo 
regionInfo, final boolean forceNewPlan) throws IOException {
-523AssignProcedure proc = 
createAssignProcedure(regionInfo, forceNewPlan);
-524
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
+483if 
(master.getServerManager().isServerDead(server)) {
+484  // TODO: See HBASE-18494 
and HBASE-18495. Though getExcludedServersForSystemTable()
+485  // considers only online 
servers, the server could be queued for dead server
+486  // processing. As region 
assignments for crashed server is handled by
+487  // ServerCrashProcedure, do 
NOT handle them here. The goal is to handle this through
+488  // regular flow of 
LoadBalancer as a favored node and not to have this special
+489  // handling.
+490  continue;
+491}
+492List 
regionsShouldMove = getCarryingSystemTables(server);
+493if 
(!regionsShouldMove.isEmpty()) {
+494  for (HRegionInfo regionInfo 
: regionsShouldMove) {
+495// null value for dest 
forces destination server to be selected by balancer
+496RegionPlan plan = new 
RegionPlan(regionInfo, server, null);
+497if 
(regionInfo.isMetaRegion()) {
+498  // Must move meta 
region first.
+499  moveAsync(plan);
+500} else {
+501  plans.add(plan);
+502}
+503  }
+504}
+505for (RegionPlan plan : plans) 
{
+506  moveAsync(plan);
+507}
+508  }
+509}
+510  } catch (Throwable t) {
+511LOG.error(t);
+512  }
+513}).start();
+514  }
+515
+516  private List 
getCarryingSystemTables(ServerName serverName) {
+517Set regions = 
this.getRegionStates().getServerNode(serverName).getRegions();
+518if (regions == null) {
+519  return new ArrayList<>();
+520}
+521return regions.stream()
+522
.map(RegionStateNode::getRegionInfo)
+523
.filter(HRegionInfo::isSystemTable)
+524.collect(Collectors.toList());
 525  }
 526
-527  public void unassign(final HRegionInfo 
regionInfo) throws IOException {
-528unassign(regionInfo, false);
+527  public void assign(final HRegionInfo 
regionInfo) throws IOException {
+528assign(regionInfo, true);
 529  }
 530
-531  public void unassign(final HRegionInfo 

[39/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index d0bd3d1..26de3d5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static class BaseLoadBalancer.Cluster
+protected static class BaseLoadBalancer.Cluster
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 An efficient array based implementation similar to 
ClusterState for keeping
  the status of the cluster in terms of region assignment and distribution.
@@ -579,7 +579,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 servers
-ServerName[] servers
+ServerName[] servers
 
 
 
@@ -588,7 +588,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 hosts
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] hosts
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] hosts
 
 
 
@@ -597,7 +597,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 racks
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] racks
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] racks
 
 
 
@@ -606,7 +606,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 multiServersPerHost
-boolean multiServersPerHost
+boolean multiServersPerHost
 
 
 
@@ -615,7 +615,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tables
-http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayListString> tables
+http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayListString> tables
 
 
 
@@ -624,7 +624,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regions
-HRegionInfo[] regions
+HRegionInfo[] regions
 
 
 
@@ -633,7 +633,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionLoads
-http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true";
 title="class or interface in java.util">Deque[] regionLoads
+http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true";
 title="class or interface in java.util">Deque[] regionLoads
 
 
 
@@ -642,7 +642,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionFinder
-private RegionLocationFinder regionFinder
+private RegionLocationFinder regionFinder
 
 
 
@@ -651,7 +651,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionLocations
-int[][] regionLocations
+int[][] regionLocations
 
 
 
@@ -660,7 +660,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverIndexToHostIndex
-int[] serverIndexToHostIndex
+int[] serverIndexToHostIndex
 
 
 
@@ -669,7 +669,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverIndexToRackIndex
-int[] serverIndexToRackIndex
+int[] serverIndexToRackIndex
 
 
 
@@ -678,7 +678,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionsPerServer
-int[][] regionsPerServer
+int[][] regionsPerServer
 
 
 
@@ -687,7 +687,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionsPerHost
-int[][] regionsPerHost
+int[][] regionsPerHost
 
 
 
@@ -696,7 +696,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionsPerRack
-int[][] regionsPerRack
+int[][] regionsPerRack
 
 
 
@@ -705,7 +705,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 primariesOfRegionsPerServer
-int[][] primariesOfRegionsPerServer
+int[][] primariesOfRegionsPerServer
 
 
 
@@ -714,7 +714,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 primariesOfRegionsPerHos

[44/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 8e662c4..c262a95 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":9,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":9,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":9,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":9,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":9,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":9,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -1230,31 +1230,37 @@ implements 
+void
+run()
+The HRegionServer sticks in this loop until closed.
+
+
+
 private void
 sanityCheckTableDescriptor(HTableDescriptor htd)
 Checks whether the table conforms to some sane limits, and 
configured
  values (compression, etc) work.
 
 
-
+
 void
 setCatalogJanitorEnabled(boolean b)
 Switch for the background CatalogJanitor thread.
 
 
-
+
 void
 setInitialized(boolean isInitialized) 
 
-
+
 void
 setServerCrashProcessingEnabled(boolean b) 
 
-
+
 void
 shutdown() 
 
-
+
 long
 splitRegion(HRegionInfo regionInfo,
byte[] splitRow,
@@ -1263,37 +1269,37 @@ implements Split a region.
 
 
-
+
 private void
 startActiveMasterManager(int infoPort) 
 
-
+
 private void
 startProcedureExecutor() 
 
-
+
 private void
 startServiceThreads() 
 
-
+
 private void
 stopChores() 
 
-
+
 void
 stopMaster() 
 
-
+
 private void
 stopProcedureExecutor() 
 
-
+
 protected void
 stopServiceThreads()
 Wait on all threads to finish.
 
 
-
+
 long
 truncateTable(TableName tableName,
  boolean preserveSplits,
@@ -1302,32 +1308,32 @@ implements Truncate a table
 
 
-
+
 void
 updateConfigurationForSpaceQuotaObserver(org.apache.hadoop.conf.Configuration conf)
 Adds the MasterSpaceQuotaObserver to the list 
of configured Master observer

[51/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/33bc9e06
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/33bc9e06
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/33bc9e06

Branch: refs/heads/asf-site
Commit: 33bc9e06b8c90e5caa481cc43aeb83622f71aa69
Parents: 6fa5abd
Author: jenkins 
Authored: Thu Aug 17 15:06:52 2017 +
Committer: jenkins 
Committed: Thu Aug 17 15:06:52 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 apidocs/index-all.html  | 2 +-
 .../org/apache/hadoop/hbase/client/Append.html  |20 +-
 .../hadoop/hbase/client/AsyncAdminBuilder.html  |38 +-
 .../hadoop/hbase/client/AsyncConnection.html| 8 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |38 +-
 .../hbase/client/class-use/AsyncAdmin.html  |23 +-
 .../client/class-use/AsyncAdminBuilder.html |16 +-
 .../hadoop/hbase/client/package-summary.html| 2 +-
 .../hadoop/hbase/client/package-tree.html   | 2 +-
 apidocs/overview-tree.html  | 2 +-
 .../org/apache/hadoop/hbase/client/Append.html  |   119 +-
 .../hadoop/hbase/client/AsyncAdminBuilder.html  |16 +-
 .../hadoop/hbase/client/AsyncConnection.html| 4 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |   369 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |  6898 -
 checkstyle.rss  |12 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |39 +-
 devapidocs/index-all.html   |44 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../hadoop/hbase/class-use/HRegionInfo.html |16 +-
 .../hadoop/hbase/class-use/ServerName.html  |14 +-
 .../class-use/InterfaceAudience.Private.html| 2 +-
 .../class-use/InterfaceAudience.Public.html | 2 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../org/apache/hadoop/hbase/client/Append.html  |20 +-
 .../hadoop/hbase/client/AsyncAdminBuilder.html  |38 +-
 .../hbase/client/AsyncAdminBuilderBase.html |46 +-
 .../hadoop/hbase/client/AsyncConnection.html| 8 +-
 .../hbase/client/AsyncConnectionImpl.html   | 8 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |38 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.html | 4 +-
 .../hbase/client/class-use/AsyncAdmin.html  |29 +-
 .../client/class-use/AsyncAdminBuilder.html |32 +-
 .../client/class-use/AsyncAdminBuilderBase.html | 2 +-
 .../client/class-use/AsyncConnectionImpl.html   | 2 +-
 .../hbase/client/class-use/AsyncHBaseAdmin.html |50 +-
 .../client/class-use/RawAsyncHBaseAdmin.html|19 -
 .../hadoop/hbase/client/package-summary.html| 4 +-
 .../hadoop/hbase/client/package-tree.html   |32 +-
 .../apache/hadoop/hbase/client/package-use.html |   355 +-
 .../hbase/favored/FavoredNodeLoadBalancer.html  |13 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   381 +-
 .../master/HMasterCommandLine.LocalHMaster.html | 2 +-
 .../hadoop/hbase/master/LoadBalancer.html   |   124 +-
 .../hadoop/hbase/master/ServerManager.html  |85 +-
 ...signmentManager.RegionInTransitionChore.html | 6 +-
 ...ssignmentManager.RegionInTransitionStat.html |40 +-
 .../master/assignment/AssignmentManager.html|   170 +-
 .../BaseLoadBalancer.Cluster.Action.Type.html   |14 +-
 .../BaseLoadBalancer.Cluster.Action.html|10 +-
 ...LoadBalancer.Cluster.AssignRegionAction.html |12 +-
 .../BaseLoadBalancer.Cluster.LocalityType.html  |10 +-
 ...seLoadBalancer.Cluster.MoveRegionAction.html |14 +-
 ...eLoadBalancer.Cluster.SwapRegionsAction.html |16 +-
 .../balancer/BaseLoadBalancer.Cluster.html  |   148 +-
 .../BaseLoadBalancer.DefaultRackManager.html| 6 +-
 .../hbase/master/balancer/BaseLoadBalancer.html |   268 +-
 ...tochasticBalancer.FavoredNodeLoadPicker.html |12 +-
 ...asticBalancer.FavoredNodeLocalityPicker.html |10 +-
 .../balancer/FavoredStoch

[05/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
index b5114b0..c078682 100644
--- a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -1488,10 +1488,11 @@ extends 
 
 getRegionServerThreads
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegionServerThreads()
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegionServerThreads()
 
 Returns:
-List of region server threads.
+List of region server threads. Does not return the master even though it 
is also
+ a region server.
 
 
 
@@ -1501,7 +1502,7 @@ extends 
 
 getLiveRegionServerThreads
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getLiveRegionServerThreads()
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getLiveRegionServerThreads()
 
 Returns:
 List of live region server threads (skips the aborted and the killed)
@@ -1514,7 +1515,7 @@ extends 
 
 getRegionServer
-public HRegionServer getRegionServer(int serverNumber)
+public HRegionServer getRegionServer(int serverNumber)
 Grab a numbered region server of your choice.
 
 Parameters:
@@ -1530,7 +1531,7 @@ extends 
 
 getRegionServer
-public HRegionServer getRegionServer(ServerName serverName)
+public HRegionServer getRegionServer(ServerName serverName)
 
 
 
@@ -1539,7 +1540,7 @@ extends 
 
 getRegions
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegions(byte[] tableName)
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegions(byte[] tableName)
 
 
 
@@ -1548,7 +1549,7 @@ extends 
 
 getRegions
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegions(TableName tableName)
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRegions(TableName tableName)
 
 
 
@@ -1557,7 +1558,7 @@ extends 
 
 getServerWithMeta
-public int getServerWithMeta()
+public int getServerWithMeta()
 
 Returns:
 Index into List of getRegionServerThreads()
@@ -1571,7 +1572,7 @@ extends 
 
 getServerWith
-public int getServerWith(byte[] regionName)
+public int getServerWith(byte[] regionName)
 Get the location of the specified region
 
 Parameters:
@@ -1588,7 +1589,7 @@ extends 
 
 getServerHoldingRegion
-public ServerName getServerHoldingRegion(TableName tn,
+public ServerName getServerHoldingRegion(TableName tn,
  byte[] regionName)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
class: org.apache.hadoop.hbase.HBaseCluster
@@ -1612,7 +1613,7 @@ extends 
 
 countServedRegions
-public long countServedRegions()
+public long countServedRegions()
 Counts the total numbers of regions being served by the 
currently online
  region servers by asking each how many regions they have.  Does not look
  at hbase:meta at all.  Count includes catalog tables.
@@ -1628,7 +1629,7 @@ extends 
 
 killAll
-public void killAll()
+public void killAll()
 Do a simulated kill all masters and regionservers. Useful 
when it is
  impossible to bring the mini-cluster back for clean shutdown.
 
@@ -1639,7 +1640,7 @@ extends 
 
 waitUntilShutDown
-public void waitUntilShutDown()
+public void waitUntilShutDown()
 Description copied from 
class: org.apache.hadoop.hbase.HBaseCluster
 Wait for HBase Cluster to shut down.
 
@@ -1654,7 +1655,7 @@ extends 
 
 findRegionsForTable
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List findRegionsForTable(TableName tableName)
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List findRegionsForTable(TableName tableName)
 
 
 
@@ -1663,7 +1664,7 @@ extends 
 
 getRegionServerIndex
-protected int getRegionServerIndex(ServerName serverName)
+protected 

[49/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 043eb24..36ac913 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 2026
 0
 0
-12838
+12854
 
 Files
 
@@ -777,7 +777,7 @@
 org/apache/hadoop/hbase/client/Append.java
 0
 0
-4
+5
 
 org/apache/hadoop/hbase/client/AsyncAdmin.java
 0
@@ -992,7 +992,7 @@
 org/apache/hadoop/hbase/client/Delete.java
 0
 0
-7
+8
 
 org/apache/hadoop/hbase/client/Get.java
 0
@@ -3082,7 +3082,7 @@
 org/apache/hadoop/hbase/master/LoadBalancer.java
 0
 0
-16
+23
 
 org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
 0
@@ -3282,7 +3282,7 @@
 org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 0
 0
-61
+67
 
 org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
 0
@@ -4257,7 +4257,7 @@
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
 0
-130
+131
 
 org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
 0
@@ -7077,7 +7077,7 @@
 
 imports
 http://checkstyle.sourceforge.net/config_imports.html#AvoidStarImport";>AvoidStarImport
-13
+14
  Error
 
 
@@ -7086,7 +7086,7 @@
 ordered: "true"
 sortStaticImportsAlphabetically: "true"
 option: "top"
-1035
+1036
  Error
 
 
@@ -7098,7 +7098,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports";>UnusedImports
 
 processJavadoc: "true"
-114
+128
  Error
 
 indentation
@@ -7121,7 +7121,7 @@
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription
-3232
+3231
  Error
 
 misc
@@ -7139,7 +7139,7 @@
 
 max: "100"
 ignorePattern: "^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated"
-958
+959
  Error
 
 
@@ -14653,23 +14653,29 @@
 Line
 
  Error
+imports
+UnusedImports
+Unused import - java.util.ArrayList.
+20
+
+ Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 78
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 94
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 95
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
@@ -14678,127 +14684,127 @@
 
 org/apache/hadoop/hbase/client/AsyncAdmin.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
  Error
 imports
 ImportOrder
 Wrong order for 'java.util.Collection' import.
 21
-
+
  Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.NamespaceDescriptor' import.
 35
-
+
  Error
 imports
 ImportOrder
 Wrong order for 
'org.apache.hadoop.hbase.client.RawAsyncTable.CoprocessorCallable' import.
 41
-
+
  Error
 imports
 ImportOrder
 Wrong order for 'com.google.protobuf.RpcChannel' import.
 48
-
+
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
 172
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 182
-
+
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
 188
-
+
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
 191
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 418
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 432
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 818
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 867
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 886
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 915
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 922
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 923
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 980
-
+
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
 1007
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 1035
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
@@ -14807,61 +14813,61 @@
 
 org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 38
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 39
-
+
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 46
-
+
  Er

[22/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.AssignRegionAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.AssignRegionAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.AssignRegionAction.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.AssignRegionAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.AssignRegionAction.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145i

[40/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index 4c1689f..9f3bbd2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum BaseLoadBalancer.Cluster.Action.Type
+public static enum BaseLoadBalancer.Cluster.Action.Type
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum
 
 
@@ -216,7 +216,7 @@ the order they are declared.
 
 
 ASSIGN_REGION
-public static final BaseLoadBalancer.Cluster.Action.Type
 ASSIGN_REGION
+public static final BaseLoadBalancer.Cluster.Action.Type
 ASSIGN_REGION
 
 
 
@@ -225,7 +225,7 @@ the order they are declared.
 
 
 MOVE_REGION
-public static final BaseLoadBalancer.Cluster.Action.Type
 MOVE_REGION
+public static final BaseLoadBalancer.Cluster.Action.Type
 MOVE_REGION
 
 
 
@@ -234,7 +234,7 @@ the order they are declared.
 
 
 SWAP_REGIONS
-public static final BaseLoadBalancer.Cluster.Action.Type
 SWAP_REGIONS
+public static final BaseLoadBalancer.Cluster.Action.Type
 SWAP_REGIONS
 
 
 
@@ -243,7 +243,7 @@ the order they are declared.
 
 
 NULL
-public static final BaseLoadBalancer.Cluster.Action.Type
 NULL
+public static final BaseLoadBalancer.Cluster.Action.Type
 NULL
 
 
 
@@ -260,7 +260,7 @@ the order they are declared.
 
 
 values
-public static BaseLoadBalancer.Cluster.Action.Type[] values()
+public static BaseLoadBalancer.Cluster.Action.Type[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -280,7 +280,7 @@ for (BaseLoadBalancer.Cluster.Action.Type c : 
BaseLoadBalancer.Cluster.Action.Ty
 
 
 valueOf
-public static BaseLoadBalancer.Cluster.Action.Type valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static BaseLoadBalancer.Cluster.Action.Type valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
index cb624c3..531908d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class BaseLoadBalancer.Cluster.Action
+public static class BaseLoadBalancer.Cluster.Action
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 An action to move or swap a region
 
@@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 type
-public BaseLoadBalancer.Cluster.Action.Type
 type
+public BaseLoadBalancer.Cluster.Action.Type
 type
 
 
 
@@ -248,7 +248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Action
-public Action(BaseLoadBalancer.Cluster.Action.Type type)
+public Action(BaseLoadBalancer.Cluster.Action.Type type)
 
 
 
@@ -265,7 +265,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 undoAction
-public BaseLoadBalancer.Cluster.Action undoAction()
+public BaseLoadBalancer.Cluster.Action undoAction()
 Returns an Action which would undo this action
 
 
@@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 toString
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String

[23/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryIndex;   
//regionIndex -> region

[04/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 9f71980..42866a5 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -2809,6 +2809,8 @@
  
 C
 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestRowProcessorEndpoint
  
+c
 - Variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 C 
- Static variable in class org.apache.hadoop.hbase.regionserver.TestWideScanner
  
 c
 - Static variable in class org.apache.hadoop.hbase.replication.TestReplicationWALEntryFilters
@@ -3389,6 +3391,8 @@
  
 checkAppend()
 - Method in class org.apache.hadoop.hbase.coprocessor.TestHTableWrapper
  
+checkBalance(int,
 int) - Method in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 checkBatch()
 - Method in class org.apache.hadoop.hbase.coprocessor.TestHTableWrapper
  
 checkBindAddress(String,
 int, boolean) - Method in class org.apache.hadoop.hbase.http.TestHttpServer
@@ -3417,6 +3421,8 @@
  
 checkCoprocessorService()
 - Method in class org.apache.hadoop.hbase.coprocessor.TestHTableWrapper
  
+checkCount(int,
 int) - Method in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 checkCounterExists(String,
 BaseSource) - Method in interface org.apache.hadoop.hbase.test.MetricsAssertHelper
 
 Check if a dynamic counter exists.
@@ -19575,6 +19581,8 @@
  
 LOG
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestFavoredStochasticLoadBalancer
  
+LOG
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 LOG
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRSGroupBasedLoadBalancer
  
 LOG
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestStochasticLoadBalancer
@@ -20896,6 +20904,8 @@
  
 MASTERS
 - Static variable in class org.apache.hadoop.hbase.client.TestClientClusterStatus
  
+MASTERS
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 masterServices
 - Static variable in class org.apache.hadoop.hbase.master.locking.TestLockManager
  
 masterServices
 - Static variable in class org.apache.hadoop.hbase.master.normalizer.TestSimpleRegionNormalizer
@@ -22347,6 +22357,8 @@
  
 name
 - Variable in class org.apache.hadoop.hbase.master.balancer.TestDefaultLoadBalancer
  
+name
 - Variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 name
 - Variable in class org.apache.hadoop.hbase.master.cleaner.TestHFileLinkCleaner
  
 name
 - Variable in class org.apache.hadoop.hbase.master.normalizer.TestSimpleRegionNormalizer
@@ -23414,6 +23426,8 @@
  
 NUM_RS
 - Static variable in class org.apache.hadoop.hbase.master.TestDistributedLogSplitting
  
+NUM_RS
 - Static variable in class org.apache.hadoop.hbase.master.TestMasterMetricsWrapper
+ 
 NUM_RS
 - Static variable in class org.apache.hadoop.hbase.procedure.TestProcedureManager
  
 NUM_RS
 - Static variable in class org.apache.hadoop.hbase.regionserver.TestCompactSplitThread
@@ -28148,6 +28162,8 @@
  
 REGIONS
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestFavoredStochasticBalancerPickers
  
+REGIONS
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 regions
 - Variable in class org.apache.hadoop.hbase.MockRegionServerServices
  
 REGIONS 
- Static variable in class org.apache.hadoop.hbase.wal.TestWALSplit
@@ -31971,6 +31987,8 @@
  
 setup()
 - Method in class org.apache.hadoop.hbase.master.assignment.TestSplitTableRegionProcedure
  
+setup()
 - Method in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 setup()
 - Method in class org.apache.hadoop.hbase.master.cleaner.TestReplicationHFileCleaner
  
 setup()
 - Method in class org.apache.hadoop.hbase.master.cleaner.TestSnapshotFromMaster
@@ -33798,6 +33816,8 @@
  
 SLAVES
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestFavoredStochasticLoadBalancer
  
+SLAVES
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 SLAVES
 - Static variable in class org.apache.hadoop.hbase.master.TestRegionPlacement
  
 SLAVES
 - Static variable in class org.apache.hadoop.hbase.master.TestRegionPlacement2
@@ -35508,12 +35528,16 @@
  
 syncUp(HBaseTestingUtility)
 - Method in class org.apache.hadoop.hbase.replication.TestReplicationSyncUpTool
  
+SYSTEM_REGIONS
 - Static variable in class org.apache.hadoop.hbase.master.balancer.TestRegionsOnMasterOptions
+ 
 SystemCoprocessor()
 - Constructor for class org.apache.hadoop.hbase.coprocessor.TestCoprocessorConfiguration.SystemCoprocessor
  
 systemCoprocessorLoaded
 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestCoprocessorC

[19/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[] 

[50/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
index 5127945..c02814a 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
@@ -97,21 +97,6 @@
 
 
 Uses of AsyncAdmin in org.apache.hadoop.hbase.client
-
-Classes in org.apache.hadoop.hbase.client
 with type parameters of type AsyncAdmin 
-
-Modifier and Type
-Interface and Description
-
-
-
-interface 
-AsyncAdminBuilder
-For creating AsyncAdmin.
-
-
-
-
 
 Methods in org.apache.hadoop.hbase.client
 that return AsyncAdmin 
 
@@ -120,12 +105,18 @@
 
 
 
+AsyncAdmin
+AsyncAdminBuilder.build()
+Create a AsyncAdmin 
instance.
+
+
+
 default AsyncAdmin
 AsyncConnection.getAdmin()
 Retrieve an AsyncAdmin 
implementation to administer an HBase cluster.
 
 
-
+
 default AsyncAdmin
 AsyncConnection.getAdmin(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool)
 Retrieve an AsyncAdmin 
implementation to administer an HBase cluster.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
index fa9d08d..b5d3271 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
@@ -105,52 +105,52 @@
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnection.getAdminBuilder()
 Returns an AsyncAdminBuilder for creating 
AsyncAdmin.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnection.getAdminBuilder(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool)
 Returns an AsyncAdminBuilder for creating 
AsyncAdmin.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setMaxAttempts(int maxAttempts)
 Set the max attempt times for an admin operation.
 
 
 
-default AsyncAdminBuilder
+default AsyncAdminBuilder
 AsyncAdminBuilder.setMaxRetries(int maxRetries)
 Set the max retry times for an admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setOperationTimeout(long timeout,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setRetryPause(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set the base pause time for retrying.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setRpcTimeout(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each rpc request.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setStartLogErrorsCnt(int startLogErrorsCnt)
 Set the number of retries that are allowed before we start 
to log.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/apidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/apidocs/org/apache/hadoop/hbase/client/package-summary.html
index 29789c3..26423e6 100644
--- a/apidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/apidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -99,7 +99,7 @@
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 
 For creating AsyncAdmin.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/apidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/apidocs/org/apache/hadoop/hbase/client/package-tree.html
index 00b39b4..17e7550 100644
--- a/apidocs/org/apache/hadoop/hbase/client/package-tree.ht

[45/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/client/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
index ad92e41..606d211 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
@@ -694,313 +694,308 @@ service.
 
 
 
-AsyncHBaseAdmin
-The implementation of AsyncAdmin.
-
-
-
 AsyncMasterRequestRpcRetryingCaller
 Retry caller for a request call to master.
 
 
-
+
 AsyncMasterRequestRpcRetryingCaller.Callable 
 
-
+
 AsyncMetaRegionLocator
 The asynchronous locator for meta region.
 
 
-
+
 AsyncNonMetaRegionLocator
 The asynchronous locator for regions other than meta.
 
 
-
+
 AsyncNonMetaRegionLocator.LocateRequest 
 
-
+
 AsyncNonMetaRegionLocator.TableCache 
 
-
+
 AsyncProcess
 This class  allows a continuous flow of requests.
 
 
-
+
 AsyncProcessTask
 Contains the attributes of a task which will be executed
  by AsyncProcess.
 
 
-
+
 AsyncProcessTask.Builder 
 
-
+
 AsyncProcessTask.SubmittedRows
 The number of processed rows.
 
 
-
+
 AsyncRegionLocator
 The asynchronous region locator.
 
 
-
+
 AsyncRegistry
 Implementations hold cluster information such as this 
cluster's id, location of hbase:meta, etc..
 
 
-
+
 AsyncRequestFuture
 The context used to wait for results from one submit 
call.
 
 
-
+
 AsyncRequestFutureImpl
 The context, and return value, for a single 
submit/submitAll call.
 
 
-
+
 AsyncRequestFutureImpl.ReplicaResultState
 Sync point for calls to multiple replicas for the same user 
request (Get).
 
 
-
+
 AsyncRequestFutureImpl.Retry
 For AsyncRequestFutureImpl.manageError(int,
 Row, Retry, Throwable, ServerName).
 
 
-
+
 AsyncRequestFutureImpl.SingleServerRequestRunnable
 Runnable (that can be submitted to thread pool) that 
submits MultiAction to a
  single server.
 
 
-
+
 AsyncRpcRetryingCaller 
 
-
+
 AsyncRpcRetryingCallerFactory
 Factory to create an AsyncRpcRetryCaller.
 
 
-
+
 AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder 
 
-
+
 AsyncRpcRetryingCallerFactory.BatchCallerBuilder 
 
-
+
 AsyncRpcRetryingCallerFactory.BuilderBase 
 
-
+
 AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder 
 
-
+
 AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder 
 
-
+
 AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder 
 
-
+
 AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder 
 
-
+
 AsyncScanSingleRegionRpcRetryingCaller
 Retry caller for scanning a region.
 
 
-
+
 AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState 
 
-
+
 AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl 
 
-
+
 AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState 
 
-
+
 AsyncServerRequestRpcRetryingCaller
 Retry caller for a request call to region server.
 
 
-
+
 AsyncServerRequestRpcRetryingCaller.Callable 
 
-
+
 AsyncSingleRequestRpcRetryingCaller
 Retry caller for a single request, such as get, put, 
delete, etc.
 
 
-
+
 AsyncSingleRequestRpcRetryingCaller.Callable 
 
-
+
 AsyncTable
 The asynchronous table for normal users.
 
 
-
+
 AsyncTableBase
 The base interface for asynchronous version of Table.
 
 
-
+
 AsyncTableBuilder
 For creating AsyncTable 
or RawAsyncTable.
 
 
-
+
 AsyncTableBuilderBase
 Base class for all asynchronous table builders.
 
 
-
+
 AsyncTableRegionLocator
 The asynchronous version of RegionLocator.
 
 
-
+
 Attributes 
 
-
+
 BatchErrors 
 
-
+
 BufferedMutator
 Used to communicate with a single HBase table similar to Table but 
meant for
  batched, asynchronous puts.
 
 
-
+
 BufferedMutator.ExceptionListener
 Listens for asynchronous exceptions on a BufferedMutator.
 
 
-
+
 BufferedMutatorImpl.QueueRowAccess 
 
-
+
 BufferedMutatorParams
 Parameters for instantiating a BufferedMutator.
 
 
-
+
 Cancellable
 This should be implemented by the Get/Scan implementations 
that
  talk to replica regions.
 
 
-
+
 CancellableRegionServerCallable
 This class is used to unify HTable calls with AsyncProcess 
Framework.
 
 
-
+
 ClientScanner
 Implements the scanner interface for the HBase client.
 
 
-
+
 ClientServiceCallable
 A RegionServerCallable set to use the Client protocol.
 
 
-
+
 ClientSideRegionScanner
 A client scanner for a region opened for read-only on the 
client side.
 
 
-
+
 ClientSimpleScanner
 ClientSimpleScanner implements a sync scanner 
behaviour.
 
 
-
+
 ClusterConnection
 Internal methods on Connection that should not be used by 
user code.
 
 
-
+
 ClusterStatusListener
 A class that receives the cluster status, and provide it as 
a set of service to the client.
 
 
-
+
 ClusterStatusListener.DeadServerHandler
 Class to be extended to manage a new dead server.
 
 
-
+
 ClusterStatusListener.Listener
 The interface to be implemented by a listener of a cluster 
status event.
 
 
-
+
 ColumnFamilyDescript

[37/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
index 86ffc03..5b376f5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLoadPicker.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class FavoredStochasticBalancer.FavoredNodeLoadPicker
+class FavoredStochasticBalancer.FavoredNodeLoadPicker
 extends StochasticLoadBalancer.CandidateGenerator
 
 
@@ -209,7 +209,7 @@ extends 
 
 FavoredNodeLoadPicker
-FavoredNodeLoadPicker()
+FavoredNodeLoadPicker()
 
 
 
@@ -226,7 +226,7 @@ extends 
 
 generate
-BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
+BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
 
 Specified by:
 generate in
 class StochasticLoadBalancer.CandidateGenerator
@@ -239,7 +239,7 @@ extends 
 
 pickLeastLoadedServer
-private int pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
+private int pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
   int thisServer)
 
 
@@ -249,7 +249,7 @@ extends 
 
 pickLeastLoadedFNServer
-private int pickLeastLoadedFNServer(BaseLoadBalancer.Cluster cluster,
+private int pickLeastLoadedFNServer(BaseLoadBalancer.Cluster cluster,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List favoredNodes,
 int currentServerIndex)
 
@@ -260,7 +260,7 @@ extends 
 
 pickMostLoadedServer
-private int pickMostLoadedServer(BaseLoadBalancer.Cluster cluster)
+private int pickMostLoadedServer(BaseLoadBalancer.Cluster cluster)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
index c86970f..bde3f16 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.FavoredNodeLocalityPicker.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class FavoredStochasticBalancer.FavoredNodeLocalityPicker
+private class FavoredStochasticBalancer.FavoredNodeLocalityPicker
 extends StochasticLoadBalancer.CandidateGenerator
 
 
@@ -207,7 +207,7 @@ extends 
 
 FavoredNodeLocalityPicker
-private FavoredNodeLocalityPicker()
+private FavoredNodeLocalityPicker()
 
 
 
@@ -224,7 +224,7 @@ extends 
 
 generate
-protected BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
+protected BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
 
 Specified by:
 generate in
 class StochasticLoadBalancer.CandidateGenerator
@@ -237,7 +237,7 @@ extends 
 
 getDifferentFavoredNode
-private int getDifferentFavoredNode(BaseLoadBalancer.Cluster cluster,
+private int getDifferentFavoredNode(BaseLoadBalancer.Cluster cluster,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List favoredNodes,
 int currentServer)
 
@@ -248,7 +248,7 @@ extends 
 
 pickLowestLocalRegionOnServer
-private int pickLowestLocalRegionOnServer(BaseLoadBalancer.Cluster cluster,
+private int pickLowestLocalRegionOnServer(BaseLoadBalancer.Cluster cluster,
   int server)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
index d9128aa..a690c8f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.html
@@ -123,7 +123,7 @@ var acti

[48/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 7294419..fdaa852 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2017 The Apache Software Foundation
 
   File: 2026,
- Errors: 12838,
+ Errors: 12854,
  Warnings: 0,
  Infos: 0
   
@@ -12501,7 +12501,7 @@ under the License.
   0
 
 
-  4
+  5
 
   
   
@@ -16351,7 +16351,7 @@ under the License.
   0
 
 
-  130
+  131
 
   
   
@@ -16827,7 +16827,7 @@ under the License.
   0
 
 
-  16
+  23
 
   
   
@@ -22273,7 +22273,7 @@ under the License.
   0
 
 
-  7
+  8
 
   
   
@@ -27075,7 +27075,7 @@ under the License.
   0
 
 
-  61
+  67
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/coc.html
--
diff --git a/coc.html b/coc.html
index 2bcb02f..674ea10 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index d02e55e..a7f1766 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index f8bd1f2..66b4366 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -527,7 +527,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index cf16315..901465f 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -741,7 +741,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Published: 
2017-08-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 289a137..04f7453 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-16
+  Last Publi

[29/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/LoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/LoadBalancer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/LoadBalancer.html
index a72acb5..a3214af 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/LoadBalancer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/LoadBalancer.html
@@ -26,136 +26,163 @@
 018 */
 019package org.apache.hadoop.hbase.master;
 020
-021import java.util.List;
-022import java.util.Map;
-023
-024import 
org.apache.hadoop.conf.Configurable;
-025import 
org.apache.hadoop.conf.Configuration;
-026import 
org.apache.hadoop.hbase.ClusterStatus;
-027import 
org.apache.hadoop.hbase.HBaseIOException;
-028import 
org.apache.hadoop.hbase.HRegionInfo;
-029import 
org.apache.hadoop.hbase.ServerName;
-030import 
org.apache.hadoop.hbase.Stoppable;
-031import 
org.apache.hadoop.hbase.TableName;
-032import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-033import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-034
-035import 
edu.umd.cs.findbugs.annotations.Nullable;
-036
-037/**
-038 * Makes decisions about the placement 
and movement of Regions across
-039 * RegionServers.
-040 *
-041 * 

Cluster-wide load balancing will occur only when there are no regions in -042 * transition and according to a fixed period of a time using {@link #balanceCluster(Map)}. -043 * -044 *

On cluster startup, bulk assignment can be used to determine -045 * locations for all Regions in a cluster. -046 * -047 *

This class produces plans for the -048 * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} -049 * to execute. -050 */ -051@InterfaceAudience.Private -052public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver { -053 -054 // Used to signal to the caller that the region(s) cannot be assigned -055 // We deliberately use 'localhost' so the operation will fail fast -056 ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1"); -057 -058 /** -059 * Set the current cluster status. This allows a LoadBalancer to map host name to a server -060 * @param st +021import java.util.*; +022 +023import com.google.common.collect.ImmutableSet; +024import com.google.common.collect.UnmodifiableIterator; +025import org.apache.hadoop.conf.Configurable; +026import org.apache.hadoop.conf.Configuration; +027import org.apache.hadoop.hbase.ClusterStatus; +028import org.apache.hadoop.hbase.HBaseIOException; +029import org.apache.hadoop.hbase.HRegionInfo; +030import org.apache.hadoop.hbase.ServerName; +031import org.apache.hadoop.hbase.Stoppable; +032import org.apache.hadoop.hbase.TableName; +033import org.apache.hadoop.hbase.classification.InterfaceAudience; +034import org.apache.hadoop.hbase.conf.ConfigurationObserver; +035 +036import edu.umd.cs.findbugs.annotations.Nullable; +037import org.apache.hadoop.hbase.security.access.AccessControlLists; +038import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +039import org.apache.hadoop.util.StringUtils; +040 +041/** +042 * Makes decisions about the placement and movement of Regions across +043 * RegionServers. +044 * +045 *

Cluster-wide load balancing will occur only when there are no regions in +046 * transition and according to a fixed period of a time using {@link #balanceCluster(Map)}. +047 * +048 *

On cluster startup, bulk assignment can be used to determine +049 * locations for all Regions in a cluster. +050 * +051 *

This class produces plans for the +052 * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} +053 * to execute. +054 */ +055@InterfaceAudience.Private +056public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver { +057 /** +058 * Master can carry regions as of hbase-2.0.0. +059 * By default, it carries no tables. +060 * TODO: Add any | system as flags to indicate what it can do. 061 */ -062 void setClusterStatus(ClusterStatus st); +062 public static final String TABLES_ON_MASTER = "hbase.balancer.tablesOnMaster"; 063 064 /** -065 * Pass RegionStates and allow balancer to set the current cluster load. -066 * @param ClusterLoad -067 */ -068 void setClusterLoad(Map>> ClusterLoad); +065 * Master carries system tables. +066 */ +067 public static final String SYSTEM_TABLES_ON_MASTER = +068 "hbase.balancer.tablesOnMaster.systemTablesOnly"; 069 -070 /** -071 * Set the master service. -072 * @param masterServices -073 */ -074 void setMasterServices(MasterServices masterServices); -075 -076 /** -077 * Perform the major balance operation -078 * @param tableName -079 * @param clusterState -080 * @return List of plans -081 */ -0


[36/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index a7c7bc1..fe0b5d0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -459,14 +459,14 @@ extends BaseLoadBalancer
-clusterStatus,
 config,
 IDLE_SERVER_PREDICATOR,
 masterServerName,
 metricsBalancer,
 MIN_SERVER_BALANCE,
 overallSlop,
 rackMana
 ger, regionFinder,
 services,
 slop,
 TABLES_ON_MASTER,
 tablesOnMaster
+clusterStatus,
 config,
 IDLE_SERVER_PREDICATOR,
 masterServerName,
 metricsBalancer,
 MIN_SERVER_BALANCE,
 onlySystemTablesOnMaster,
 overallSlop, rackManager,
 regionFinder,
 services,
 slop,
 tablesOnMaster
 
 
 
 
 
 Fields inherited from interface org.apache.hadoop.hbase.master.LoadBalancer
-BOGUS_SERVER_NAME
+BOGUS_SERVER_NAME,
 SYSTEM_TABLES_ON_MASTER,
 TABLES_ON_MASTER
 
 
 
@@ -628,7 +628,7 @@ extends BaseLoadBalancer
-assignMasterRegions,
 balanceMasterRegions,
 createCluster,
 getConf,
 getRegionAssignmentsByServer,
 getTablesOnMaster,
 initialize, 
isStopped,
 randomAssignment,
 regionOffline,
 regionOnline,
 retainAssignment,
 roundRobinAssignment, setClusterLoad,
 setRackManager,
 shouldBeOnMaster,
 stop,
 tablesOnMaster,
 userTablesOnMaster
+assignMasterSystemRegions,
 balanceMasterRegions,
 createCluster,
 getConf,
 getRegionAssignmentsByServer,
 initialize,
 isStopped, randomAssignment,
 regionOffline,
 regionOnline,
 retainAssignment,
 roundRobinAssignment,
 setClusterLoad, setRackManager,
 shouldBeOnMaster,
 stop
 
 
 
@@ -637,6 +637,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-";
 title="class or interface in java.lang">wait
 
+
+
+
+
+Methods inherited from interface org.apache.hadoop.hbase.master.LoadBalancer
+isSystemTablesOnlyOnMaster,
 isTablesOnMaster
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
index e458be7..c73f25f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
@@ -197,8 +197,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
+org.apache.hadoop.hbase.master.balancer.BaseLoad

[42/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index d4d9441..9fc3978 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class AssignmentManager.RegionInTransitionStat
+public static class AssignmentManager.RegionInTransitionStat
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ritThreshold
-private final int ritThreshold
+private final int ritThreshold
 
 
 
@@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ritsOverThreshold
-private http://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true";
 title="class or interface in java.util">HashMapString,RegionState> ritsOverThreshold
+private http://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true";
 title="class or interface in java.util">HashMapString,RegionState> ritsOverThreshold
 
 
 
@@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 statTimestamp
-private long statTimestamp
+private long statTimestamp
 
 
 
@@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 oldestRITTime
-private long oldestRITTime
+private long oldestRITTime
 
 
 
@@ -302,7 +302,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 totalRITsTwiceThreshold
-private int totalRITsTwiceThreshold
+private int totalRITsTwiceThreshold
 
 
 
@@ -311,7 +311,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 totalRITs
-private int totalRITs
+private int totalRITs
 
 
 
@@ -328,7 +328,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionInTransitionStat
-public RegionInTransitionStat(org.apache.hadoop.conf.Configuration conf)
+public RegionInTransitionStat(org.apache.hadoop.conf.Configuration conf)
 
 
 
@@ -345,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRITThreshold
-public int getRITThreshold()
+public int getRITThreshold()
 
 
 
@@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTimestamp
-public long getTimestamp()
+public long getTimestamp()
 
 
 
@@ -363,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTotalRITs
-public int getTotalRITs()
+public int getTotalRITs()
 
 
 
@@ -372,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getOldestRITTime
-public long getOldestRITTime()
+public long getOldestRITTime()
 
 
 
@@ -381,7 +381,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTotalRITsOverThreshold
-public int getTotalRITsOverThreshold()
+public int getTotalRITsOverThreshold()
 
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 hasRegionsTwiceOverThreshold
-public boolean hasRegionsTwiceOverThreshold()
+public boolean hasRegionsTwiceOverThreshold()
 
 
 
@@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 hasRegionsOverThreshold
-public boolean hasRegionsOverThreshold()
+public boolean hasRegionsOverThreshold()
 
 
 
@@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRegionOverThreshold
-public http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getRegionOverThreshold()
+public http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getRegionOverThreshold()
 
 
 
@@ -417,7 +417,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isRegionOverThreshold
-public boolean isRegionOverThreshold(HRegionInfo regionInfo)
+public boolean isRegionOverThreshold(HRegionInfo regionInfo)
 

[18/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index d9c59a0..13f64df 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -69,1532 +69,1492 @@
 061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
 062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-064
-065/**
-066 * The base class for load balancers. It 
provides the the functions used to by
-067 * {@link 
org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign 
regions
-068 * in the edge cases. It doesn't provide 
an implementation of the
-069 * actual balancing algorithm.
-070 *
-071 */
-072public abstract class BaseLoadBalancer 
implements LoadBalancer {
-073  protected static final int 
MIN_SERVER_BALANCE = 2;
-074  private volatile boolean stopped = 
false;
-075
-076  private static final 
List EMPTY_REGION_LIST = new ArrayList<>(0);
-077
-078  static final 
Predicate IDLE_SERVER_PREDICATOR
-079= load -> 
load.getNumberOfRegions() == 0;
-080
-081  protected final RegionLocationFinder 
regionFinder = new RegionLocationFinder();
-082
-083  private static class DefaultRackManager 
extends RackManager {
-084@Override
-085public String getRack(ServerName 
server) {
-086  return UNKNOWN_RACK;
-087}
-088  }
-089
-090  /**
-091   * The constructor that uses the basic 
MetricsBalancer
-092   */
-093  protected BaseLoadBalancer() {
-094metricsBalancer = new 
MetricsBalancer();
-095  }
-096
-097  /**
-098   * This Constructor accepts an instance 
of MetricsBalancer,
-099   * which will be used instead of 
creating a new one
-100   */
-101  protected 
BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-102this.metricsBalancer = 
(metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-103  }
-104
-105  /**
-106   * An efficient array based 
implementation similar to ClusterState for keeping
-107   * the status of the cluster in terms 
of region assignment and distribution.
-108   * LoadBalancers, such as 
StochasticLoadBalancer uses this Cluster object because of
-109   * hundreds of thousands of hashmap 
manipulations are very costly, which is why this
-110   * class uses mostly indexes and 
arrays.
-111   *
-112   * Cluster tracks a list of unassigned 
regions, region assignments, and the server
-113   * topology in terms of server names, 
hostnames and racks.
-114   */
-115  protected static class Cluster {
-116ServerName[] servers;
-117String[] hosts; // ServerName 
uniquely identifies a region server. multiple RS can run on the same host
-118String[] racks;
-119boolean multiServersPerHost = false; 
// whether or not any host has more than one server
-120
-121ArrayList tables;
-122HRegionInfo[] regions;
-123Deque[] 
regionLoads;
-124private RegionLocationFinder 
regionFinder;
-125
-126int[][] regionLocations; 
//regionIndex -> list of serverIndex sorted by locality
-127
-128int[]   serverIndexToHostIndex;  
//serverIndex -> host index
-129int[]   serverIndexToRackIndex;  
//serverIndex -> rack index
-130
-131int[][] regionsPerServer;
//serverIndex -> region list
-132int[][] regionsPerHost;  
//hostIndex -> list of regions
-133int[][] regionsPerRack;  
//rackIndex -> region list
-134int[][] primariesOfRegionsPerServer; 
//serverIndex -> sorted list of regions by primary region index
-135int[][] primariesOfRegionsPerHost;   
//hostIndex -> sorted list of regions by primary region index
-136int[][] primariesOfRegionsPerRack;   
//rackIndex -> sorted list of regions by primary region index
-137
-138int[][] serversPerHost;  
//hostIndex -> list of server indexes
-139int[][] serversPerRack;  
//rackIndex -> list of server indexes
-140int[]   regionIndexToServerIndex;
//regionIndex -> serverIndex
-141int[]   
initialRegionIndexToServerIndex;//regionIndex -> serverIndex (initial 
cluster state)
-142int[]   regionIndexToTableIndex; 
//regionIndex -> tableIndex
-143int[][] numRegionsPerServerPerTable; 
//serverIndex -> tableIndex -> # regions
-144int[]   numMaxRegionsPerTable;   
//tableIndex -> max number of regions in a single RS
-145int[]   regionIndexToPrimaryIndex;   
//regionIndex -> regionIndex of the primary
-146boolea

[46/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
index 2cee6d2..4a8daf0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilder.html
@@ -106,7 +106,7 @@
 
 
 (package private) class 
-AsyncAdminBuilderBase
+AsyncAdminBuilderBase
 Base class for all asynchronous admin builders.
 
 
@@ -120,85 +120,85 @@
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnection.getAdminBuilder()
 Returns an AsyncAdminBuilder for creating 
AsyncAdmin.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnectionImpl.getAdminBuilder() 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnection.getAdminBuilder(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool)
 Returns an AsyncAdminBuilder for creating 
AsyncAdmin.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncConnectionImpl.getAdminBuilder(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool) 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setMaxAttempts(int maxAttempts)
 Set the max attempt times for an admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilderBase.setMaxAttempts(int maxAttempts) 
 
 
-default AsyncAdminBuilder
+default AsyncAdminBuilder
 AsyncAdminBuilder.setMaxRetries(int maxRetries)
 Set the max retry times for an admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setOperationTimeout(long timeout,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole admin operation.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilderBase.setOperationTimeout(long timeout,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit) 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setRetryPause(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set the base pause time for retrying.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilderBase.setRetryPause(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit) 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setRpcTimeout(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each rpc request.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilderBase.setRpcTimeout(long timeout,
  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeUnit unit) 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilder.setStartLogErrorsCnt(int startLogErrorsCnt)
 Set the number of retries that are allowed before we start 
to log.
 
 
 
-AsyncAdminBuilder
+AsyncAdminBuilder
 AsyncAdminBuilderBase.setStartLogErrorsCnt(int startLogErrorsCnt) 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilderBase.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilderBase.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilderBase.html
index af91aad..aacddbb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilderBase.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdminBuilderBase.html
@@ -105,7 +105,7 @@
 
 
 RawAsyncHBaseAdmin(AsyncConnectionImpl connection,
-  AsyncAdminBuilderBase builder) 
+  AsyncAdminBuilderBase builder) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/client/class-u

[43/51] [partial] hbase-site git commit: Published site at .

2017-08-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index a0004a6..165301e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -370,7 +370,7 @@ extends Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--";
 title="class or interface in java.lang">run in 
interface http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable
 Overrides:
-run in
 class HRegionServer
+run in
 class HMaster
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/33bc9e06/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html 
b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
index 654e886..0246c73 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":17,"i4":17,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public interface LoadBalancer
+public interface LoadBalancer
 extends org.apache.hadoop.conf.Configurable, Stoppable, ConfigurationObserver
 Makes decisions about the placement and movement of Regions 
across
  RegionServers.
@@ -150,6 +150,18 @@ extends org.apache.hadoop.conf.Configurable, static ServerName
 BOGUS_SERVER_NAME 
 
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+SYSTEM_TABLES_ON_MASTER
+Master carries system tables.
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+TABLES_ON_MASTER
+Master can carry regions as of hbase-2.0.0.
+
+
 
 
 
@@ -160,7 +172,7 @@ extends org.apache.hadoop.conf.Configurable, 
-All Methods Instance Methods Abstract Methods 
+All Methods Static Methods Instance Methods Abstract Methods Default Methods 
 
 Modifier and Type
 Method and Description
@@ -185,59 +197,67 @@ extends org.apache.hadoop.conf.Configurable, 
+static boolean
+isSystemTablesOnlyOnMaster(org.apache.hadoop.conf.Configuration conf) 
+
+
+static boolean
+isTablesOnMaster(org.apache.hadoop.conf.Configuration conf) 
+
+
 void
 onConfigurationChange(org.apache.hadoop.conf.Configuration conf)
 This method would be called by the ConfigurationManager
  object when the Configuration object is reloaded from disk.
 
 
-
+
 ServerName
 randomAssignment(HRegionInfo regionInfo,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List servers)
 Get a random region server from the list
 
 
-
+
 void
 regionOffline(HRegionInfo regionInfo)
 Marks the region as offline at balancer.
 
 
-
+
 void
 regionOnline(HRegionInfo regionInfo,
 ServerName sn)
 Marks the region as online at balancer.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList>
 retainAssignment(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map regions,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List servers)
 Assign regions to the previously hosting region server
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map

hbase git commit: HBASE-18375: Fix the bug where the pool chunks from ChunkCreator are deallocated and not returned to pool, because there is no reference to them

2017-08-17 Thread anastasia
Repository: hbase
Updated Branches:
  refs/heads/master 092dc6de8 -> 75a6b3684


HBASE-18375: Fix the bug where the pool chunks from ChunkCreator are 
deallocated and not returned to pool, because there is no reference to them


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75a6b368
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75a6b368
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75a6b368

Branch: refs/heads/master
Commit: 75a6b36849c58d6a751f57226ab0c8f7884a9e87
Parents: 092dc6d
Author: anastas 
Authored: Thu Aug 17 18:23:19 2017 +0300
Committer: anastas 
Committed: Thu Aug 17 18:23:19 2017 +0300

--
 .../regionserver/CellChunkImmutableSegment.java |   5 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java | 171 +--
 .../hbase/regionserver/CompactionPipeline.java  |  19 ++-
 .../hbase/regionserver/MemStoreLABImpl.java |  27 ++-
 .../hbase/regionserver/TestMemStoreLAB.java |  12 +-
 .../TestMemstoreLABWithoutPool.java |   3 +-
 6 files changed, 126 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75a6b368/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
index cdda279..3653166 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
@@ -176,10 +176,7 @@ public class CellChunkImmutableSegment extends 
ImmutableSegment {
   private int createCellReference(ByteBufferKeyValue cell, ByteBuffer 
idxBuffer, int idxOffset) {
 int offset = idxOffset;
 int dataChunkID = cell.getChunkId();
-// ensure strong pointer to data chunk, as index is no longer directly 
points to it
-Chunk c = ChunkCreator.getInstance().saveChunkFromGC(dataChunkID);
-// if c is null, it means that this cell chunks was already released 
shouldn't happen
-assert (c!=null);
+
 offset = ByteBufferUtils.putInt(idxBuffer, offset, dataChunkID);// 
write data chunk id
 offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getOffset());  
// offset
 offset = ByteBufferUtils.putInt(idxBuffer, offset, 
KeyValueUtil.length(cell)); // length

http://git-wip-us.apache.org/repos/asf/hbase/blob/75a6b368/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 7e5395c..e818426 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -18,7 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.lang.ref.WeakReference;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
@@ -58,21 +57,8 @@ public class ChunkCreator {
   // the header size need to be changed in case chunk id size is changed
   public static final int SIZEOF_CHUNK_HEADER = Bytes.SIZEOF_INT;
 
-  // An object pointed by a weak reference can be garbage collected, in 
opposite to an object
-  // referenced by a strong (regular) reference. Every chunk created via 
ChunkCreator is referenced
-  // from either weakChunkIdMap or strongChunkIdMap.
-  // Upon chunk C creation, C's ID is mapped into weak reference to C, in 
order not to disturb C's
-  // GC in case all other reference to C are going to be removed.
-  // When chunk C is referenced from CellChunkMap (via C's ID) it is possible 
to GC the chunk C.
-  // To avoid that upon inserting C into CellChunkMap, C's ID is mapped into 
strong (regular)
-  // reference to C.
-
-  // map that doesn't influence GC
-  private Map> weakChunkIdMap =
-  new ConcurrentHashMap>();
-
-  // map that keeps chunks from garbage collection
-  private Map strongChunkIdMap = new 
ConcurrentHashMap();
+  // mapping from chunk IDs to chunks
+  private Map chunkIdMap = new ConcurrentHashMap();
 
   private final int chunkSize;
   private final boolean offheap;
@@ -95,7 +81,7 @@ public class ChunkCreator {
   }
 
   /**
-   * Initializes the instance of MSLABChunkCreator
+   * Initializes the instance of ChunkCreator
* @param chunkSize the chunkSize
* @param offheap indicates if the chunk is to be c

[12/50] [abbrv] hbase git commit: HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)

2017-08-17 Thread stack
HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa8f67a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa8f67a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa8f67a1

Branch: refs/heads/HBASE-14070.HLC
Commit: aa8f67a148cbefbfc4bfdc25b2dc48c7ed947212
Parents: 1070888
Author: tedyu 
Authored: Fri Aug 11 16:41:40 2017 -0700
Committer: tedyu 
Committed: Fri Aug 11 16:41:40 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java   | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa8f67a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 96bf859..421ae8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -53,7 +53,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -164,6 +164,9 @@ import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
@@ -200,9 +203,6 @@ import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.webapp.WebAppContext;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Service;
 
@@ -1937,14 +1937,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   }
 
   // 3. blocking file count
-  String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
-  if (sbfc != null) {
-blockingFileCount = Integer.parseInt(sbfc);
+  sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+  if (sv != null) {
+blockingFileCount = Integer.parseInt(sv);
   }
   if (blockingFileCount < 1000) {
 message =
-"blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
-+ " is below recommended minimum of 1000";
+"Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
++ " is below recommended minimum of 1000 for column family "+ 
hcd.getNameAsString();
 throw new IOException(message);
   }
 }



[07/50] [abbrv] hbase git commit: HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method

2017-08-17 Thread stack
HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cabdbf18
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cabdbf18
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cabdbf18

Branch: refs/heads/HBASE-14070.HLC
Commit: cabdbf181a0b44bd1f9c32aa67b8a2ee3b863758
Parents: 679f34e
Author: Guanghao Zhang 
Authored: Wed Aug 2 13:52:16 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Aug 11 16:33:35 2017 +0800

--
 .../hbase/client/BufferedMutatorImpl.java   |  19 ---
 .../org/apache/hadoop/hbase/client/HTable.java  | 126 ---
 .../org/apache/hadoop/hbase/client/Table.java   |  33 +
 .../hadoop/hbase/client/TableBuilder.java   |   6 -
 .../hadoop/hbase/client/TableBuilderBase.java   |   9 --
 .../hadoop/hbase/client/TestAsyncProcess.java   |  71 +--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  10 --
 .../hadoop/hbase/client/HTableWrapper.java  |  10 --
 .../security/access/AccessControlLists.java |  16 ++-
 .../hadoop/hbase/PerformanceEvaluation.java |  13 +-
 .../hadoop/hbase/client/TestClientPushback.java |   6 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |   3 +-
 .../hadoop/hbase/client/TestMultiParallel.java  |   1 -
 .../hbase/client/TestServerBusyException.java   |   8 +-
 .../hadoop/hbase/constraint/TestConstraint.java |   9 +-
 .../hbase/coprocessor/TestHTableWrapper.java|   8 --
 .../hbase/regionserver/RegionAsTable.java   |  10 --
 .../replication/TestMasterReplication.java  |   1 -
 .../replication/TestMultiSlaveReplication.java  |   3 -
 .../hbase/replication/TestReplicationBase.java  |   1 -
 .../replication/TestReplicationSmallTests.java  |   1 -
 .../replication/TestReplicationSyncUpTool.java  |   4 -
 .../hbase/security/access/SecureTestUtil.java   |   2 +-
 .../security/access/TestNamespaceCommands.java  |   1 +
 25 files changed, 69 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cabdbf18/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 0ddc159..b7d3104 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -305,25 +305,6 @@ public class BufferedMutatorImpl implements 
BufferedMutator {
   }
 };
   }
-  /**
-   * This is used for legacy purposes in {@link 
HTable#setWriteBufferSize(long)} only. This ought
-   * not be called for production uses.
-   * If the new buffer size is smaller than the stored data, the {@link 
BufferedMutatorImpl#flush()}
-   * will be called.
-   * @param writeBufferSize The max size of internal buffer where data is 
stored.
-   * @throws 
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException
-   * if an I/O error occurs and there are too many retries.
-   * @throws java.io.InterruptedIOException if the I/O task is interrupted.
-   * @deprecated Going away when we drop public support for {@link HTable}.
-   */
-  @Deprecated
-  public void setWriteBufferSize(long writeBufferSize) throws 
RetriesExhaustedWithDetailsException,
-  InterruptedIOException {
-this.writeBufferSize = writeBufferSize;
-if (currentWriteBufferSize.get() > writeBufferSize) {
-  flush();
-}
-  }
 
   /**
* {@inheritDoc}

http://git-wip-us.apache.org/repos/asf/hbase/blob/cabdbf18/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c0d321b..2920281 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -107,9 +107,6 @@ public class HTable implements Table {
   private final TableName tableName;
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
-  @VisibleForTesting
-  volatile BufferedMutatorImpl mutator;
-  private final Object mutatorLock = new Object();
   private boolean closed = false;
   private final int scannerCaching;
   private final long scannerMaxResultSize;
@@ -120,7 +117,6 @@ public class HTable implements Table {
   private int writeRpcTimeout; /

[43/50] [abbrv] hbase git commit: HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are handled by S

2017-08-17 Thread stack
HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through 
AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are 
handled by ServerCrashProcedure

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acf9b87d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acf9b87d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acf9b87d

Branch: refs/heads/HBASE-14070.HLC
Commit: acf9b87dca2cd190f4b5318efd5dc48e19b317f4
Parents: 4c74a73
Author: Umesh Agashe 
Authored: Tue Aug 15 14:00:04 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:12:05 2017 -0700

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/acf9b87d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 54cb1ca..0b23f47 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -480,6 +480,15 @@ public class AssignmentManager implements ServerListener {
 synchronized (checkIfShouldMoveSystemRegionLock) {
   List plans = new ArrayList<>();
   for (ServerName server : getExcludedServersForSystemTable()) {
+if (master.getServerManager().isServerDead(server)) {
+  // TODO: See HBASE-18494 and HBASE-18495. Though 
getExcludedServersForSystemTable()
+  // considers only online servers, the server could be queued for 
dead server
+  // processing. As region assignments for a crashed server are 
handled by
+  // ServerCrashProcedure, do NOT handle them here. The goal is to 
handle this through
+  // regular flow of LoadBalancer as a favored node and not to 
have this special
+  // handling.
+  continue;
+}
 List regionsShouldMove = 
getCarryingSystemTables(server);
 if (!regionsShouldMove.isEmpty()) {
   for (HRegionInfo regionInfo : regionsShouldMove) {



[47/50] [abbrv] hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-17 Thread stack
HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a17ed035
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a17ed035
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a17ed035

Branch: refs/heads/HBASE-14070.HLC
Commit: a17ed0356f12c6f7a682557d92cc401b7a4297f1
Parents: 4c3a64d
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:14:12 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a17ed035/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb 
b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4b0de5f..2aacd7f 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -386,7 +386,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip)
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



[20/50] [abbrv] hbase git commit: HBASE-18566 [RSGROUP]Log the client IP/port of the rsgroup admin

2017-08-17 Thread stack
HBASE-18566 [RSGROUP]Log the client IP/port of the rsgroup admin

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6bf4d51
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6bf4d51
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6bf4d51

Branch: refs/heads/HBASE-14070.HLC
Commit: c6bf4d51928985517c438d1efcaefdeddc9ac5ef
Parents: 05e6e56
Author: Guangxu Cheng 
Authored: Mon Aug 14 10:44:52 2017 +0800
Committer: tedyu 
Committed: Sun Aug 13 20:29:12 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java   | 15 +++
 .../java/org/apache/hadoop/hbase/master/HMaster.java |  2 +-
 .../apache/hadoop/hbase/master/MasterServices.java   |  2 ++
 .../hadoop/hbase/master/MockNoopMasterServices.java  |  5 +
 4 files changed, 23 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c6bf4d51/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 9fda3f0..0bc5c76 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -120,6 +120,8 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
 GetRSGroupInfoRequest request, RpcCallback 
done) {
   GetRSGroupInfoResponse.Builder builder = 
GetRSGroupInfoResponse.newBuilder();
   String groupName = request.getRSGroupName();
+  LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info 
retrieval, group="
+  + groupName);
   try {
 RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
 if (rsGroupInfo != null) {
@@ -137,6 +139,8 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
   GetRSGroupInfoOfTableResponse.Builder builder = 
GetRSGroupInfoOfTableResponse.newBuilder();
   try {
 TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info 
retrieval, table="
++ tableName);
 RSGroupInfo RSGroupInfo = 
groupAdminServer.getRSGroupInfoOfTable(tableName);
 if (RSGroupInfo != null) {
   
builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo));
@@ -156,6 +160,8 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
 for (HBaseProtos.ServerName el : request.getServersList()) {
   hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
 }
+LOG.info(master.getClientIdAuditPrefix() + " move servers " + 
hostPorts +" to rsgroup "
++ request.getTargetGroup());
 groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
   } catch (IOException e) {
 CoprocessorRpcUtils.setControllerException(controller, e);
@@ -172,6 +178,8 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
 for (HBaseProtos.TableName tableName : request.getTableNameList()) {
   tables.add(ProtobufUtil.toTableName(tableName));
 }
+LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" 
to rsgroup "
++ request.getTargetGroup());
 groupAdminServer.moveTables(tables, request.getTargetGroup());
   } catch (IOException e) {
 CoprocessorRpcUtils.setControllerException(controller, e);
@@ -183,6 +191,7 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
 public void addRSGroup(RpcController controller, AddRSGroupRequest request,
 RpcCallback done) {
   AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
+  LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + 
request.getRSGroupName());
   try {
 groupAdminServer.addRSGroup(request.getRSGroupName());
   } catch (IOException e) {
@@ -196,6 +205,7 @@ public class RSGroupAdminEndpoint implements 
MasterObserver, CoprocessorService
 RemoveRSGroupRequest request, RpcCallback done) 
{
   RemoveRSGroupResponse.Builder builder =
   RemoveRSGroupResponse.newBuilder();
+  LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + 
request.getRSGroupName());
   try {
 groupAdminServer.removeRSGroup(request.getRSGroupName());
   } catch (IOException e) {
@@ -208,6 +218,7 @@ public class RSGroupAdminEndpoint i

[33/50] [abbrv] hbase git commit: HBASE-18504 Add documentation for WAL compression

2017-08-17 Thread stack
HBASE-18504 Add documentation for WAL compression

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/63e313b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/63e313b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/63e313b5

Branch: refs/heads/HBASE-14070.HLC
Commit: 63e313b5c0d7c56d9cf9602e3c204214331189d5
Parents: 70c4f78
Author: Peter Somogyi 
Authored: Wed Aug 2 17:00:52 2017 +0200
Committer: Michael Stack 
Committed: Tue Aug 15 12:54:08 2017 -0700

--
 src/main/asciidoc/_chapters/architecture.adoc | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/63e313b5/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index ebb0677..2ded813 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1216,6 +1216,22 @@ This will be the default for HBase 0.99 
(link:https://issues.apache.org/jira/bro
 You must also enable HFile version 3 (which is the default HFile format 
starting in HBase 0.99.
 See link:https://issues.apache.org/jira/browse/HBASE-10855[HBASE-10855]). 
Distributed log replay is unsafe for rolling upgrades.
 
+[[wal.compression]]
+ WAL Compression 
+
+The content of the WAL can be compressed using LRU Dictionary compression.
+This can be used to speed up WAL replication to different datanodes.
+The dictionary can store up to 2^15^ elements; eviction starts after this 
number is exceeded.
+
+To enable WAL compression, set the `hbase.regionserver.wal.enablecompression` 
property to `true`.
+The default value for this property is `false`.
+By default, WAL tag compression is turned on when WAL compression is enabled.
+You can turn off WAL tag compression by setting the 
`hbase.regionserver.wal.tags.enablecompression` property to 'false'.
+
+A possible downside to WAL compression is that we lose more data from the last 
block in the WAL if it is ill-terminated 
+mid-write. If entries in this last block were added with new dictionary 
entries but we failed to persist the amended
+dictionary because of an abrupt termination, a read of this last block may not 
be able to resolve last-written entries. 
+
 [[wal.disable]]
  Disabling the WAL
 



[39/50] [abbrv] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-17 Thread stack
HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0878184
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0878184
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0878184

Branch: refs/heads/HBASE-14070.HLC
Commit: b0878184a31804a4bf061df7581964157b4849d5
Parents: 59ffb611
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 22:29:16 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 37 +++-
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 106 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index 8476f61..18096e1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -110,6 +110,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+if (assigned != null && assigned.length > 0) {
+  actions = Arrays.copyOf(assigned, assigned.length);
+}
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 12bdc22..38e292c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -241,13 +241,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission 
userPerm, Table t)
   throws IOException {
-Delete d = new Delete(userPermissionRowKey(userPerm));
-byte[] key = userPermissionKey(userPerm);
-
+if (null == userPerm.getActions()) {
+  removePermissionRecord(conf, userPerm, t);
+} else {
+  // Get all the global user permissions from the acl table
+  List permsList = getUserPermissions(conf, 
userPermissionRowKey(userPerm));
+  List remainingActions = new ArrayList<>();
+  List dropActions = 
Arrays.asList(userPerm.getActions());
+  for (UserPermission perm : permsList) {
+// Find the user and remove only the requested permissions
+if 
(Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser( {
+  for (Permission.Action oldAction : perm.getActions()) {
+if (!dropActions.contains(oldAction)) {
+  remainingActions.add(oldAction);
+}
+  }
+  if (!remainingActions.isEmpty()) {
+perm.setActions(remainingActions.toArray(new 
Permission.Action[remainingActions.size()]));
+addUserPermission(conf, perm, t);
+  } else {
+removePermissionRecord(conf, userPerm, t);
+  }
+  break;
+}
+  }
+}
 if (LOG.isDebugEnabled()) {
-  LOG.debug("Removing permission "+ userPerm.toString());
+  LOG.debug("Removed permission "+ userPerm.toString());
 }
-d.addColumns(ACL_LIST_FAMILY, key);
+  }
+
+  private static void removePermissionRecord(Configuration conf, 
UserPermission userPerm, Table t)
+  throws IOException {
+Delete d = new Delete(userPermissionRowKey(userPerm));
+d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
 try {
   t.delete(d);
 } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index c1fbb28..6583366 100644
--- 

[16/50] [abbrv] hbase git commit: HBASE-18271 Shade netty Purge mention of netty-all.

2017-08-17 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/c6ac04ab/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java
index 3b32383..6a39e12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyIPC.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import io.netty.channel.epoll.EpollEventLoopGroup;
-import io.netty.channel.epoll.EpollSocketChannel;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
+import 
org.apache.hadoop.hbase.shaded.io.netty.channel.epoll.EpollEventLoopGroup;
+import 
org.apache.hadoop.hbase.shaded.io.netty.channel.epoll.EpollSocketChannel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioSocketChannel;
 
 import java.util.ArrayList;
 import java.util.Collection;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6ac04ab/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
index 9b28975..2ae916f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioSocketChannel;
 
 import java.io.IOException;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6ac04ab/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
index bb67820..708d64c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncProtobufLog.java
@@ -19,10 +19,10 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
 
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import 
org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioSocketChannel;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6ac04ab/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
index 17f58f8..881cf7c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import io.netty.channel.Channel;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import 
org.apache.hadoop.hbase.shaded.io.netty.cha

[45/50] [abbrv] hbase git commit: HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest

2017-08-17 Thread stack
HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d2c3ddf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d2c3ddf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d2c3ddf

Branch: refs/heads/HBASE-14070.HLC
Commit: 5d2c3ddf5238fa0b48b8aa4fb99108eaf69f16aa
Parents: 4734467
Author: Michael Stack 
Authored: Wed Aug 16 10:04:33 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 10:04:56 2017 -0700

--
 hbase-spark/pom.xml | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d2c3ddf/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8137b53..d294835 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -559,9 +559,11 @@
   test
 
 
-  
--Xmx1536m -XX:ReservedCodeCacheSize=512m
-
+  
+
org.apache.hadoop.hbase.shaded.
+  
+   -Xmx1536m -XX:ReservedCodeCacheSize=512m
+  
   false
 
   



[21/50] [abbrv] hbase git commit: HBASE-18528 DON'T allow user to modify the passed table/column descriptor

2017-08-17 Thread stack
HBASE-18528 DON'T allow user to modify the passed table/column descriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2b797be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2b797be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2b797be

Branch: refs/heads/HBASE-14070.HLC
Commit: e2b797be390f05c55a490a64bc72e2d8c19fcbb7
Parents: c6bf4d5
Author: Chia-Ping Tsai 
Authored: Mon Aug 14 14:02:30 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Mon Aug 14 14:02:30 2017 +0800

--
 .../client/ImmutableHColumnDescriptor.java  |  5 +-
 .../hbase/client/ImmutableHTableDescriptor.java | 11 ++-
 .../hbase/coprocessor/MasterObserver.java   | 48 ++--
 .../hbase/master/MasterCoprocessorHost.java | 80 +++-
 4 files changed, 79 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2b797be/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
index c8d34ff..89ef851 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
@@ -35,8 +35,9 @@ public class ImmutableHColumnDescriptor extends 
HColumnDescriptor {
 super(desc, false);
   }
 
-  ImmutableHColumnDescriptor(final ModifyableColumnFamilyDescriptor desc) {
-super(desc);
+  public ImmutableHColumnDescriptor(final ColumnFamilyDescriptor desc) {
+super(desc instanceof ModifyableColumnFamilyDescriptor ?
+  (ModifyableColumnFamilyDescriptor) desc : new 
ModifyableColumnFamilyDescriptor(desc));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2b797be/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
index 4e9e9af..169f143 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
@@ -28,19 +28,17 @@ import org.apache.hadoop.hbase.HTableDescriptor;
  * Read-only table descriptor.
  */
 @Deprecated // deprecated for hbase 2.0, remove for hbase 3.0. see 
HTableDescriptor.
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 public class ImmutableHTableDescriptor extends HTableDescriptor {
 
   @Override
   protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) 
{
 if (desc == null) {
   return null;
-} else if (desc instanceof ModifyableColumnFamilyDescriptor) {
-  return new ImmutableHColumnDescriptor((ModifyableColumnFamilyDescriptor) 
desc);
 } else if (desc instanceof HColumnDescriptor) {
   return new ImmutableHColumnDescriptor((HColumnDescriptor) desc);
 } else {
-  return new ImmutableHColumnDescriptor(new 
ModifyableColumnFamilyDescriptor(desc));
+  return new ImmutableHColumnDescriptor(desc);
 }
   }
   /*
@@ -51,6 +49,11 @@ public class ImmutableHTableDescriptor extends 
HTableDescriptor {
 super(desc, false);
   }
 
+  public ImmutableHTableDescriptor(final TableDescriptor desc) {
+super(desc instanceof ModifyableTableDescriptor ?
+  (ModifyableTableDescriptor) desc : new 
ModifyableTableDescriptor(desc.getTableName(), desc));
+  }
+
   @Override
   protected ModifyableTableDescriptor getDelegateeForModification() {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2b797be/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index f4f5db3..8e368ba 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -107,7 +107,7 @@ public interface MasterObserver extends Coprocessor {
* table handler and it is async to the create RPC call.
* It can't bypass the default action, e.g., ctx.bypass() wo

[05/50] [abbrv] hbase git commit: HBASE-18387: [Thrift] Make principal configurable in DemoClient.java

2017-08-17 Thread stack
HBASE-18387: [Thrift] Make principal configurable in DemoClient.java

Added optional (fourth) parameter "server-principal"
The solution is backward compatible: if the parameter is not given, "hbase" is used as the 
default value.
If the third parameter is skipped the fourth cannot be set.

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8da77b41
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8da77b41
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8da77b41

Branch: refs/heads/HBASE-14070.HLC
Commit: 8da77b414657f0ee3b093913de5f92eba17ecd2a
Parents: 4dd24c5
Author: Tamas Penzes 
Authored: Tue Aug 8 13:45:09 2017 +0200
Committer: Josh Elser 
Committed: Thu Aug 10 23:47:07 2017 -0400

--
 hbase-examples/README.txt|  3 ++-
 .../org/apache/hadoop/hbase/thrift/DemoClient.java   | 15 ---
 2 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8da77b41/hbase-examples/README.txt
--
diff --git a/hbase-examples/README.txt b/hbase-examples/README.txt
index 22d1103..bf28180 100644
--- a/hbase-examples/README.txt
+++ b/hbase-examples/README.txt
@@ -28,7 +28,8 @@ Example code.
   2. If HBase server is not secure, or authentication is not enabled for 
the Thrift server, execute:
   {java -cp hbase-examples-[VERSION].jar:${HBASE_EXAMPLE_CLASSPATH} 
org.apache.hadoop.hbase.thrift.DemoClient  }
   3. If HBase server is secure, and authentication is enabled for the 
Thrift server, run kinit at first, then execute:
-  {java -cp hbase-examples-[VERSION].jar:${HBASE_EXAMPLE_CLASSPATH} 
org.apache.hadoop.hbase.thrift.DemoClient   true}
+  {java -cp hbase-examples-[VERSION].jar:${HBASE_EXAMPLE_CLASSPATH} 
org.apache.hadoop.hbase.thrift.DemoClient   true }
+   should only be specified when the client connects to 
a secure cluster. It's default value is "hbase".
   4. Here is a lazy example that just pulls in all hbase dependency jars 
and that goes against default location on localhost.
   It should work with a standalone hbase instance started by doing 
./bin/start-hbase.sh:
   {java -cp 
./hbase-examples/target/hbase-examples-2.0.0-SNAPSHOT.jar:`./bin/hbase 
classpath` org.apache.hadoop.hbase.thrift.DemoClient localhost 9090}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da77b41/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
--
diff --git 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
index cb0cfbb..706f82f 100644
--- 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
+++ 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
@@ -60,13 +60,14 @@ public class DemoClient {
 CharsetDecoder decoder = null;
 
 private static boolean secure = false;
+private static String serverPrincipal = "hbase";
 
 public static void main(String[] args) throws Exception {
 
-if (args.length < 2 || args.length > 3) {
+if (args.length < 2 || args.length > 4 || (args.length > 2 && 
!isBoolean(args[2]))) {
 
 System.out.println("Invalid arguments!");
-System.out.println("Usage: DemoClient host port [secure=false]");
+System.out.println("Usage: DemoClient host port [secure=false 
[server-principal=hbase] ]");
 
 System.exit(-1);
 }
@@ -77,6 +78,10 @@ public class DemoClient {
   secure = Boolean.parseBoolean(args[2]);
 }
 
+if (args.length == 4) {
+  serverPrincipal = args[3];
+}
+
 final DemoClient client = new DemoClient();
 Subject.doAs(getSubject(),
   new PrivilegedExceptionAction() {
@@ -88,6 +93,10 @@ public class DemoClient {
   });
 }
 
+private static boolean isBoolean(String s){
+  return Boolean.TRUE.toString().equalsIgnoreCase(s) || 
Boolean.FALSE.toString().equalsIgnoreCase(s);
+}
+
 DemoClient() {
 decoder = Charset.forName("UTF-8").newDecoder();
 }
@@ -123,7 +132,7 @@ public class DemoClient {
* The HBase cluster must be secure, allow proxy user.
*/
   transport = new TSaslClientTransport("GSSAPI", null,
-"hbase", // Thrift server user name, should be an authorized proxy 
user.
+serverPrincipal, // Thrift server user name, should be an 
authorized proxy user.
 host, // Thrift server domain
 saslProperties, null, transport);
 }



[04/50] [abbrv] hbase git commit: HBASE-17125 Inconsistent result when use filter to read data

2017-08-17 Thread stack
HBASE-17125 Inconsistent result when use filter to read data


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4dd24c52
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4dd24c52
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4dd24c52

Branch: refs/heads/HBASE-14070.HLC
Commit: 4dd24c52b84c74a477e00ab6177d081c29462dd8
Parents: c37432f
Author: Guanghao Zhang 
Authored: Thu Aug 10 21:03:50 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Aug 11 10:58:00 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Get.java |  34 +++-
 .../org/apache/hadoop/hbase/client/Query.java   |   6 +-
 .../org/apache/hadoop/hbase/client/Scan.java|  29 ++-
 .../querymatcher/ScanQueryMatcher.java  |   7 +-
 .../querymatcher/ScanWildcardColumnTracker.java |   7 +-
 .../querymatcher/UserScanQueryMatcher.java  | 166 +++-
 .../hadoop/hbase/client/TestFromClientSide.java | 195 +++
 .../hadoop/hbase/regionserver/TestHRegion.java  |  64 ++
 .../hbase/regionserver/TestMinVersions.java |   8 +-
 .../hadoop/hbase/regionserver/TestStore.java|  19 +-
 10 files changed, 463 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4dd24c52/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index b774a9a..086a0b4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -267,10 +267,12 @@ public class Get extends Query
   /**
* Get all available versions.
* @return this for invocation chaining
+   * @deprecated It is easy to misunderstand with column family's max 
versions, so use
+   * {@link #readAllVersions()} instead.
*/
+  @Deprecated
   public Get setMaxVersions() {
-this.maxVersions = Integer.MAX_VALUE;
-return this;
+return readAllVersions();
   }
 
   /**
@@ -278,12 +280,34 @@ public class Get extends Query
* @param maxVersions maximum versions for each column
* @throws IOException if invalid number of versions
* @return this for invocation chaining
+   * @deprecated It is easy to misunderstand with column family's max 
versions, so use
+   * {@link #readVersions(int)} instead.
*/
+  @Deprecated
   public Get setMaxVersions(int maxVersions) throws IOException {
-if(maxVersions <= 0) {
-  throw new IOException("maxVersions must be positive");
+return readVersions(maxVersions);
+  }
+
+  /**
+   * Get all available versions.
+   * @return this for invocation chaining
+   */
+  public Get readAllVersions() {
+this.maxVersions = Integer.MAX_VALUE;
+return this;
+  }
+
+  /**
+   * Get up to the specified number of versions of each column.
+   * @param versions specified number of versions for each column
+   * @throws IOException if invalid number of versions
+   * @return this for invocation chaining
+   */
+  public Get readVersions(int versions) throws IOException {
+if (versions <= 0) {
+  throw new IOException("versions must be positive");
 }
-this.maxVersions = maxVersions;
+this.maxVersions = versions;
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4dd24c52/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 0bf54ae..cc9e9d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -53,9 +53,9 @@ public abstract class Query extends OperationWithAttributes {
   }
 
   /**
-   * Apply the specified server-side filter when performing the Query.
-   * Only {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)} is 
called AFTER all tests
-   * for ttl, column match, deletes and max versions have been run.
+   * Apply the specified server-side filter when performing the Query. Only
+   * {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)} is called 
AFTER all tests for ttl,
+   * column match, deletes and column family's max versions have been run.
* @param filter filter to run on the server
* @return this for invocation chaining
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/4dd24c52/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--

[29/50] [abbrv] hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-17 Thread stack
HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4317c80
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4317c80
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4317c80

Branch: refs/heads/HBASE-14070.HLC
Commit: d4317c80e62e4eb0c2e997adf4438b927dfbcd96
Parents: 0e32869
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:37:55 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4317c80/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 25b088d..24b4cb8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -331,7 +331,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {



[31/50] [abbrv] hbase git commit: HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

2017-08-17 Thread stack
HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/effd1093
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/effd1093
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/effd1093

Branch: refs/heads/HBASE-14070.HLC
Commit: effd1093b559aeba2bf66a4cf81cd4a0013de184
Parents: d37266f
Author: Reid Chan 
Authored: Tue Aug 15 15:50:22 2017 +0800
Committer: tedyu 
Committed: Tue Aug 15 09:45:19 2017 -0700

--
 .../hbase/tmpl/common/TaskMonitorTmpl.jamon | 21 +
 .../hadoop/hbase/monitoring/TaskMonitor.java| 97 +---
 .../hbase/monitoring/TestTaskMonitor.java   | 48 ++
 3 files changed, 133 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/effd1093/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
index b4a5fea..986bc3a 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
@@ -27,27 +27,8 @@ String filter = "general";
 String format = "html";
 
 <%java>
-List tasks = taskMonitor.getTasks();
-Iterator iter = tasks.iterator();
 // apply requested filter
-while (iter.hasNext()) {
-  MonitoredTask t = iter.next();
-  if (filter.equals("general")) {
-if (t instanceof MonitoredRPCHandler)
-  iter.remove();
-  } else if (filter.equals("handler")) {
-if (!(t instanceof MonitoredRPCHandler))
-  iter.remove();
-  } else if (filter.equals("rpc")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isRPCRunning())
-  iter.remove();
-  } else if (filter.equals("operation")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isOperationRunning())
-  iter.remove();
-  }
-}
+List tasks = taskMonitor.getTasks(filter);
 long now = System.currentTimeMillis();
 Collections.reverse(tasks);
 boolean first = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/effd1093/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index 780916f..ad9bd02 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -157,22 +157,52 @@ public class TaskMonitor {
* MonitoredTasks handled by this TaskMonitor.
* @return A complete list of MonitoredTasks.
*/
-  public synchronized List getTasks() {
+  public List getTasks() {
+return getTasks(null);
+  }
+
+  /**
+   * Produces a list containing copies of the current state of all non-expired 
+   * MonitoredTasks handled by this TaskMonitor.
+   * @param filter type of wanted tasks
+   * @return A filtered list of MonitoredTasks.
+   */
+  public synchronized List getTasks(String filter) {
 purgeExpiredTasks();
-ArrayList ret = Lists.newArrayListWithCapacity(tasks.size() 
+ rpcTasks.size());
-for (Iterator it = tasks.iterator();
- it.hasNext();) {
-  TaskAndWeakRefPair pair = it.next();
-  MonitoredTask t = pair.get();
-  ret.add(t.clone());
+TaskFilter taskFilter = createTaskFilter(filter);
+ArrayList results =
+Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size());
+processTasks(tasks, taskFilter, results);
+processTasks(rpcTasks, taskFilter, results);
+return results;
+  }
+
+  /**
+   * Create a task filter according to a given filter type.
+   * @param filter type of monitored task
+   * @return a task filter
+   */
+  private static TaskFilter createTaskFilter(String filter) {
+switch (TaskFilter.TaskType.getTaskType(filter)) {
+  case GENERAL: return task -> task instanceof MonitoredRPCHandler;
+  case HANDLER: return task -> !(task instanceof MonitoredRPCHandler);
+  case RPC: return task -> !(task instanceof MonitoredRPCHandler) ||
+   !((MonitoredRPCHandler) task).isRPCRunning();
+  case OPERATION: return task -> !(task instanceof MonitoredRPCHandler) ||
+ !((MonitoredRPCHandler) 
task).isOperationRunning();
+  default: return task 

[02/50] [abbrv] hbase git commit: HBASE-18024 HRegion#initializeRegionInternals should not re-create .hregioninfo file when the region directory no longer exists

2017-08-17 Thread stack
HBASE-18024 HRegion#initializeRegionInternals should not re-create .hregioninfo 
file when the region directory no longer exists


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efd211de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efd211de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efd211de

Branch: refs/heads/HBASE-14070.HLC
Commit: efd211debd8a37f215b1a6fdb982aa3ca890bc40
Parents: 5507150
Author: Esteban Gutierrez 
Authored: Fri Jul 21 13:13:00 2017 -0500
Committer: Esteban Gutierrez 
Committed: Thu Aug 10 17:56:17 2017 -0500

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 +++-
 .../hbase/regionserver/HRegionFileSystem.java   | 31 ++--
 .../hadoop/hbase/regionserver/TestHRegion.java  |  7 ++-
 .../hbase/regionserver/TestRegionOpen.java  | 53 +++-
 .../TestStoreFileRefresherChore.java|  1 +
 .../TestWALMonotonicallyIncreasingSeqId.java|  5 +-
 6 files changed, 97 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/efd211de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 483cb36..3b24f3d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -888,8 +888,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
 
 // Write HRI to a file in case we need to recover hbase:meta
-status.setStatus("Writing region info on filesystem");
-fs.checkRegionInfoOnFilesystem();
+// Only the primary replica should write .regioninfo
+if (this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) 
{
+  status.setStatus("Writing region info on filesystem");
+  fs.checkRegionInfoOnFilesystem();
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Skipping creation of .regioninfo file for " + 
this.getRegionInfo());
+  }
+}
 
 // Initialize all the HStores
 status.setStatus("Initializing all the Stores");

http://git-wip-us.apache.org/repos/asf/hbase/blob/efd211de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 1041260..9cb1316 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -893,9 +893,19 @@ public class HRegionFileSystem {
 // only should be sufficient. I don't want to read the file every time to 
check if it pb
 // serialized.
 byte[] content = getRegionInfoFileContent(regionInfoForFs);
+
+// Verify if the region directory exists before opening a region. We need 
to do this since if
+// the region directory doesn't exist we will re-create the region 
directory and a new HRI
+// when HRegion.openHRegion() is called.
 try {
-  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
+  FileStatus status = fs.getFileStatus(getRegionDir());
+} catch (FileNotFoundException e) {
+  LOG.warn(getRegionDir() + " doesn't exist for region: " + 
regionInfoForFs.getEncodedName() +
+  " on table " + regionInfo.getTable());
+}
 
+try {
+  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
   FileStatus status = fs.getFileStatus(regionInfoFile);
   if (status != null && status.getLen() == content.length) {
 // Then assume the content good and move on.
@@ -988,7 +998,13 @@ public class HRegionFileSystem {
 }
 
 // Write HRI to a file in case we need to recover hbase:meta
-regionFs.writeRegionInfoOnFilesystem(false);
+// Only primary replicas should write region info
+if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+  regionFs.writeRegionInfoOnFilesystem(false);
+} else {
+  if (LOG.isDebugEnabled())
+LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
+}
 return regionFs;
   }
 
@@ -1018,8 +1034,15 @@ public class HRegionFileSystem {
   regionFs.cleanupSplitsDir();
   regionFs.cleanupMergesDir();
 
-  // if it doesn't exists, Write HRI to a file, in case we need to recover 
hbase:m

[25/50] [abbrv] hbase git commit: HBASE-18522 Add RowMutations support to Batch

2017-08-17 Thread stack
HBASE-18522 Add RowMutations support to Batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/096dac2e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/096dac2e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/096dac2e

Branch: refs/heads/HBASE-14070.HLC
Commit: 096dac2e83c675f212bad4f91888d8440ba152ca
Parents: bd40073
Author: Jerry He 
Authored: Mon Aug 14 10:39:46 2017 -0700
Committer: Jerry He 
Committed: Mon Aug 14 10:39:46 2017 -0700

--
 .../hbase/client/MultiServerCallable.java   | 64 +++-
 .../org/apache/hadoop/hbase/client/Table.java   |  4 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  6 +-
 .../shaded/protobuf/ResponseConverter.java  | 37 ++-
 .../hbase/client/TestFromClientSide3.java   | 46 ++
 .../hadoop/hbase/client/TestMultiParallel.java  | 34 ++-
 6 files changed, 168 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/096dac2e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 33c9a0b..7f6052e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -93,30 +94,64 @@ class MultiServerCallable extends 
CancellableRegionServerCallable
 RegionAction.Builder regionActionBuilder = RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder();
 MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
-List cells = null;
-// The multi object is a list of Actions by region.  Iterate by region.
+
+// Pre-size. Presume at least a KV per Action. There are likely more.
+List cells =
+(this.cellBlock ? new ArrayList(countOfActions) : null);
+
 long nonceGroup = multiAction.getNonceGroup();
 if (nonceGroup != HConstants.NO_NONCE) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
+// Index to track RegionAction within the MultiRequest
+int regionActionIndex = -1;
+// Map from a created RegionAction to the original index for a 
RowMutations within
+// its original list of actions
+Map rowMutationsIndexMap = new HashMap<>();
+// The multi object is a list of Actions by region. Iterate by region.
 for (Map.Entry> e: 
this.multiAction.actions.entrySet()) {
   final byte [] regionName = e.getKey();
   final List actions = e.getValue();
   regionActionBuilder.clear();
   regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
   HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName));
-  if (this.cellBlock) {
-// Pre-size. Presume at least a KV per Action.  There are likely more.
-if (cells == null) cells = new ArrayList<>(countOfActions);
-// Send data in cellblocks. The call to buildNoDataMultiRequest will 
skip RowMutations.
-// They have already been handled above. Guess at count of cells
-regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, actions, cells,
-  regionActionBuilder, actionBuilder, mutationBuilder);
-  } else {
-regionActionBuilder = RequestConverter.buildRegionAction(regionName, 
actions,
-  regionActionBuilder, actionBuilder, mutationBuilder);
+
+  int rowMutations = 0;
+  for (Action action : actions) {
+Row row = action.getAction();
+// Row Mutations are a set of Puts and/or Deletes all to be applied 
atomically
+// on the one row. We do separate RegionAction for each RowMutations.
+// We maintain a map to keep track of this RegionAction and the 
original Action index.
+if (row instanceof RowMutations) {
+  RowMutations rms = (RowMutations)row;
+  if (this.cellBlock) {
+// Build a multi request absent its Cell payload. Send data in 
cellblocks.
+regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, rms, cells,
+  regionActionBuilder, actionBuilder, mutationBuilder);
+  } else {
+regionActionBuilder = 
RequestConverter.buildRegionAction(regionName, rms);
+  }
+  regionActionBuilder.setAtomic(true);
+  multiRequestBuilde

[24/50] [abbrv] hbase git commit: HBASE-18593 Tell m2eclipse what to do w/ replacer plugin

2017-08-17 Thread stack
HBASE-18593 Tell m2eclipse what to do w/ replacer plugin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bd400730
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bd400730
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bd400730

Branch: refs/heads/HBASE-14070.HLC
Commit: bd40073094b248f74ac9a3c0fff7ef6668265feb
Parents: 424dff2
Author: Michael Stack 
Authored: Mon Aug 14 09:13:27 2017 -0700
Committer: Michael Stack 
Committed: Mon Aug 14 09:13:27 2017 -0700

--
 hbase-protocol-shaded/pom.xml | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bd400730/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index b28c03e..4c72eca 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -192,6 +192,23 @@
 
   
 
+
+  
+
+  com.google.code.maven-replacer-plugin
+
+replacer
+[1.5.3,)
+
+  replace
+
+  
+  
+
+ false
+
+  
+
   
 
   



[38/50] [abbrv] hbase git commit: HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate locate request

2017-08-17 Thread stack
HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate 
locate request


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59ffb611
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59ffb611
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59ffb611

Branch: refs/heads/HBASE-14070.HLC
Commit: 59ffb6119b2e4613bc8baec9a0738096184a3d92
Parents: 665fd0d
Author: Guanghao Zhang 
Authored: Tue Aug 15 16:15:29 2017 +0800
Committer: Guanghao Zhang 
Committed: Wed Aug 16 13:08:40 2017 +0800

--
 .../hbase/client/AsyncNonMetaRegionLocator.java | 119 ++-
 .../client/TestAsyncNonMetaRegionLocator.java   |   1 +
 2 files changed, 63 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59ffb611/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 31f369c..ab1f0db 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -29,18 +29,18 @@ import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -107,7 +107,7 @@ class AsyncNonMetaRegionLocator {
 public final Set pendingRequests = new HashSet<>();
 
 public final Map> 
allRequests =
-new HashMap<>();
+new LinkedHashMap<>();
 
 public boolean hasQuota(int max) {
   return pendingRequests.size() < max;
@@ -120,6 +120,49 @@ class AsyncNonMetaRegionLocator {
 public void send(LocateRequest req) {
   pendingRequests.add(req);
 }
+
+public Optional getCandidate() {
+  return allRequests.keySet().stream().filter(r -> 
!isPending(r)).findFirst();
+}
+
+public void clearCompletedRequests(Optional location) {
+  for (Iterator>> iter = allRequests
+  .entrySet().iterator(); iter.hasNext();) {
+Map.Entry> entry = 
iter.next();
+if (tryComplete(entry.getKey(), entry.getValue(), location)) {
+  iter.remove();
+}
+  }
+}
+
+private boolean tryComplete(LocateRequest req, 
CompletableFuture future,
+Optional location) {
+  if (future.isDone()) {
+return true;
+  }
+  if (!location.isPresent()) {
+return false;
+  }
+  HRegionLocation loc = location.get();
+  boolean completed;
+  if (req.locateType.equals(RegionLocateType.BEFORE)) {
+// for locating the row before current row, the common case is to find 
the previous region in
+// reverse scan, so we check the endKey first. In general, the 
condition should be startKey <
+// req.row and endKey >= req.row. Here we split it to endKey == 
req.row || (endKey > req.row
+// && startKey < req.row). The two conditions are equal since startKey 
< endKey.
+int c = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
+completed =
+c == 0 || (c > 0 && 
Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
+  } else {
+completed = loc.getRegionInfo().containsRow(req.row);
+  }
+  if (completed) {
+future.complete(loc);
+return true;
+  } else {
+return false;
+  }
+}
   }
 
   AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
@@ -186,48 +229,27 @@ class AsyncNonMetaRegionLocator {
 }
   }
 
-  private boolean tryComplete(LocateRequest req, 
CompletableFuture future,
-  HRegionLocation loc) {
-if (future.isDone()) {
-  return true;
-}
-boolean completed;
-if (req.locateType.equals(RegionLocateType.BEFORE)) {
-  // for locating the row before current row, the common case is to find 
the previous region in
-  // reverse scan, so we check the endKey first. In general, the condition 
should be startKey <
-  // req.row and endKey >=

[48/50] [abbrv] hbase git commit: HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the implementation

2017-08-17 Thread stack
HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the 
implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/092dc6de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/092dc6de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/092dc6de

Branch: refs/heads/HBASE-14070.HLC
Commit: 092dc6de8483eea1b4e0d960cf22e65359379da1
Parents: a17ed03
Author: Guanghao Zhang 
Authored: Wed Aug 16 18:00:53 2017 +0800
Committer: Guanghao Zhang 
Committed: Thu Aug 17 09:47:39 2017 +0800

--
 .../hadoop/hbase/client/AsyncAdminBuilder.java  | 16 
 .../hadoop/hbase/client/AsyncAdminBuilderBase.java  | 12 ++--
 .../apache/hadoop/hbase/client/AsyncConnection.java |  4 ++--
 .../hadoop/hbase/client/AsyncConnectionImpl.java| 12 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  2 +-
 .../hadoop/hbase/client/TestAsyncAdminBuilder.java  |  6 +++---
 6 files changed, 26 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/092dc6de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
index d706949..fb0aefd 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
@@ -29,7 +29,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
  * create a new AsyncAdmin instance.
  */
 @InterfaceAudience.Public
-public interface AsyncAdminBuilder {
+public interface AsyncAdminBuilder {
 
   /**
* Set timeout for a whole admin operation. Operation timeout and max 
attempt times(or max retry
@@ -39,7 +39,7 @@ public interface AsyncAdminBuilder {
* @param unit
* @return this for invocation chaining
*/
-  AsyncAdminBuilder setOperationTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setOperationTimeout(long timeout, TimeUnit unit);
 
   /**
* Set timeout for each rpc request.
@@ -47,7 +47,7 @@ public interface AsyncAdminBuilder {
* @param unit
* @return this for invocation chaining
*/
-  AsyncAdminBuilder setRpcTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRpcTimeout(long timeout, TimeUnit unit);
 
   /**
* Set the base pause time for retrying. We use an exponential policy to 
generate sleep time when
@@ -56,7 +56,7 @@ public interface AsyncAdminBuilder {
* @param unit
* @return this for invocation chaining
*/
-  AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit);
 
   /**
* Set the max retry times for an admin operation. Usually it is the max 
attempt times minus 1.
@@ -65,7 +65,7 @@ public interface AsyncAdminBuilder {
* @param maxRetries
* @return this for invocation chaining
*/
-  default AsyncAdminBuilder setMaxRetries(int maxRetries) {
+  default AsyncAdminBuilder setMaxRetries(int maxRetries) {
 return setMaxAttempts(retries2Attempts(maxRetries));
   }
 
@@ -76,18 +76,18 @@ public interface AsyncAdminBuilder {
* @param maxAttempts
* @return this for invocation chaining
*/
-  AsyncAdminBuilder setMaxAttempts(int maxAttempts);
+  AsyncAdminBuilder setMaxAttempts(int maxAttempts);
 
   /**
* Set the number of retries that are allowed before we start to log.
* @param startLogErrorsCnt
* @return this for invocation chaining
*/
-  AsyncAdminBuilder setStartLogErrorsCnt(int startLogErrorsCnt);
+  AsyncAdminBuilder setStartLogErrorsCnt(int startLogErrorsCnt);
 
   /**
* Create a {@link AsyncAdmin} instance.
* @return a {@link AsyncAdmin} instance
*/
-  T build();
+  AsyncAdmin build();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/092dc6de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
index 013e8d7..77ff88d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
@@ -25,7 +25,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
  * Base class for all asynchronous admin builders.
  */
 @InterfaceAudience.Private
-abstract class Asyn

[32/50] [abbrv] hbase git commit: HBASE-18599 Add missing @Deprecated annotations

2017-08-17 Thread stack
HBASE-18599 Add missing @Deprecated annotations

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70c4f78c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70c4f78c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70c4f78c

Branch: refs/heads/HBASE-14070.HLC
Commit: 70c4f78ce03cf9e13d148e75445b19d43571a09a
Parents: effd109
Author: Lars Francke 
Authored: Tue Aug 15 09:36:51 2017 +0200
Committer: Michael Stack 
Committed: Tue Aug 15 10:44:50 2017 -0700

--
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 17 +--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  8 +++--
 .../org/apache/hadoop/hbase/client/Admin.java   | 32 +++-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  5 ++-
 .../client/metrics/ServerSideScanMetrics.java   | 15 +++--
 .../hbase/coprocessor/RegionObserver.java   |  5 ++-
 6 files changed, 65 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 5fe85cc..507bf49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -100,14 +100,18 @@ public class HColumnDescriptor implements 
ColumnFamilyDescriptor, Comparable:
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(String)}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(String)}.
*/
+  @Deprecated
   public HColumnDescriptor(final String familyName) {
 this(Bytes.toBytes(familyName));
   }
@@ -118,8 +122,11 @@ public class HColumnDescriptor implements 
ColumnFamilyDescriptor, Comparable:
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(byte[])}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
*/
+  @Deprecated
   public HColumnDescriptor(final byte [] familyName) {
 this(new ModifyableColumnFamilyDescriptor(familyName));
   }
@@ -128,9 +135,13 @@ public class HColumnDescriptor implements 
ColumnFamilyDescriptor, Comparablehttps://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+   * Use {@link 
ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
*/
+  @Deprecated
   public HColumnDescriptor(HColumnDescriptor desc) {
 this(desc, true);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index c09d434..a0f23c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -44,7 +44,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableCo
  * if the table is read only, the maximum size of the memstore,
  * when the region split should occur, coprocessors associated with it etc...
  * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
- * use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
+ * Use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -602,9 +602,13 @@ public class HTableDescriptor implements TableDescriptor, 
Comparablehttps://issues.apache.org/jira/browse/HBASE-18008";>HBASE-18008).
+   * Use {@link #getColumnFamilyNames()}.
*/
+  @Deprecated
   public Set getFamiliesKeys() {
 return delegatee.getColumnFamilyNames();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d2acae3..8de9f89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/A

[22/50] [abbrv] hbase git commit: HBASE-18588 Verify we're using netty .so epolling on linux post HBASE-18271

2017-08-17 Thread stack
HBASE-18588 Verify we're using netty .so epolling on linux post HBASE-18271


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ddbaf56c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ddbaf56c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ddbaf56c

Branch: refs/heads/HBASE-14070.HLC
Commit: ddbaf56ca8c712dc44608d3323280f578c56aed2
Parents: e2b797b
Author: Michael Stack 
Authored: Mon Aug 14 09:09:56 2017 -0700
Committer: Michael Stack 
Committed: Mon Aug 14 09:09:56 2017 -0700

--
 hbase-protocol-shaded/pom.xml | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ddbaf56c/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index b28c03e..4c72eca 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -192,6 +192,23 @@
 
   
 
+
+  
+
+  com.google.code.maven-replacer-plugin
+
+replacer
+[1.5.3,)
+
+  replace
+
+  
+  
+
+ false
+
+  
+
   
 
   



[49/50] [abbrv] hbase git commit: HBASE-18375: Fix the bug where the pool chunks from ChunkCreator are deallocated and not returned to pool, because there is no reference to them

2017-08-17 Thread stack
HBASE-18375: Fix the bug where the pool chunks from ChunkCreator are 
deallocated and not returned to pool, because there is no reference to them


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75a6b368
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75a6b368
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75a6b368

Branch: refs/heads/HBASE-14070.HLC
Commit: 75a6b36849c58d6a751f57226ab0c8f7884a9e87
Parents: 092dc6d
Author: anastas 
Authored: Thu Aug 17 18:23:19 2017 +0300
Committer: anastas 
Committed: Thu Aug 17 18:23:19 2017 +0300

--
 .../regionserver/CellChunkImmutableSegment.java |   5 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java | 171 +--
 .../hbase/regionserver/CompactionPipeline.java  |  19 ++-
 .../hbase/regionserver/MemStoreLABImpl.java |  27 ++-
 .../hbase/regionserver/TestMemStoreLAB.java |  12 +-
 .../TestMemstoreLABWithoutPool.java |   3 +-
 6 files changed, 126 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75a6b368/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
index cdda279..3653166 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
@@ -176,10 +176,7 @@ public class CellChunkImmutableSegment extends 
ImmutableSegment {
   private int createCellReference(ByteBufferKeyValue cell, ByteBuffer 
idxBuffer, int idxOffset) {
 int offset = idxOffset;
 int dataChunkID = cell.getChunkId();
-// ensure strong pointer to data chunk, as index is no longer directly 
points to it
-Chunk c = ChunkCreator.getInstance().saveChunkFromGC(dataChunkID);
-// if c is null, it means that this cell chunks was already released 
shouldn't happen
-assert (c!=null);
+
 offset = ByteBufferUtils.putInt(idxBuffer, offset, dataChunkID);// 
write data chunk id
 offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getOffset());  
// offset
 offset = ByteBufferUtils.putInt(idxBuffer, offset, 
KeyValueUtil.length(cell)); // length

http://git-wip-us.apache.org/repos/asf/hbase/blob/75a6b368/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 7e5395c..e818426 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -18,7 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.lang.ref.WeakReference;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
@@ -58,21 +57,8 @@ public class ChunkCreator {
   // the header size need to be changed in case chunk id size is changed
   public static final int SIZEOF_CHUNK_HEADER = Bytes.SIZEOF_INT;
 
-  // An object pointed by a weak reference can be garbage collected, in 
opposite to an object
-  // referenced by a strong (regular) reference. Every chunk created via 
ChunkCreator is referenced
-  // from either weakChunkIdMap or strongChunkIdMap.
-  // Upon chunk C creation, C's ID is mapped into weak reference to C, in 
order not to disturb C's
-  // GC in case all other reference to C are going to be removed.
-  // When chunk C is referenced from CellChunkMap (via C's ID) it is possible 
to GC the chunk C.
-  // To avoid that upon inserting C into CellChunkMap, C's ID is mapped into 
strong (regular)
-  // reference to C.
-
-  // map that doesn't influence GC
-  private Map> weakChunkIdMap =
-  new ConcurrentHashMap>();
-
-  // map that keeps chunks from garbage collection
-  private Map strongChunkIdMap = new 
ConcurrentHashMap();
+  // mapping from chunk IDs to chunks
+  private Map chunkIdMap = new ConcurrentHashMap();
 
   private final int chunkSize;
   private final boolean offheap;
@@ -95,7 +81,7 @@ public class ChunkCreator {
   }
 
   /**
-   * Initializes the instance of MSLABChunkCreator
+   * Initializes the instance of ChunkCreator
* @param chunkSize the chunkSize
* @param offheap indicates if the chunk is to be created offheap or not
* @param globalMemStoreSize  the global memsto

[03/50] [abbrv] hbase git commit: HBASE-18563 Fix RAT License complaint about website jenkins scripts

2017-08-17 Thread stack
HBASE-18563 Fix RAT License complaint about website jenkins scripts


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c37432fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c37432fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c37432fe

Branch: refs/heads/HBASE-14070.HLC
Commit: c37432fefbf4d1ff5bf80f5227c986f3bde281a1
Parents: efd211d
Author: Esteban Gutierrez 
Authored: Thu Aug 10 20:08:03 2017 -0500
Committer: Esteban Gutierrez 
Committed: Thu Aug 10 20:08:03 2017 -0500

--
 .../jenkins-scripts/check-website-links.sh  | 20 +++-
 .../jenkins-scripts/generate-hbase-website.sh   | 18 ++
 2 files changed, 37 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c37432fe/dev-support/jenkins-scripts/check-website-links.sh
--
diff --git a/dev-support/jenkins-scripts/check-website-links.sh 
b/dev-support/jenkins-scripts/check-website-links.sh
index c23abbb..1f7cd1c 100755
--- a/dev-support/jenkins-scripts/check-website-links.sh
+++ b/dev-support/jenkins-scripts/check-website-links.sh
@@ -1,4 +1,22 @@
 #!/bin/bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
 
 # This script is designed to run as a Jenkins job, such as at
 # https://builds.apache.org/view/All/job/HBase%20Website%20Link%20Checker/
@@ -44,4 +62,4 @@ if ! grep -q 'ERROR' link_report/index.html; then
   exit 1
 else
   echo "No errors found. Warnings might be present."
-fi
\ No newline at end of file
+fi

http://git-wip-us.apache.org/repos/asf/hbase/blob/c37432fe/dev-support/jenkins-scripts/generate-hbase-website.sh
--
diff --git a/dev-support/jenkins-scripts/generate-hbase-website.sh 
b/dev-support/jenkins-scripts/generate-hbase-website.sh
index a3f7823..06d160a 100644
--- a/dev-support/jenkins-scripts/generate-hbase-website.sh
+++ b/dev-support/jenkins-scripts/generate-hbase-website.sh
@@ -1,4 +1,22 @@
 #!/bin/bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
 
 # This script is meant to run as part of a Jenkins job such as
 # https://builds.apache.org/job/hbase_generate_website/



[27/50] [abbrv] hbase git commit: HBASE-18303 Clean up @Parameter boilerplate

2017-08-17 Thread stack
HBASE-18303 Clean up @Parameter boilerplate


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b26ccda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b26ccda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b26ccda

Branch: refs/heads/HBASE-14070.HLC
Commit: 0b26ccdaa1b8700e7958aeebbaf9cad81e737dd0
Parents: ea8fa59
Author: Mike Drob 
Authored: Fri Jun 30 12:13:56 2017 -0500
Committer: Mike Drob 
Committed: Mon Aug 14 14:23:24 2017 -0500

--
 .../hadoop/hbase/filter/TestKeyOnlyFilter.java  | 15 -
 .../hadoop/hbase/HBaseCommonTestingUtility.java | 22 
 .../apache/hadoop/hbase/types/TestStruct.java   | 18 +++-
 .../hadoop/hbase/util/TestByteBufferUtils.java  |  8 ++-
 .../hbase/codec/keyvalue/TestKeyValueTool.java  | 16 +++---
 .../codec/prefixtree/row/TestRowEncoder.java|  6 +-
 .../hadoop/hbase/rest/TestMultiRowResource.java |  5 +
 .../hadoop/hbase/HBaseTestingUtility.java   | 20 +-
 .../encoding/TestSeekToBlockWithEncoders.java   |  4 ++--
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  3 ++-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java |  3 ++-
 .../hbase/io/hfile/TestHFileBlockIndex.java |  3 ++-
 .../hbase/io/hfile/TestHFileWriterV3.java   |  3 ++-
 .../hbase/util/TestCoprocessorScanPolicy.java   |  3 ++-
 14 files changed, 59 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b26ccda/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
index 33e3cd9..f957b59 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
 
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -44,20 +45,12 @@ import org.junit.runners.Parameterized.Parameters;
 @RunWith(Parameterized.class)
 public class TestKeyOnlyFilter {
 
-  private final boolean lenAsVal;
+  @Parameterized.Parameter
+  public boolean lenAsVal;
 
   @Parameters
   public static Collection<Object[]> parameters() {
-    List<Object[]> paramList = new ArrayList<>(2);
-    {
-      paramList.add(new Object[] { false });
-      paramList.add(new Object[] { true });
-    }
-    return paramList;
-  }
-
-  public TestKeyOnlyFilter(boolean lenAsVal) {
-    this.lenAsVal = lenAsVal;
+    return HBaseCommonTestingUtility.BOOLEAN_PARAMETERIZED;
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b26ccda/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
index e191046..1790f4a 100644
--- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
+++ 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 import java.util.UUID;
 
 import org.apache.commons.io.FileUtils;
@@ -28,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.compress.Compression;
 
 /**
  * Common helpers for testing HBase that do not depend on specific server/etc. 
things.
@@ -37,6 +40,25 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 public class HBaseCommonTestingUtility {
   protected static final Log LOG = 
LogFactory.getLog(HBaseCommonTestingUtility.class);
 
+  /** Compression algorithms to use in parameterized JUnit 4 tests */
+  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
+    Arrays.asList(new Object[][] {
+      { Compression.Algorithm.NONE },
+      { Compression.Algorithm.GZ }
+    });
+
+  /** This is for unit tests parameterized with a single boolean. */
+  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
+      Arrays.asList(new Object[][] {
+

[18/50] [abbrv] hbase git commit: HBASE-14135 Merge backup images (Vladimir Rodionov)

2017-08-17 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/05e6e569/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/HFileSplitterJob.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/HFileSplitterJob.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/HFileSplitterJob.java
deleted file mode 100644
index ba1b65e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/HFileSplitterJob.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
-import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
-import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
-import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * A tool to split HFiles into new region boundaries as a MapReduce job. The 
tool generates HFiles
- * for later bulk importing.
- */
-@InterfaceAudience.Private
-public class HFileSplitterJob extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(HFileSplitterJob.class);
-  final static String NAME = "HFileSplitterJob";
-  public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
-  public final static String TABLES_KEY = "hfile.input.tables";
-  public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
-  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
-
-  public HFileSplitterJob() {
-  }
-
-  protected HFileSplitterJob(final Configuration c) {
-super(c);
-  }
-
-  /**
-   * A mapper that just writes out cells. This one can be used together with
-   * {@link KeyValueSortReducer}
-   */
-  static class HFileCellMapper extends
-      Mapper<NullWritable, KeyValue, ImmutableBytesWritable, KeyValue> {
-
-@Override
-public void map(NullWritable key, KeyValue value, Context context) throws 
IOException,
-InterruptedException {
-  // Convert value to KeyValue if subclass
-  if (!value.getClass().equals(KeyValue.class)) {
-value =
-new KeyValue(value.getRowArray(), value.getRowOffset(), 
value.getRowLength(),
-value.getFamilyArray(), value.getFamilyOffset(), 
value.getFamilyLength(),
-value.getQualifierArray(), value.getQualifierOffset(), 
value.getQualifierLength(),
-value.getTimestamp(), Type.codeToType(value.getTypeByte()), 
value.getValueArray(),
-value.getValueOffset(), value.getValueLength());
-  }
-  context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), 
value);
-}
-
-@Override
-public void setup(Context context) throws IOException {
-  // do nothing
-}
-  }
-
-  /**
-   * Sets up the actual job.
-   * @param args The command line parameters.
-   * @return The newly created job.
-   * @throws IOException When setting up the job fails.
-   */
-  public Job createS

[06/50] [abbrv] hbase git commit: HBASE-18469 Correct RegionServer metric of totalRequestCount

2017-08-17 Thread stack
HBASE-18469 Correct RegionServer metric of totalRequestCount


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/679f34e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/679f34e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/679f34e8

Branch: refs/heads/HBASE-14070.HLC
Commit: 679f34e88108d870b79f3175b195e8e7d803c331
Parents: 8da77b4
Author: Yu Li 
Authored: Fri Aug 11 14:13:18 2017 +0800
Committer: Yu Li 
Committed: Fri Aug 11 14:13:18 2017 +0800

--
 .../regionserver/MetricsRegionServerSource.java |  3 ++
 .../MetricsRegionServerWrapper.java |  2 +
 .../MetricsRegionServerSourceImpl.java  |  2 +
 .../MetricsRegionServerWrapperImpl.java |  5 +++
 .../hbase/regionserver/RSRpcServices.java   | 14 ++-
 .../MetricsRegionServerWrapperStub.java |  5 +++
 .../regionserver/TestMetricsRegionServer.java   |  4 ++
 .../regionserver/TestRegionServerMetrics.java   | 40 +++-
 8 files changed, 71 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index df522d3..9656894 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -244,6 +244,9 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
   "Total number of requests this RegionServer has answered.";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
+  "Total number of region requests this RegionServer has answered, count 
by row-level action";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
   "Number of read requests this region server has answered.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 0aa625c..ccb9de2 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -457,4 +457,6 @@ public interface MetricsRegionServerWrapper {
   long getDeleteFamilyBloomHitCount();
 
   long getTrailerHitCount();
+
+  long getTotalRowActionRequestCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 94b21bc..e69e17c 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -354,6 +354,8 @@ public class MetricsRegionServerSourceImpl
   .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
+  .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, 
TOTAL_ROW_ACTION_REQUEST_COUNT_DESC),
+  rsWrap.getTotalRowActionRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
   rsWrap.getReadRequestsCount())
   .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, 
FILTERED_READ_REQUEST_COUNT_DESC),

http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Metr

[08/50] [abbrv] hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

2017-08-17 Thread stack
HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

If an unassign is unable to communicate with its target server,
expire the server and then wait on a signal from ServerCrashProcedure
before proceeding. The unassign has lock on the region so no one else
can proceed till we complete. We prevent any subsequent assign from
running until logs have been split for crashed server.

In AssignProcedure, do not assign if table is DISABLING or DISABLED.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Change remoteCallFailed so it returns boolean on whether implementor
wants to stay suspended or not.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
  Doc. Also, if we are unable to talk to remote server, expire it and
then wait on SCP to wake us up after it has processed logs for failed
server.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f44b248
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f44b248
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f44b248

Branch: refs/heads/HBASE-14070.HLC
Commit: 6f44b24860192d81dbf88ffd834d4b998a6fe636
Parents: cabdbf1
Author: Michael Stack 
Authored: Thu Aug 10 14:22:56 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 07:16:33 2017 -0700

--
 .../hbase/procedure2/ProcedureExecutor.java | 10 +--
 .../hadoop/hbase/master/MasterRpcServices.java  |  2 +-
 .../hbase/master/TableNamespaceManager.java |  2 +-
 .../hadoop/hbase/master/TableStateManager.java  |  1 +
 .../master/assignment/AssignProcedure.java  | 13 +++-
 .../assignment/RegionTransitionProcedure.java   | 44 ++--
 .../master/assignment/UnassignProcedure.java| 70 ++--
 .../master/procedure/DisableTableProcedure.java |  4 +-
 .../master/procedure/RSProcedureDispatcher.java |  2 +-
 .../master/procedure/ServerCrashException.java  |  3 +-
 .../master/procedure/ServerCrashProcedure.java  |  3 +-
 .../TestSplitTransactionOnCluster.java  | 17 +++--
 12 files changed, 100 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f44b248/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c110c2d..d0052f6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -315,7 +315,7 @@ public class ProcedureExecutor {
   @Override
   public void setMaxProcId(long maxProcId) {
 assert lastProcId.get() < 0 : "expected only one call to 
setMaxProcId()";
-LOG.debug("Load maxProcId=" + maxProcId);
+LOG.debug("Load max pid=" + maxProcId);
 lastProcId.set(maxProcId);
   }
 
@@ -727,7 +727,7 @@ public class ProcedureExecutor {
!(procedures.containsKey(oldProcId) || 
completed.containsKey(oldProcId)) &&
nonceKeysToProcIdsMap.containsKey(nonceKey)) {
   if (traceEnabled) {
-LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be 
submitted");
+LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be 
submitted");
   }
   Threads.sleep(100);
 }
@@ -999,9 +999,9 @@ public class ProcedureExecutor {
   public void removeResult(final long procId) {
 CompletedProcedureRetainer retainer = completed.get(procId);
 if (retainer == null) {
-  assert !procedures.containsKey(procId) : "procId=" + procId + " is still 
running";
+  assert !procedures.containsKey(procId) : "pid=" + procId + " is still 
running";
   if (LOG.isDebugEnabled()) {
-LOG.debug("procId=" + procId + " already removed by the cleaner.");
+LOG.debug("pid=" + procId + " already removed by the cleaner.");
   }
   return;
 }
@@ -1349,7 +1349,7 @@ public class ProcedureExecutor {
   return LockState.LOCK_YIELD_WAIT;
 } catch (Throwable e) {
   // Catch NullPointerExceptions or similar errors...
-  LOG.fatal("CODE-BUG: Uncaught runtime exception fo " + proc, e);
+  LOG.fatal("CODE-BUG: Uncaught runtime exception for " + proc, e);
 }
 
 // allows to kill the executor before something is stored to the wal.

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f44b248/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/

[28/50] [abbrv] hbase git commit: HBASE-18533 Expose BucketCache values to be configured

2017-08-17 Thread stack
HBASE-18533 Expose BucketCache values to be configured

Before this commit, BucketCache always used the default values.
This commit adds the ability to configure these values.

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e32869f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e32869f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e32869f

Branch: refs/heads/HBASE-14070.HLC
Commit: 0e32869f01697abf29292aa786d0cdcca10213c6
Parents: 0b26ccd
Author: Zach York 
Authored: Wed Aug 2 14:43:03 2017 -0700
Committer: tedyu 
Committed: Mon Aug 14 13:27:26 2017 -0700

--
 .../hadoop/hbase/io/hfile/CacheConfig.java  |   2 +-
 .../hbase/io/hfile/bucket/BucketCache.java  | 126 ++-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 114 -
 .../io/hfile/bucket/TestBucketWriterThread.java |   3 +-
 4 files changed, 214 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e32869f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 140009b..13f048e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -671,7 +671,7 @@ public class CacheConfig {
   // Bucket cache logs its stats on creation internal to the constructor.
   bucketCache = new BucketCache(bucketCacheIOEngineName,
 bucketCacheSize, blockSize, bucketSizes, writerThreads, 
writerQueueLen, persistentPath,
-ioErrorsTolerationDuration);
+ioErrorsTolerationDuration, c);
 } catch (IOException ioex) {
   LOG.error("Can't instantiate bucket cache", ioex); throw new 
RuntimeException(ioex);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e32869f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 1084399..79b1f4d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -52,8 +52,11 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -100,14 +103,23 @@ import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFa
 public class BucketCache implements BlockCache, HeapSize {
   private static final Log LOG = LogFactory.getLog(BucketCache.class);
 
+  /** Priority buckets config */
+  static final String SINGLE_FACTOR_CONFIG_NAME = 
"hbase.bucketcache.single.factor";
+  static final String MULTI_FACTOR_CONFIG_NAME = 
"hbase.bucketcache.multi.factor";
+  static final String MEMORY_FACTOR_CONFIG_NAME = 
"hbase.bucketcache.memory.factor";
+  static final String EXTRA_FREE_FACTOR_CONFIG_NAME = 
"hbase.bucketcache.extrafreefactor";
+  static final String ACCEPT_FACTOR_CONFIG_NAME = 
"hbase.bucketcache.acceptfactor";
+  static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
+
   /** Priority buckets */
-  private static final float DEFAULT_SINGLE_FACTOR = 0.25f;
-  private static final float DEFAULT_MULTI_FACTOR = 0.50f;
-  private static final float DEFAULT_MEMORY_FACTOR = 0.25f;
-  private static final float DEFAULT_EXTRA_FREE_FACTOR = 0.10f;
+  @VisibleForTesting
+  static final float DEFAULT_SINGLE_FACTOR = 0.25f;
+  static final float DEFAULT_MULTI_FACTOR = 0.50f;
+  static final float DEFAULT_MEMORY_FACTOR = 0.25f;
+  static final float DEFAULT_MIN_FACTOR = 0.85f;
 
+  private static final float DEFAULT_EXTRA_FREE_FACTOR = 0.10f;
   private static final float DEFAULT_ACCEPT_FACTOR = 0.95f;
-  private static final float DEFAULT_MIN_FACTOR = 0.85f;
 
   // Number of blocks to clear for each of the bucket size that is full
   private static final int DEFAULT_FREE_ENTIRE_

[44/50] [abbrv] hbase git commit: HBASE-18511 Default no regions on master

2017-08-17 Thread stack
HBASE-18511 Default no regions on master

Changes the configuration hbase.balancer.tablesOnMaster from list of
table names to instead be a boolean; true if master carries
tables/regions and false if it does not.

Adds a new configuration hbase.balancer.tablesOnMaster.systemTablesOnly.
If true, hbase.balancer.tablesOnMaster is considered true but only
system tables are put on the master.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
  Master was claiming itself active master though it had stopped. Fix
the activeMaster flag. Set it to false on exit.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
 Add new configs and convenience methods for getting current state of
settings.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 Move configs up into super Interface and now the settings mean
different, remove the no longer needed processing.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/47344671
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/47344671
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/47344671

Branch: refs/heads/HBASE-14070.HLC
Commit: 473446719b7b81b56216862bf2a94a576ff90f60
Parents: acf9b87
Author: Michael Stack 
Authored: Wed Aug 2 22:54:21 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:39:36 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  28 ++-
 .../hadoop/hbase/master/LoadBalancer.java   |  31 ++-
 .../hadoop/hbase/master/ServerManager.java  |  30 +--
 .../hbase/master/balancer/BaseLoadBalancer.java | 110 --
 .../balancer/FavoredStochasticBalancer.java |  11 +-
 .../hbase/regionserver/HRegionServer.java   |   5 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   2 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   3 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  16 +-
 .../hbase/client/TestClientClusterStatus.java   |   5 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../hadoop/hbase/fs/TestBlockReorder.java   |   4 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |  19 +-
 .../hbase/master/TestMasterMetricsWrapper.java  |  13 +-
 .../hbase/master/TestMasterNoCluster.java   |   7 +-
 .../master/balancer/TestBaseLoadBalancer.java   |  10 +-
 .../balancer/TestRegionsOnMasterOptions.java| 200 +++
 .../hbase/regionserver/TestClusterId.java   |   4 +-
 .../TestRSKilledWhenInitializing.java   |  15 +-
 .../hbase/regionserver/TestRegionOpen.java  |   5 +-
 .../regionserver/TestRegionServerAbort.java |  14 +-
 .../regionserver/TestRegionServerHostname.java  |  11 +-
 .../regionserver/TestRegionServerMetrics.java   |  57 --
 .../TestRegionServerReadRequestMetrics.java |  12 +-
 .../TestRegionServerReportForDuty.java  |  15 +-
 .../TestSplitTransactionOnCluster.java  |  16 +-
 .../TestFlushWithThroughputController.java  |   8 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 29 files changed, 491 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/47344671/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ce83838..6b4d4e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -530,6 +530,17 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 }
   }
 
+  // Main run loop. Calls through to the regionserver run loop.
+  @Override
+  public void run() {
+try {
+  super.run();
+} finally {
+  // If on way out, then we are no longer active master.
+  this.activeMaster = false;
+}
+  }
+
   // return the actual infoPort, -1 means disable info server.
   private int putUpJettyServer() throws IOException {
 if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
@@ -604,9 +615,8 @@ public class HMaster extends HRegionServer implements 
MasterServices {
*/
   @Override
   protected void waitForMasterActive(){
-boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
-while (!(tablesOnMaster && activeMaster)
-&& !isStopped() && !isAborted()) {
+boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);
+while (!(tablesOnMaster && activeMaster) && !isStopped() && !isAborted()) {
   sleeper.sleep();
 }
   }
@@ -644,7 +654,7 @@ public class HMaster extend

[09/50] [abbrv] hbase git commit: HBASE-18558 clean up duplicate dependency management entries for hbase-shaded-miscellaneous

2017-08-17 Thread stack
HBASE-18558 clean up duplicate dependency management entries for 
hbase-shaded-miscellaneous


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/043ec9b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/043ec9b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/043ec9b3

Branch: refs/heads/HBASE-14070.HLC
Commit: 043ec9b37e43328e8784f88e3d6867b007a31d1d
Parents: 6f44b24
Author: Michael Stack 
Authored: Fri Aug 11 07:24:17 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 07:24:17 2017 -0700

--
 pom.xml | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/043ec9b3/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7648e8e..7925e4e 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1718,11 +1718,6 @@
 ${metrics-core.version}
   
   
-org.apache.hbase.thirdparty
-hbase-shaded-miscellaneous
-${hbase-thirdparty.version}
-  
-  
 commons-collections
 commons-collections
 ${collections.version}



[46/50] [abbrv] hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-17 Thread stack
HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c3a64db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c3a64db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c3a64db

Branch: refs/heads/HBASE-14070.HLC
Commit: 4c3a64db13b086ad3d8a6ffa1be8ba2f5a24719c
Parents: 5d2c3dd
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 14:50:46 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 2bd0860..6947313 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -134,11 +134,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-    List<Cell> list = this.familyMap.get(family);
-    if (list == null) {
-      list  = new ArrayList<>(1);
-      this.familyMap.put(family, list);
-    }
+
+    // Get cell list for the family
+    List<Cell> list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index bf5241c..66b6cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -180,11 +180,7 @@ public class Delete extends Mutation implements 
Comparable {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -216,11 +212,8 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    } else if(!list.isEmpty()) {
+    List<Cell> list = getCellList(family);
+    if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -236,11 +229,7 @@ public class Delete extends Mutation implements 
Comparable {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -269,11 +258,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -304,11 +289,7 @@ public class Delete extends Mutation implements 
Comparable<Row> {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List<Cell> list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List<Cell> list = getCellList(family);
 KeyValue kv = new KeyValue(this.row, family, qualifier, t

[13/50] [abbrv] hbase git commit: HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

2017-08-17 Thread stack
HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95e88396
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95e88396
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95e88396

Branch: refs/heads/HBASE-14070.HLC
Commit: 95e883967cbb383b48d8fae548fb55b88c7f0529
Parents: aa8f67a
Author: Yi Liang 
Authored: Thu Aug 10 11:15:59 2017 -0700
Committer: Jerry He 
Committed: Fri Aug 11 22:45:22 2017 -0700

--
 .../hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95e88396/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 74d9b75..9aaf297 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -513,7 +513,7 @@ public class MergeTableRegionsProcedure
   throws IOException {
 GetRegionInfoResponse response =
   Util.getRegionInfoResponse(env, rs.getServerName(), rs.getRegion());
-return response.hasSplittable() && response.getSplittable();
+return response.hasMergeable() && response.getMergeable();
   }
 
   /**



[01/50] [abbrv] hbase git commit: HBASE-18262 name of parameter quote need update

2017-08-17 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14070.HLC 82a9cec59 -> 675b63d50


HBASE-18262 name of parameter quote need update

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5507150a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5507150a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5507150a

Branch: refs/heads/HBASE-14070.HLC
Commit: 5507150a163f08c966f4cd55607feff8e2570c17
Parents: e4ba404
Author: dongtao.zhang 
Authored: Fri Jun 23 14:52:02 2017 +0800
Committer: Michael Stack 
Committed: Thu Aug 10 15:01:37 2017 -0700

--
 hbase-common/src/main/resources/hbase-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5507150a/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 87a36c1..43efd4b 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -88,7 +88,7 @@ possible configurations would overwhelm and obscure the 
important.
 list of ZooKeeper ensemble servers. If HBASE_MANAGES_ZK is set in 
hbase-env.sh
 this is the list of servers which hbase will start/stop ZooKeeper on as
 part of cluster start/stop.  Client-side, we will take this list of
-ensemble members and put it together with the hbase.zookeeper.clientPort
+ensemble members and put it together with the 
hbase.zookeeper.property.clientPort
 config. and pass it into zookeeper constructor as the connectString
 parameter.
   



[42/50] [abbrv] hbase git commit: HBASE-18553 Expose scan cursor for asynchronous scanner

2017-08-17 Thread stack
HBASE-18553 Expose scan cursor for asynchronous scanner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c74a73d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c74a73d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c74a73d

Branch: refs/heads/HBASE-14070.HLC
Commit: 4c74a73d57e09fd2c0ecde862a196c28dc6cd219
Parents: 2a9cdd5
Author: zhangduo 
Authored: Tue Aug 15 17:15:06 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 21:04:57 2017 +0800

--
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 -
 .../hbase/client/AsyncTableResultScanner.java   |  20 ++-
 .../hbase/client/RawScanResultConsumer.java |  11 +-
 .../client/AbstractTestResultScannerCursor.java |  89 +++
 .../client/TestAsyncResultScannerCursor.java|  49 ++
 .../hbase/client/TestRawAsyncScanCursor.java| 157 +--
 .../hbase/client/TestResultScannerCursor.java   |  34 
 .../hadoop/hbase/client/TestScanCursor.java |  90 ---
 8 files changed, 330 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c74a73d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 02a4357..d16cb8b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -157,10 +158,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
 private ScanResumerImpl resumer;
 
-public ScanControllerImpl(ScanResponse resp) {
-  callerThread = Thread.currentThread();
-  cursor = resp.hasCursor() ? 
Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
-  : Optional.empty();
+public ScanControllerImpl(Optional<Cursor> cursor) {
+  this.callerThread = Thread.currentThread();
+  this.cursor = cursor;
 }
 
 private void preCheck() {
@@ -476,10 +476,11 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 }
 updateServerSideMetrics(scanMetrics, resp);
 boolean isHeartbeatMessage = resp.hasHeartbeatMessage() && 
resp.getHeartbeatMessage();
+Result[] rawResults;
 Result[] results;
 int numberOfCompleteRowsBefore = resultCache.numberOfCompleteRows();
 try {
-  Result[] rawResults = 
ResponseConverter.getResults(controller.cellScanner(), resp);
+  rawResults = ResponseConverter.getResults(controller.cellScanner(), 
resp);
   updateResultsMetrics(scanMetrics, rawResults, isHeartbeatMessage);
   results = resultCache.addAndGet(
 
Optional.ofNullable(rawResults).orElse(ScanResultCache.EMPTY_RESULT_ARRAY),
@@ -493,12 +494,30 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   return;
 }
 
-ScanControllerImpl scanController = new ScanControllerImpl(resp);
+ScanControllerImpl scanController;
 if (results.length > 0) {
+  scanController = new ScanControllerImpl(
+  resp.hasCursor() ? 
Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
+  : Optional.empty());
   updateNextStartRowWhenError(results[results.length - 1]);
   consumer.onNext(results, scanController);
-} else if (resp.hasHeartbeatMessage() && resp.getHeartbeatMessage()) {
-  consumer.onHeartbeat(scanController);
+} else {
+  Optional<Cursor> cursor = Optional.empty();
+  if (resp.hasCursor()) {
+cursor = Optional.of(ProtobufUtil.toCursor(resp.getCursor()));
+  } else if (scan.isNeedCursorResult() && rawResults.length > 0) {
+// It is size limit exceed and we need to return the last Result's row.
+// When user setBatch and the scanner is reopened, the server may 
return Results that
+// user has seen and the last Result can not be seen because the 
number is not enough.
+// So the row keys of results may not be same, we must use the last 
one.
+cursor = Optional.of(new Cursor(rawResults[rawResults.length - 
1].getRow()));
+  }
+  scanController = new ScanControllerImpl(cursor);
+  if (isHeartbeatMessage || cursor.isPresent()) {
+// only call onHeartbeat if server tells us explicitly this is a 
heartbeat message, or we
+// want to pass

[23/50] [abbrv] hbase git commit: Revert "HBASE-18588 Verify we're using netty .so epolling on linux post HBASE-18271" References the wrong JIRA

2017-08-17 Thread stack
Revert "HBASE-18588 Verify we're using netty .so epolling on linux post 
HBASE-18271"
References the wrong JIRA

This reverts commit ddbaf56ca8c712dc44608d3323280f578c56aed2.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/424dff20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/424dff20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/424dff20

Branch: refs/heads/HBASE-14070.HLC
Commit: 424dff20607577901c06cb40b1293ea5051ec5c5
Parents: ddbaf56
Author: Michael Stack 
Authored: Mon Aug 14 09:12:51 2017 -0700
Committer: Michael Stack 
Committed: Mon Aug 14 09:12:51 2017 -0700

--
 hbase-protocol-shaded/pom.xml | 17 -
 1 file changed, 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/424dff20/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index 4c72eca..b28c03e 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -192,23 +192,6 @@
 
   
 
-
-  
-
-  com.google.code.maven-replacer-plugin
-
-replacer
-[1.5.3,)
-
-  replace
-
-  
-  
-
- false
-
-  
-
   
 
   



[37/50] [abbrv] hbase git commit: HBASE-18424 Fix TestAsyncTableGetMultiThreaded

2017-08-17 Thread stack
HBASE-18424 Fix TestAsyncTableGetMultiThreaded

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/665fd0d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/665fd0d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/665fd0d0

Branch: refs/heads/HBASE-14070.HLC
Commit: 665fd0d07e34141c2765f02398eb1ad9e376f32f
Parents: 5280c10
Author: Vladimir Rodionov 
Authored: Wed Aug 16 11:29:34 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 11:29:34 2017 +0800

--
 .../hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java  | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/665fd0d0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 2abc54d..225060b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -37,7 +37,11 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -47,14 +51,12 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
  * Will split the table, and move region randomly when testing.
  */
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreaded {
   private static final HBaseTestingUtility TEST_UTIL = new 
HBaseTestingUtility();



[26/50] [abbrv] hbase git commit: HBASE-18238 rubocop autocorrect for bin/

2017-08-17 Thread stack
HBASE-18238 rubocop autocorrect for bin/


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea8fa59a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea8fa59a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea8fa59a

Branch: refs/heads/HBASE-14070.HLC
Commit: ea8fa59a4c2fe7633ebe70df622098bfe36b5df9
Parents: 096dac2
Author: Mike Drob 
Authored: Wed Jul 19 12:05:26 2017 -0500
Committer: Mike Drob 
Committed: Mon Aug 14 13:53:37 2017 -0500

--
 bin/draining_servers.rb | 108 +++
 bin/get-active-master.rb|   6 +-
 bin/hirb.rb |  46 +++--
 bin/region_mover.rb |   2 +-
 bin/region_status.rb|  50 +++---
 bin/replication/copy_tables_desc.rb |  47 +++---
 bin/shutdown_regionserver.rb|  16 ++---
 7 files changed, 132 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea8fa59a/bin/draining_servers.rb
--
diff --git a/bin/draining_servers.rb b/bin/draining_servers.rb
index 8e1b250..ea74c30 100644
--- a/bin/draining_servers.rb
+++ b/bin/draining_servers.rb
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-# Add or remove servers from draining mode via zookeeper 
+# Add or remove servers from draining mode via zookeeper
 
 require 'optparse'
 include Java
@@ -29,13 +29,13 @@ java_import org.apache.commons.logging.Log
 java_import org.apache.commons.logging.LogFactory
 
 # Name of this script
-NAME = "draining_servers"
+NAME = 'draining_servers'.freeze
 
 # Do command-line parsing
 options = {}
 optparse = OptionParser.new do |opts|
   opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] 
add|remove|list || ..."
-  opts.separator 'Add remove or list servers in draining mode. Can accept 
either hostname to drain all region servers' +
+  opts.separator 'Add remove or list servers in draining mode. Can accept 
either hostname to drain all region servers' \
  'in that host, a host:port pair or a host,port,startCode 
triplet. More than one server can be given separated by space'
   opts.on('-h', '--help', 'Display usage information') do
 puts opts
@@ -51,117 +51,117 @@ optparse.parse!
 # Return array of servernames where servername is hostname+port+startcode
 # comma-delimited
 def getServers(admin)
-  serverInfos = admin.getClusterStatus().getServers()
+  serverInfos = admin.getClusterStatus.getServers
   servers = []
   for server in serverInfos
-servers << server.getServerName()
+servers << server.getServerName
   end
-  return servers
+  servers
 end
 
 def getServerNames(hostOrServers, config)
   ret = []
   connection = ConnectionFactory.createConnection(config)
-  
+
   for hostOrServer in hostOrServers
 # check whether it is already serverName. No need to connect to cluster
 parts = hostOrServer.split(',')
-if parts.size() == 3
+if parts.size == 3
   ret << hostOrServer
-else 
-  admin = connection.getAdmin() if not admin
+else
+  admin = connection.getAdmin unless admin
   servers = getServers(admin)
 
-  hostOrServer = hostOrServer.gsub(/:/, ",")
-  for server in servers 
+  hostOrServer = hostOrServer.tr(':', ',')
+  for server in servers
 ret << server if server.start_with?(hostOrServer)
   end
 end
   end
-  
-  admin.close() if admin
-  connection.close()
-  return ret
+
+  admin.close if admin
+  connection.close
+  ret
 end
 
-def addServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
+def addServers(_options, hostOrServers)
+  config = HBaseConfiguration.create
   servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
"draining_servers", nil)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
'draining_servers', nil)
   parentZnode = zkw.znodePaths.drainingZNode
-  
+
   begin
 for server in servers
   node = ZKUtil.joinZNode(parentZnode, server)
   ZKUtil.createAndFailSilent(zkw, node)
 end
   ensure
-zkw.close()
+zkw.close
   end
 end
 
-def removeServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
+def removeServers(_options, hostOrServers)
+  config = HBaseConfiguration.create
   servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
"draining_servers", nil)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
'draining_servers', nil)
   parentZnode = zkw.znodePaths.drainingZNode
-  
+
   begin
 for server in servers
   node = ZKUtil.joinZNode(parentZnode, server)
   ZKUtil.

[36/50] [abbrv] hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-17 Thread stack
HBASE-18587 Fix flaky TestFileIOEngine

This short circuits reads and writes with 0 length and also removes flakiness 
in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5280c100
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5280c100
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5280c100

Branch: refs/heads/HBASE-14070.HLC
Commit: 5280c100ff93f65cd568ce830e088cc12a2f5585
Parents: 2b88edf
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Tue Aug 15 14:57:10 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |  23 ++--
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 123 +++
 2 files changed, 88 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5280c100/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a847bfe..ab77696 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -122,15 +123,18 @@ public class FileIOEngine implements IOEngine {
   @Override
   public Cacheable read(long offset, int length, 
CacheableDeserializer<Cacheable> deserializer)
   throws IOException {
+Preconditions.checkArgument(length >= 0, "Length of read can not be less 
than 0.");
 ByteBuffer dstBuffer = ByteBuffer.allocate(length);
-accessFile(readAccessor, dstBuffer, offset);
-// The buffer created out of the fileChannel is formed by copying the data 
from the file
-// Hence in this case there is no shared memory that we point to. Even if 
the BucketCache evicts
-// this buffer from the file the data is already copied and there is no 
need to ensure that
-// the results are not corrupted before consuming them.
-if (dstBuffer.limit() != length) {
-  throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " 
+ length
-  + " expected");
+if (length != 0) {
+  accessFile(readAccessor, dstBuffer, offset);
+  // The buffer created out of the fileChannel is formed by copying the 
data from the file
+  // Hence in this case there is no shared memory that we point to. Even 
if the BucketCache evicts
+  // this buffer from the file the data is already copied and there is no 
need to ensure that
+  // the results are not corrupted before consuming them.
+  if (dstBuffer.limit() != length) {
+throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, 
" + length
++ " expected");
+  }
 }
 return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, 
MemoryType.EXCLUSIVE);
   }
@@ -143,6 +147,9 @@ public class FileIOEngine implements IOEngine {
*/
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+if (!srcBuffer.hasRemaining()) {
+  return;
+}
 accessFile(writeAccessor, srcBuffer, offset);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5280c100/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index d13022d..4451c0c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -30,6 +31,8 @@ import 
org.apache.hadoop.hbase.io.hfile.bucket.TestByteBufferIOEngine.BufferGrab
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+imp

[35/50] [abbrv] hbase git commit: HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

2017-08-17 Thread stack
HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

  * calls to methods getLowestLocalityRegionServer() & 
getLeastLoadedTopServerForRegion() got removed in HBASE-18164
  * call to calculateRegionServerLocalities() got removed in HBASE-15486
  * Some other minor improvements

Change-Id: Ib149530d8d20c019b0891c026e23180e260f59db
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b88edfd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b88edfd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b88edfd

Branch: refs/heads/HBASE-14070.HLC
Commit: 2b88edfd8d6c1cb512abf1d9f3316c50ed342cfc
Parents: 310934d
Author: Umesh Agashe 
Authored: Fri Aug 11 11:18:13 2017 -0700
Committer: Apekshit Sharma 
Committed: Tue Aug 15 14:55:52 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 190 ---
 1 file changed, 32 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b88edfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 8f5b6f5..30f59a9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1,4 +1,4 @@
- /**
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -34,6 +34,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -360,10 +361,10 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   numMaxRegionsPerTable = new int[numTables];
-  for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-for (tableIndex = 0 ; tableIndex < 
numRegionsPerServerPerTable[serverIndex].length; tableIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+  for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) {
+for (tableIndex = 0; tableIndex < aNumRegionsPerServerPerTable.length; 
tableIndex++) {
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
aNumRegionsPerServerPerTable[tableIndex];
   }
 }
   }
@@ -375,10 +376,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 } else {
   hasRegionReplicas = true;
   HRegionInfo primaryInfo = 
RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
-  regionIndexToPrimaryIndex[i] =
-  regionsToIndex.containsKey(primaryInfo) ?
-  regionsToIndex.get(primaryInfo):
-  -1;
+  regionIndexToPrimaryIndex[i] = 
regionsToIndex.getOrDefault(primaryInfo, -1);
 }
   }
 
@@ -608,7 +606,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
 /** An action to move or swap a region */
 public static class Action {
-  public static enum Type {
+  public enum Type {
 ASSIGN_REGION,
 MOVE_REGION,
 SWAP_REGIONS,
@@ -806,9 +804,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   == numMaxRegionsPerTable[tableIndex]) {
 //recompute maxRegionsPerTable since the previous value was coming 
from the old server
 numMaxRegionsPerTable[tableIndex] = 0;
-for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) 
{
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
aNumRegionsPerServerPerTable[tableIndex];
   }
 }
   }
@@ -912,49 +910,7 @@ public abstract class Base

[34/50] [abbrv] hbase git commit: HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

2017-08-17 Thread stack
HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/310934d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/310934d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/310934d0

Branch: refs/heads/HBASE-14070.HLC
Commit: 310934d0604605fe361e836fe4277c48b5c493fa
Parents: 63e313b
Author: Chun-Hao Tang 
Authored: Wed Aug 16 00:43:02 2017 +0800
Committer: Michael Stack 
Committed: Tue Aug 15 14:52:33 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 31 ++--
 .../hbase/regionserver/TestDefaultMemStore.java | 28 +-
 2 files changed, 29 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/310934d0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3b24f3d..b9cafd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3928,7 +3928,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* We throw RegionTooBusyException if above memstore limit
* and expect client to retry using some kind of backoff
   */
-  private void checkResources() throws RegionTooBusyException {
+  void checkResources() throws RegionTooBusyException {
 // If catalog region, do not impose resource constraints or block updates.
 if (this.getRegionInfo().isMetaRegion()) return;
 
@@ -3974,7 +3974,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* @param edits Cell updates by column
* @throws IOException
*/
-  private void put(final byte [] row, byte [] family, List<Cell> edits)
+  void put(final byte [] row, byte [] family, List<Cell> edits)
    throws IOException {
 NavigableMap<byte[], List<Cell>> familyMap;
 familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -6878,33 +6878,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Inserts a new region's meta information into the passed
-   * meta region. Used by the HMaster bootstrap code adding
-   * new table to hbase:meta table.
-   *
-   * @param meta hbase:meta HRegion to be updated
-   * @param r HRegion to add to meta
-   *
-   * @throws IOException
-   */
-  // TODO remove since only test and merge use this
-  public static void addRegionToMETA(final HRegion meta, final HRegion r) 
throws IOException {
-meta.checkResources();
-// The row key is the region name
-byte[] row = r.getRegionInfo().getRegionName();
-final long now = EnvironmentEdgeManager.currentTime();
-final List<Cell> cells = new ArrayList<>(2);
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.REGIONINFO_QUALIFIER, now,
-  r.getRegionInfo().toByteArray()));
-// Set into the root table the version of the meta table.
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.META_VERSION_QUALIFIER, now,
-  Bytes.toBytes(HConstants.META_VERSION)));
-meta.put(row, HConstants.CATALOG_FAMILY, cells);
-  }
-
-  /**
* Computes the Path of the HRegion
*
* @param tabledir qualified path for table

http://git-wip-us.apache.org/repos/asf/hbase/blob/310934d0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 0b1638b..7b10846 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -975,7 +975,7 @@ public class TestDefaultMemStore {
 HRegion r =
 HRegion.createHRegion(hri, testDir, conf, desc,
 wFactory.getWAL(hri.getEncodedNameAsBytes(), 
hri.getTable().getNamespace()));
-HRegion.addRegionToMETA(meta, r);
+addRegionToMETA(meta, r);
 edge.setCurrentTimeMillis(1234 + 100);
 StringBuffer sb = new StringBuffer();
 assertTrue(meta.shouldFlush(sb) == false);
@@ -983,6 +983,32 @@ public class TestDefaultMemStore {
 assertTrue(meta.shouldFlush(sb) == true);
   }
 
+  /**
+   * Inserts a new region's meta information into the passed
+   * meta region. Used 

[11/50] [abbrv] hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- disable TestAM#testSocketTimeout... mock is insufficent for new processing

2017-08-17 Thread stack
HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- 
disable TestAM#testSocketTimeout... mock is insufficent for new processing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1070888f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1070888f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1070888f

Branch: refs/heads/HBASE-14070.HLC
Commit: 1070888fff3a89d435018f11bfb2fd5609be8bab
Parents: 71a9a9a
Author: Michael Stack 
Authored: Fri Aug 11 14:20:06 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 14:20:35 2017 -0700

--
 .../hadoop/hbase/master/assignment/TestAssignmentManager.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1070888f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index d18c12a..4d2a894 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -243,7 +243,7 @@ public class TestAssignmentManager {
 }
   }
 
-  @Test
+  @Ignore @Test // Disabled for now. Since HBASE-18551, this mock is 
insufficient.
   public void testSocketTimeout() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 final HRegionInfo hri = createRegionInfo(tableName, 1);
@@ -254,9 +254,8 @@ public class TestAssignmentManager {
 rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
 waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false)));
 
-rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
-
-exception.expect(ServerCrashException.class);
+rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 1));
+// exception.expect(ServerCrashException.class);
 waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, 
false)));
 
 assertEquals(assignSubmittedCount + 1, 
assignProcMetrics.getSubmittedCounter().getCount());



[19/50] [abbrv] hbase git commit: HBASE-14135 Merge backup images (Vladimir Rodionov)

2017-08-17 Thread stack
HBASE-14135 Merge backup images (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05e6e569
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05e6e569
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05e6e569

Branch: refs/heads/HBASE-14070.HLC
Commit: 05e6e5695089640006d06c2f74126b50a73363b7
Parents: c6ac04a
Author: Josh Elser 
Authored: Sun Aug 13 20:55:58 2017 -0400
Committer: Josh Elser 
Committed: Sun Aug 13 20:55:58 2017 -0400

--
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  20 +-
 .../hadoop/hbase/backup/BackupDriver.java   |   2 +
 .../apache/hadoop/hbase/backup/BackupInfo.java  |   5 +
 .../hadoop/hbase/backup/BackupMergeJob.java |  40 +++
 .../hbase/backup/BackupRestoreFactory.java  |  20 +-
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  57 ++--
 .../hbase/backup/impl/BackupAdminImpl.java  | 213 +---
 .../hbase/backup/impl/BackupCommands.java   | 163 ++---
 .../hadoop/hbase/backup/impl/BackupManager.java |  21 +-
 .../hbase/backup/impl/BackupManifest.java   |  24 +-
 .../hbase/backup/impl/BackupSystemTable.java| 314 ++---
 .../hbase/backup/impl/RestoreTablesClient.java  |  32 +-
 .../backup/mapreduce/HFileSplitterJob.java  | 181 --
 .../mapreduce/MapReduceBackupMergeJob.java  | 321 ++
 .../mapreduce/MapReduceHFileSplitterJob.java| 181 ++
 .../backup/mapreduce/MapReduceRestoreJob.java   |  84 ++---
 .../hadoop/hbase/backup/util/BackupUtils.java   |  93 +++--
 .../TestIncrementalBackupMergeWithFailures.java | 336 +++
 .../backup/TestRepairAfterFailedDelete.java |   2 +-
 19 files changed, 1574 insertions(+), 535 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/05e6e569/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
index 6f642a4..9dc6382 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupAdmin.java
@@ -38,8 +38,8 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 public interface BackupAdmin extends Closeable {
 
   /**
-   * Backup given list of tables fully. This is a synchronous operation.
-   * It returns backup id on success or throw exception on failure.
+   * Backup given list of tables fully. This is a synchronous operation. It 
returns backup id on
+   * success or throw exception on failure.
* @param userRequest BackupRequest instance
* @return the backup Id
*/
@@ -61,16 +61,24 @@ public interface BackupAdmin extends Closeable {
*/
   BackupInfo getBackupInfo(String backupId) throws IOException;
 
-
   /**
* Delete backup image command
-   * @param backupIds backup id list
+   * @param backupIds array of backup ids
* @return total number of deleted sessions
* @throws IOException exception
*/
   int deleteBackups(String[] backupIds) throws IOException;
 
   /**
+   * Merge backup images command
+   * @param backupIds array of backup ids of images to be merged
+   *The resulting backup image will have the same backup id as the most
+   *recent image from a list of images to be merged
+   * @throws IOException exception
+   */
+  void mergeBackups(String[] backupIds) throws IOException;
+
+  /**
* Show backup history command
* @param n last n backup sessions
* @return list of backup info objects
@@ -113,7 +121,7 @@ public interface BackupAdmin extends Closeable {
   /**
* Add tables to backup set command
* @param name name of backup set.
-   * @param tables list of tables to be added to this set.
+   * @param tables array of tables to be added to this set.
* @throws IOException exception
*/
   void addToBackupSet(String name, TableName[] tables) throws IOException;
@@ -121,7 +129,7 @@ public interface BackupAdmin extends Closeable {
   /**
* Remove tables from backup set
* @param name name of backup set.
-   * @param tables list of tables to be removed from this set.
+   * @param tables array of tables to be removed from this set.
* @throws IOException exception
*/
   void removeFromBackupSet(String name, TableName[] tables) throws IOException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/05e6e569/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java 
b

[14/50] [abbrv] hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-17 Thread stack
HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/173dce73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/173dce73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/173dce73

Branch: refs/heads/HBASE-14070.HLC
Commit: 173dce73471da005fb6780a7e7b65b43bad481e2
Parents: 95e8839
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 22:49:38 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 02ec770..2bd0860 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -137,10 +137,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList<>(1);
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 395c277..bf5241c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -183,9 +183,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -219,12 +219,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,10 +239,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -307,10 +307,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.

  1   2   >