hbase git commit: HBASE-18553 Expose scan cursor for asynchronous scanner

2017-08-16 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 2a9cdd5e7 -> 4c74a73d5


HBASE-18553 Expose scan cursor for asynchronous scanner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c74a73d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c74a73d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c74a73d

Branch: refs/heads/master
Commit: 4c74a73d57e09fd2c0ecde862a196c28dc6cd219
Parents: 2a9cdd5
Author: zhangduo 
Authored: Tue Aug 15 17:15:06 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 21:04:57 2017 +0800

--
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 +-
 .../hbase/client/AsyncTableResultScanner.java   |  20 ++-
 .../hbase/client/RawScanResultConsumer.java     |  11 +-
 .../client/AbstractTestResultScannerCursor.java |  89 +++
 .../client/TestAsyncResultScannerCursor.java    |  49 ++
 .../hbase/client/TestRawAsyncScanCursor.java    | 157 +--
 .../hbase/client/TestResultScannerCursor.java   |  34 ++
 .../hadoop/hbase/client/TestScanCursor.java     |  90 ---
 8 files changed, 330 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c74a73d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 02a4357..d16cb8b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -157,10 +158,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
 private ScanResumerImpl resumer;
 
-    public ScanControllerImpl(ScanResponse resp) {
-      callerThread = Thread.currentThread();
-      cursor = resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
-          : Optional.empty();
+    public ScanControllerImpl(Optional<Cursor> cursor) {
+      this.callerThread = Thread.currentThread();
+      this.cursor = cursor;
 }
 
 private void preCheck() {
@@ -476,10 +476,11 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 }
 updateServerSideMetrics(scanMetrics, resp);
     boolean isHeartbeatMessage = resp.hasHeartbeatMessage() && resp.getHeartbeatMessage();
+Result[] rawResults;
 Result[] results;
 int numberOfCompleteRowsBefore = resultCache.numberOfCompleteRows();
 try {
-      Result[] rawResults = ResponseConverter.getResults(controller.cellScanner(), resp);
+      rawResults = ResponseConverter.getResults(controller.cellScanner(), resp);
       updateResultsMetrics(scanMetrics, rawResults, isHeartbeatMessage);
       results = resultCache.addAndGet(
           Optional.ofNullable(rawResults).orElse(ScanResultCache.EMPTY_RESULT_ARRAY),
@@ -493,12 +494,30 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   return;
 }
 
-    ScanControllerImpl scanController = new ScanControllerImpl(resp);
+    ScanControllerImpl scanController;
     if (results.length > 0) {
+      scanController = new ScanControllerImpl(
+          resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
+              : Optional.empty());
       updateNextStartRowWhenError(results[results.length - 1]);
       consumer.onNext(results, scanController);
-    } else if (resp.hasHeartbeatMessage() && resp.getHeartbeatMessage()) {
-      consumer.onHeartbeat(scanController);
+    } else {
+      Optional<Cursor> cursor = Optional.empty();
+      if (resp.hasCursor()) {
+        cursor = Optional.of(ProtobufUtil.toCursor(resp.getCursor()));
+      } else if (scan.isNeedCursorResult() && rawResults.length > 0) {
+        // The size limit was exceeded, so we need to return the last raw Result's row.
+        // When the user has set a batch and the scanner is reopened, the server may return
+        // Results the user has already seen, and the last Result may not yet be complete
+        // because not enough cells have arrived. So the row keys of the cached results may
+        // differ from the raw ones; we must use the last raw Result's row.
+        cursor = Optional.of(new Cursor(rawResults[rawResults.length - 1].getRow()));
+      }
+      scanController = new ScanControllerImpl(cursor);
+      if (isHeartbeatMessage || cursor.isPresent()) {
+        // only call onHeartbeat if the server tells us
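For context, a minimal sketch (not part of this patch) of an asynchronous consumer that picks up the cursor this change threads through ScanControllerImpl. It assumes the controller exposes the stored Optional<Cursor> through a cursor() accessor (that accessor lives in the RawScanResultConsumer.java portion of the diff not shown here) and that the scan was created with Scan.setNeedCursorResult(true):

  import java.util.Optional;

  import org.apache.hadoop.hbase.client.Cursor;
  import org.apache.hadoop.hbase.client.RawScanResultConsumer;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CursorLoggingConsumer implements RawScanResultConsumer {

    @Override
    public void onNext(Result[] results, ScanController controller) {
      // Complete rows arrived; process them as usual.
    }

    @Override
    public void onHeartbeat(ScanController controller) {
      // With this patch a heartbeat can carry a cursor that tells us how far the
      // server has scanned, even though no complete row has been returned yet.
      Optional<Cursor> cursor = controller.cursor();
      cursor.ifPresent(c -> System.out.println(
          "scanned up to row " + Bytes.toStringBinary(c.getRow())));
    }

    @Override
    public void onError(Throwable error) {
      // Scan failed; surface the error.
    }

    @Override
    public void onComplete() {
      // Scan finished normally.
    }
  }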

hbase git commit: HBASE-18553 Expose scan cursor for asynchronous scanner

2017-08-16 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1bae5cabf -> 770312a8c


HBASE-18553 Expose scan cursor for asynchronous scanner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/770312a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/770312a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/770312a8

Branch: refs/heads/branch-2
Commit: 770312a8c2f27c427e14b467edb41e379720f063
Parents: 1bae5ca
Author: zhangduo 
Authored: Tue Aug 15 17:15:06 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 21:05:02 2017 +0800

--
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  35 +-
 .../hbase/client/AsyncTableResultScanner.java   |  20 ++-
 .../hbase/client/RawScanResultConsumer.java     |  11 +-
 .../client/AbstractTestResultScannerCursor.java |  89 +++
 .../client/TestAsyncResultScannerCursor.java    |  49 ++
 .../hbase/client/TestRawAsyncScanCursor.java    | 157 +--
 .../hbase/client/TestResultScannerCursor.java   |  34 ++
 .../hadoop/hbase/client/TestScanCursor.java     |  90 ---
 8 files changed, 330 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/770312a8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 02a4357..d16cb8b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
@@ -157,10 +158,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
 private ScanResumerImpl resumer;
 
-    public ScanControllerImpl(ScanResponse resp) {
-      callerThread = Thread.currentThread();
-      cursor = resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
-          : Optional.empty();
+    public ScanControllerImpl(Optional<Cursor> cursor) {
+      this.callerThread = Thread.currentThread();
+      this.cursor = cursor;
 }
 
 private void preCheck() {
@@ -476,10 +476,11 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 }
 updateServerSideMetrics(scanMetrics, resp);
     boolean isHeartbeatMessage = resp.hasHeartbeatMessage() && resp.getHeartbeatMessage();
+Result[] rawResults;
 Result[] results;
 int numberOfCompleteRowsBefore = resultCache.numberOfCompleteRows();
 try {
-      Result[] rawResults = ResponseConverter.getResults(controller.cellScanner(), resp);
+      rawResults = ResponseConverter.getResults(controller.cellScanner(), resp);
       updateResultsMetrics(scanMetrics, rawResults, isHeartbeatMessage);
       results = resultCache.addAndGet(
           Optional.ofNullable(rawResults).orElse(ScanResultCache.EMPTY_RESULT_ARRAY),
@@ -493,12 +494,30 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   return;
 }
 
-    ScanControllerImpl scanController = new ScanControllerImpl(resp);
+    ScanControllerImpl scanController;
     if (results.length > 0) {
+      scanController = new ScanControllerImpl(
+          resp.hasCursor() ? Optional.of(ProtobufUtil.toCursor(resp.getCursor()))
+              : Optional.empty());
       updateNextStartRowWhenError(results[results.length - 1]);
       consumer.onNext(results, scanController);
-    } else if (resp.hasHeartbeatMessage() && resp.getHeartbeatMessage()) {
-      consumer.onHeartbeat(scanController);
+    } else {
+      Optional<Cursor> cursor = Optional.empty();
+      if (resp.hasCursor()) {
+        cursor = Optional.of(ProtobufUtil.toCursor(resp.getCursor()));
+      } else if (scan.isNeedCursorResult() && rawResults.length > 0) {
+        // The size limit was exceeded, so we need to return the last raw Result's row.
+        // When the user has set a batch and the scanner is reopened, the server may return
+        // Results the user has already seen, and the last Result may not yet be complete
+        // because not enough cells have arrived. So the row keys of the cached results may
+        // differ from the raw ones; we must use the last raw Result's row.
+        cursor = Optional.of(new Cursor(rawResults[rawResults.length - 1].getRow()));
+      }
+      scanController = new ScanControllerImpl(cursor);
+      if (isHeartbeatMessage || cursor.isPresent()) {
+        // only call onHeartbeat if the server tells us
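The companion AsyncTableResultScanner change in this patch surfaces the same cursors through the blocking ResultScanner API. A hedged usage sketch, relying on the Result#isCursor()/Result#getCursor() API from HBASE-17167; the Table passed in is assumed to be already open:

  import java.io.IOException;

  import org.apache.hadoop.hbase.client.Cursor;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  public class CursorScanExample {
    static void scanWithCursors(Table table) throws IOException {
      Scan scan = new Scan().setNeedCursorResult(true);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          if (result.isCursor()) {
            // A progress marker, not real data: its row says how far the scan advanced.
            Cursor cursor = result.getCursor();
            continue;
          }
          // A normal row; process it.
        }
      }
    }
  }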

[01/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4c7741b34 -> 1ada5f22c


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
index 803999d..8af4814 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
@@ -41,241 +41,248 @@
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * IO engine that stores data to a file on the local file system.
- */
-@InterfaceAudience.Private
-public class FileIOEngine implements IOEngine {
-  private static final Log LOG = LogFactory.getLog(FileIOEngine.class);
-  public static final String FILE_DELIMITER = ",";
-  private final String[] filePaths;
-  private final FileChannel[] fileChannels;
-  private final RandomAccessFile[] rafs;
-
-  private final long sizePerFile;
-  private final long capacity;
-
-  private FileReadAccessor readAccessor = new FileReadAccessor();
-  private FileWriteAccessor writeAccessor = new FileWriteAccessor();
-
-  public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
-      throws IOException {
-    this.sizePerFile = capacity / filePaths.length;
-    this.capacity = this.sizePerFile * filePaths.length;
-    this.filePaths = filePaths;
-    this.fileChannels = new FileChannel[filePaths.length];
-    if (!maintainPersistence) {
-      for (String filePath : filePaths) {
-        File file = new File(filePath);
-        if (file.exists()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("File " + filePath + " already exists. Deleting!!");
-          }
-          file.delete();
-          // If deletion fails still we can manage with the writes
-        }
-      }
-    }
-    this.rafs = new RandomAccessFile[filePaths.length];
-    for (int i = 0; i < filePaths.length; i++) {
-      String filePath = filePaths[i];
-      try {
-        rafs[i] = new RandomAccessFile(filePath, "rw");
-        long totalSpace = new File(filePath).getTotalSpace();
-        if (totalSpace < sizePerFile) {
-          // The next setting length will throw exception, logging this message
-          // is just used for the detail reason of exception,
-          String msg = "Only " + StringUtils.byteDesc(totalSpace)
-              + " total space under " + filePath + ", not enough for requested "
-              + StringUtils.byteDesc(sizePerFile);
-          LOG.warn(msg);
-        }
-        rafs[i].setLength(sizePerFile);
-        fileChannels[i] = rafs[i].getChannel();
-        LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile)
-            + ", on the path:" + filePath);
-      } catch (IOException fex) {
-        LOG.error("Failed allocating cache on " + filePath, fex);
-        shutdown();
-        throw fex;
-      }
-    }
-  }
-
-  @Override
-  public String toString() {
-    return "ioengine=" + this.getClass().getSimpleName() + ", paths="
-        + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity);
-  }
-
-  /**
-   * File IO engine is always able to support persistent storage for the cache
-   * @return true
-   */
-  @Override
-  public boolean isPersistent() {
-    return true;
-  }
-
-  /**
-   * Transfers data from file to the given byte buffer
-   * @param offset The offset in the file where the first byte to be read
-   * @param length The length of buffer that should be allocated for reading
-   *               from the file channel
-   * @return number of bytes read
-   * @throws IOException
-   */
-  @Override
-  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
-      throws IOException {
-    ByteBuffer dstBuffer = ByteBuffer.allocate(length);
-    accessFile(readAccessor, dstBuffer, offset);
-    // The buffer created out of the fileChannel is formed by copying the data from the file
-    // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
-    // this buffer from the file the data is already copied and there is no need to ensure that
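A small illustration (not from the diff) of the constructor arithmetic shown above: capacity is divided evenly across the supplied paths, and the effective capacity is rounded down to a multiple of the file count. FileIOEngine is an internal (@InterfaceAudience.Private) class, so this is for understanding only, and the paths are made up:

  import org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine;

  public class FileIOEngineDemo {
    public static void main(String[] args) throws Exception {
      long capacity = 3L * 1024 * 1024 * 1024 + 1; // deliberately not divisible by 2
      // sizePerFile = capacity / 2 (integer division drops the odd byte);
      // effective capacity = sizePerFile * 2. maintainPersistence=false deletes
      // any pre-existing cache files before allocating.
      FileIOEngine engine =
          new FileIOEngine(capacity, false, "/mnt/ssd1/bucket.cache", "/mnt/ssd2/bucket.cache");
      System.out.println(engine); // ioengine=FileIOEngine, paths=[...], capacity=3,221,225,472
    }
  }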

[17/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index d5022d9..4a4b2fe 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -973,6 +973,7 @@
 
 org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapCollection (implements java.util.Collection)
+org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapEntry (implements java.util.Map.Entry)
 org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapIterator (implements java.util.Iterator)
 org.apache.hadoop.hbase.rest.model.CellModel (implements org.apache.hadoop.hbase.rest.ProtobufMessageHandler, java.io.Serializable)
 org.apache.hadoop.hbase.regionserver.CellSet (implements java.util.NavigableSet)
@@ -4981,6 +4982,7 @@
 org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
 org.apache.hadoop.hbase.Tag
+org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter
 org.apache.hadoop.hbase.util.Threads.PrintThreadInfoHelper
 org.apache.hadoop.hbase.security.visibility.VisibilityExpEvaluator
 org.apache.hadoop.hbase.security.visibility.VisibilityLabelOrdinalProvider
@@ -5098,6 +5100,7 @@
 org.apache.hadoop.hbase.codec.prefixtree.encode.other.ColumnNodeType
 org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerRowSearchPosition
 org.apache.hadoop.hbase.monitoring.MonitoredTask.State
+org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
 org.apache.hadoop.hbase.mapreduce.TableSplit.Version
 org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication.Verifier.Counters
 org.apache.hadoop.hbase.mapreduce.CellCounter.CellCounterMapper.Counters



[20/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellFlatMap.CellFlatMapEntry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellFlatMap.CellFlatMapEntry.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellFlatMap.CellFlatMapEntry.html
new file mode 100644
index 000..a85e684
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellFlatMap.CellFlatMapEntry.html
@@ -0,0 +1,125 @@
[New generated javadoc page; standard navigation boilerplate omitted. Its content:]
+Uses of Class org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapEntry (Apache HBase 3.0.0-SNAPSHOT API)
+No usage of org.apache.hadoop.hbase.regionserver.CellFlatMap.CellFlatMapEntry
+Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
index 08876b1..1fe750a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
@@ -335,76 +335,68 @@
-static void
-HRegion.addRegionToMETA(HRegion meta, HRegion r)
-Inserts a new region's meta information into the passed meta region.
-
 protected void
 KeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
 
 protected void
 RegionSplitPolicy.configureForRegion(HRegion region)
 Upon construction, this method will be called with the region to be governed.
 
 protected void
 DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
 
 protected void
 IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegion region)
 
 protected void
 FlushAllLargeStoresPolicy.configureForRegion(HRegion region)
 
 protected void
 FlushPolicy.configureForRegion(HRegion region)
 Upon construction, this method will be called with the region to be governed.
 
 protected void
 ConstantSizeRegionSplitPolicy.configureForRegion(HRegion region)
 
 protected void
 FlushNonSloppyStoresFirstPolicy.configureForRegion(HRegion region)
 
 protected void
 BusyRegionSplitPolicy.configureForRegion(HRegion region)
 
 static FlushPolicy
 FlushPolicyFactory.create(HRegion region, org.apache.hadoop.conf.Configuration conf)
 Create the FlushPolicy configured for the given table.
 
 static RegionSplitPolicy
 RegionSplitPolicy.create(HRegion region, org.apache.hadoop.conf.Configuration conf)
 Create the RegionSplitPolicy configured for the given table.
 
 (package private) HRegion
 HRegion.createMergedRegionFromMerges(HRegionInfo mergedRegionInfo, HRegion region_b)
 Create a merged region given a temp directory with the region data.
 
 private void
 HRegion.doProcessRowWithTimeout(RowProcessor processor, long now,
@@ -413,77 +405,77 @@
     WALEdit walEdit, long timeout)
 
 private Result
 RSRpcServices.get(Get get, HRegion region,
     RSRpcServices.RegionScannersCloseCallBack closeCallBack, RpcCallContext context)
 
 protected long
 FlushLargeStoresPolicy.getFlushSizeLowerBound(HRegion region)
 
 protected void
 HRegion.RegionScannerImpl.initializeKVHeap(...
[The interleaved "-"/"+" marker lines in these hunks only renumber the generated table rows.]
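Since the table above catalogs the configureForRegion hooks, here is a hedged sketch of how a custom split policy plugs into that hook; the class name and field are invented for illustration:

  import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
  import org.apache.hadoop.hbase.regionserver.HRegion;

  public class LoggingSplitPolicy extends ConstantSizeRegionSplitPolicy {
    private String regionName;

    @Override
    protected void configureForRegion(HRegion region) {
      // Called once, right after construction, with the region this policy governs.
      super.configureForRegion(region);
      regionName = region.getRegionInfo().getRegionNameAsString();
    }
  }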
[37/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 3573826..7294419 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2017 The Apache Software Foundation
 
   File: 2026,
- Errors: 12861,
+ Errors: 12838,
  Warnings: 0,
  Infos: 0
   
@@ -3807,7 +3807,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -13369,7 +13369,7 @@ under the License.
   0
 
 
-  44
+  41
 
   
   
@@ -13677,7 +13677,7 @@ under the License.
   0
 
 
-  9
+  11
 
   
   
@@ -17037,7 +17037,7 @@ under the License.
   0
 
 
-  19
+  20
 
   
   
@@ -18059,7 +18059,7 @@ under the License.
   0
 
 
-  21
+  20
 
   
   
@@ -22147,7 +22147,7 @@ under the License.
   0
 
 
-  34
+  33
 
   
   
@@ -22651,7 +22651,7 @@ under the License.
   0
 
 
-  4
+  6
 
   
   
@@ -23729,7 +23729,7 @@ under the License.
   0
 
 
-  2
+  3
 
   
   
@@ -24359,7 +24359,7 @@ under the License.
   0
 
 
-  12
+  10
 
   
   
@@ -25675,7 +25675,7 @@ under the License.
   0
 
 
-  209
+  208
 
   
   
@@ -26025,7 +26025,7 @@ under the License.
   0
 
 
-  49
+  48
 
   
   
@@ -26795,7 +26795,7 @@ under the License.
   0
 
 
-  95
+  89
 
   
   
@@ -27019,7 +27019,7 @@ under the License.
   0
 
 
-  6
+  7
 
   
   
@@ -27075,7 +27075,7 @@ under the License.
   0
 
 
-  75
+  61
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/coc.html
--
diff --git a/coc.html b/coc.html
index 3b687df..2bcb02f 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to the private list, priv...@hbase.apache.org
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2017-08-15
+  Last Published: 2017-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index a154739..d02e55e 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start coding and build that next
 The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2017-08-15
+  Last Published: 2017-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/dependencies.html
--
diff --git a/dependencies.html b/dependencies.

[40/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 2ab3342..d459974 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -304,824 +304,827 @@
    * @param regionName region name to close
    * @param serverName Deprecated. Not used anymore after deprecation.
    * @return Deprecated. Always returns true now.
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-   */
-  CompletableFuture<Boolean> closeRegion(byte[] regionName, Optional<ServerName> serverName);
-
-  /**
-   * Get all the online regions on a region server.
-   */
-  CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName);
-
-  /**
-   * Get the regions of a given table.
-   */
-  CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName);
-
-  /**
-   * Flush a table.
-   * @param tableName table to flush
-   */
-  CompletableFuture<Void> flush(TableName tableName);
-
-  /**
-   * Flush an individual region.
-   * @param regionName region to flush
-   */
-  CompletableFuture<Void> flushRegion(byte[] regionName);
-
-  /**
-   * Compact a table. When the returned CompletableFuture is done, it only means the compact request
-   * was sent to HBase and may need some time to finish the compact operation.
-   * @param tableName table to compact
-   */
-  default CompletableFuture<Void> compact(TableName tableName) {
-    return compact(tableName, Optional.empty());
-  }
-
-  /**
-   * Compact a column family within a table. When the returned CompletableFuture is done, it only
-   * means the compact request was sent to HBase and may need some time to finish the compact
-   * operation.
-   * @param tableName table to compact
-   * @param columnFamily column family within a table. If not present, compact the table's all
-   *          column families.
-   */
-  CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily);
-
-  /**
-   * Compact an individual region. When the returned CompletableFuture is done, it only means the
-   * compact request was sent to HBase and may need some time to finish the compact operation.
-   * @param regionName region to compact
-   */
-  default CompletableFuture<Void> compactRegion(byte[] regionName) {
-    return compactRegion(regionName, Optional.empty());
-  }
-
-  /**
-   * Compact a column family within a region. When the returned CompletableFuture is done, it only
-   * means the compact request was sent to HBase and may need some time to finish the compact
-   * operation.
-   * @param regionName region to compact
-   * @param columnFamily column family within a region. If not present, compact the region's all
-   *          column families.
-   */
-  CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily);
-
-  /**
-   * Major compact a table. When the returned CompletableFuture is done, it only means the compact
-   * request was sent to HBase and may need some time to finish the compact operation.
-   * @param tableName table to major compact
-   */
-  default CompletableFuture<Void> majorCompact(TableName tableName) {
-    return majorCompact(tableName, Optional.empty());
-  }
-
-  /**
-   * Major compact a column family within a table. When the returned CompletableFuture is done, it
-   * only means the compact request was sent to HBase and may need some time to finish the compact
-   * operation.
-   * @param tableName table to major compact
-   * @param columnFamily column family within a table. If not present, major compact the table's all
-   *          column families.
-   */
-  CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily);
-
-  /**
-   * Major compact a region. When the returned CompletableFuture is done, it only means the compact
-   * request was sent to HBase and may need some time to finish the compact operation.
-   * @param regionName region to major compact
-   */
-  default CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
-    return majorCompactRegion(regionName, Optional.empty());
-  }
-
-  /**
-   * Major compact a column family within region. When the returned CompletableFuture is done, it
-   * only means the compact request was sent to HBase and may need some time
[38/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 32a931c..043eb24 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 2026
 0
 0
-12861
+12838
 
 Files
 
@@ -412,7 +412,7 @@
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
 0
-44
+41
 
 org/apache/hadoop/hbase/HConstants.java
 0
@@ -432,7 +432,7 @@
 org/apache/hadoop/hbase/HTableDescriptor.java
 0
 0
-34
+33
 
 org/apache/hadoop/hbase/HealthChecker.java
 0
@@ -772,7 +772,7 @@
 org/apache/hadoop/hbase/client/Admin.java
 0
 0
-95
+89
 
 org/apache/hadoop/hbase/client/Append.java
 0
@@ -782,7 +782,7 @@
 org/apache/hadoop/hbase/client/AsyncAdmin.java
 0
 0
-21
+20
 
 org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 0
@@ -837,7 +837,7 @@
 org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
 0
 0
-4
+6
 
 org/apache/hadoop/hbase/client/AsyncProcess.java
 0
@@ -877,7 +877,7 @@
 org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
 0
 0
-2
+3
 
 org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java
 0
@@ -899,6106 +899,6101 @@
 0
 3
 
-org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
-0
-0
-1
-
[The remainder of this hunk only re-renders the unchanged rows of the generated file/error table
under org/apache/hadoop/hbase/client/: BatchErrors (1), BufferedMutator (1), BufferedMutatorImpl (5),
CancellableRegionServerCallable (3), ClientAsyncPrefetchScanner (2), ClientIdGenerator (1),
ClientScanner (3), ClientServiceCallable (2), ClientUtil (1), ClusterStatusListener (2),
ColumnFamilyDescriptor (12), ColumnFamilyDescriptorBuilder (54), CompactType (2),
ConnectionFactory (2), ConnectionImplementation (8), ConnectionUtils (1), CoprocessorHConnection (3),
DelayingRunner (3), Delete (7), Get (9), HBaseAdmin (103), HRegionLocator (2), HTable (31),
HTableMultiplexer (6), HTableWrapper (7), ImmutableHColumnDescriptor (1),
ImmutableHTableDescriptor (3), Increment (1), MasterCallable (2),
MasterCoprocessorRpcChannelImpl (1), MetaCache (6), MetricsConnection (40), MultiAction (8),
MultiResponse (1), MultiServerCallable (6), Mutation (21), Operation (1),
PackagePrivateFieldAccessor (1), PreemptiveFastFailInterceptor (12), Put (19), Query (9),
QuotaStatusCalls (4), RawAsyncHBaseAdmin (125), RawAsyncTable (1), RawScanResultConsumer (1),
RegionAdminServiceCallable (3), RegionCoprocessorRpcChannel (2), RegionCoprocessorServiceExec (2),
RegionReplicaUtil (6), RegionServerCallable (2), RegionServerCoprocessorRpcChannelImpl (1),
Registry (3); the table is truncated here in the original message.]

[12/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
index 13bde46..9aa8673 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
@@ -37,18 +37,18 @@
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ThreadLocalRandom;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -115,7 +115,7 @@
     public final Set<LocateRequest> pendingRequests = new HashSet<>();
 
     public final Map<LocateRequest, CompletableFuture<HRegionLocation>> allRequests =
-        new HashMap<>();
+        new LinkedHashMap<>();
 
     public boolean hasQuota(int max) {
       return pendingRequests.size() < max;
@@ -128,353 +128,358 @@
     public void send(LocateRequest req) {
       pendingRequests.add(req);
     }
-  }
-
-  AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
-    this.conn = conn;
-    this.maxConcurrentLocateRequestPerTable = conn.getConfiguration().getInt(
-      MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE);
-  }
-
-  private TableCache getTableCache(TableName tableName) {
-    return computeIfAbsent(cache, tableName, TableCache::new);
-  }
-
-  private void removeFromCache(HRegionLocation loc) {
-    TableCache tableCache = cache.get(loc.getRegionInfo().getTable());
-    if (tableCache == null) {
-      return;
-    }
-    tableCache.cache.computeIfPresent(loc.getRegionInfo().getStartKey(), (k, oldLoc) -> {
-      if (oldLoc.getSeqNum() > loc.getSeqNum() ||
-          !oldLoc.getServerName().equals(loc.getServerName())) {
-        return oldLoc;
-      }
-      return null;
-    });
-  }
-
-  // return whether we add this loc to cache
-  private boolean addToCache(TableCache tableCache, HRegionLocation loc) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Try adding " + loc + " to cache");
-    }
-    byte[] startKey = loc.getRegionInfo().getStartKey();
-    HRegionLocation oldLoc = tableCache.cache.putIfAbsent(startKey, loc);
-    if (oldLoc == null) {
-      return true;
-    }
-    if (oldLoc.getSeqNum() > loc.getSeqNum() ||
-        oldLoc.getServerName().equals(loc.getServerName())) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Will not add " + loc + " to cache because the old value " + oldLoc +
-            " is newer than us or has the same server name");
+
+    public Optional<LocateRequest> getCandidate() {
+      return allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
+    }
+
+    public void clearCompletedRequests(Optional<HRegionLocation> location) {
+      for (Iterator<Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>>> iter = allRequests
+          .entrySet().iterator(); iter.hasNext();) {
+        Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>> entry = iter.next();
+        if (tryComplete(entry.getKey(), entry.getValue(), location)) {
+          iter.remove();
+        }
+      }
+    }
+
+    private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
+        Optional<HRegionLocation> location) {
+      if (future.isDone()) {
+        return true;
+      }
+      if (!location.isPresent()) {
+        return false;
+      }
+      HRegionLocation loc = location.get();
+      boolean completed;
+      if (req.locateType.
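Why the HashMap to LinkedHashMap switch above matters: getCandidate() takes the first non-pending request from the map's key set, so iteration must follow insertion (arrival) order or request selection becomes arbitrary. A standalone demonstration of that ordering guarantee:

  import java.util.LinkedHashMap;
  import java.util.Map;
  import java.util.Optional;

  class InsertionOrderDemo {
    public static void main(String[] args) {
      Map<String, Integer> requests = new LinkedHashMap<>();
      requests.put("locate-row-c", 1);
      requests.put("locate-row-a", 2);
      requests.put("locate-row-b", 3);
      // LinkedHashMap iterates in insertion order, so the oldest request wins.
      Optional<String> candidate = requests.keySet().stream().findFirst();
      System.out.println(candidate.get()); // always "locate-row-c"
    }
  }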
[21/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index b75a6bd..77abd0a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
[Regenerated "var methods = {...}" javadoc index arrays omitted; the diff only renumbers the generated method flags after the method summary table shifted.]

[32/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Admin.html b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
index 88b7fa7..91d80e9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -265,7 +265,9 @@
 closeRegion(byte[] regionname, String serverName)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231). Use unassign(byte[], boolean).
 
@@ -274,7 +276,9 @@
 closeRegion(ServerName sn, HRegionInfo hri)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231). Use unassign(byte[], boolean).
 
@@ -283,7 +287,9 @@
 closeRegion(String regionname, String serverName)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231). Use unassign(byte[], boolean).
 
@@ -292,7 +298,9 @@
 closeRegionWithEncodedRegionName(String encodedRegionName, String serverName)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231). Use unassign(byte[], boolean).
 
@@ -1463,8 +1471,9 @@
 void
 splitRegion(byte[] regionName)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use
- splitRegionAsync(byte[], byte[]) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18229). Use splitRegionAsync(byte[], byte[]).
 
@@ -1473,8 +1482,9 @@
 void
 splitRegion(byte[] regionName, byte[] splitPoint)
 Deprecated.
-Since 2.0. Will be removed in 3.0. Use
- splitRegionAsync(byte[], byte[]) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18229). Use splitRegionAsync(byte[], byte[]).
 
@@ -2914,10 +2924,13 @@
 closeRegion
-void closeRegion(String regionname, String serverName)
-  throws IOException
-Deprecated. Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+@Deprecated
+void closeRegion(String regionname, String serverName)
+  throws IOException
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231).
+ Use unassign(byte[], boolean).
 Uses unassign(byte[], boolean) to unassign the region. For expert-admins.
 
 Parameters:
@@ -2934,10 +2947,13 @@
 closeRegion
-void closeRegion(byte[] regionname,
- 

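The replacement that all of the deprecation notices above point to, as a short sketch; it uses only Admin#unassign(byte[], boolean), which the rendered page itself references:

  import java.io.IOException;

  import org.apache.hadoop.hbase.client.Admin;

  class UnassignExample {
    static void reassignRegion(Admin admin, byte[] regionName) throws IOException {
      // force=false: fail if the region is already in transition instead of overriding it.
      admin.unassign(regionName, false);
    }
  }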
[03/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
index 803999d..8af4814 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
@@ -41,241 +41,248 @@
[This hunk re-renders the same FileIOEngine source already shown under FileIOEngine.FileWriteAccessor.html in message 01/51 above; the duplicate listing is omitted.]

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index f47225a..5a77704 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -841,1497 +841,1513 @@
    * @param regionname region name to close
    * @param serverName Deprecated. Not used.
    * @throws IOException if a remote or network exception occurs
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-   */
-  void closeRegion(final String regionname, final String serverName) throws IOException;
-
-  /**
-   * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
-   *
-   * @param regionname region name to close
-   * @param serverName Deprecated. Not used.
-   * @throws IOException if a remote or network exception occurs
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-   */
-  void closeRegion(final byte[] regionname, final String serverName) throws IOException;
-
-  /**
-   * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
-   *
-   * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
-   * suffix: e.g. if regionname is
-   * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.,
-   * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396.
-   * @param serverName Deprecated. Not used.
-   * @return Deprecated. Returns true always.
-   * @throws IOException if a remote or network exception occurs
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-   */
-  boolean closeRegionWithEncodedRegionName(final String encodedRegionName, final String serverName)
-      throws IOException;
-
-  /**
-   * Used {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
-   *
-   * @param sn Deprecated. Not used.
-   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-   */
-  void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException;
-
-  /**
-   * Get all the online regions on a region server.
-   */
-  List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;
-
-  /**
-   * Flush a table. Synchronous operation.
-   *
-   * @param tableName table to flush
-   * @throws IOException if a remote or network exception occurs
-   */
-  void flush(final TableName tableName) throws IOException;
-
-  /**
-   * Flush an individual region. Synchronous operation.
-   *
-   * @param regionName region to flush
-   * @throws IOException if a remote or network exception occurs
-   */
-  void flushRegion(final byte[] regionName) throws IOException;
-
-  /**
-   * Compact a table. Asynchronous operation.
-   *
-   * @param tableName table to compact
-   * @throws IOException if a remote or network exception occurs
-   */
-  void compact(final TableName tableName) throws IOException;
-
-  /**
-   * Compact an individual region. Asynchronous operation.
-   *
-   * @param regionName region to compact
-   * @throws IOException if a remote or network exception occurs
-   */
-  void compactRegion(final byte[] regionName) throws IOException;
-
-  /**
-   * Compact a column family within a table. Asynchronous operation.
-   *
-   * @param tableName table to compact
-   * @param columnFamily column family within a table
-   * @throws IOException if a remote or network exception occurs
-   */
-  void compact(final TableName tableName, final byte[] columnFamily)
-      throws IOException;
-
-  /**
-   * Compact a column family within a region. Asynchronous operation.
-   *
-   * @param regionName region to compact
-   * @param columnFamily column family within a region
-   * @throws IOException if a remote or network exception occurs
-   */
-  void compactRegion(final byte[] regionName, final byte[] columnFamily)
-      throws IOException;
-
-  /**
-   * Major compact a table. Asynchronous operation.
-   *
-   * @param tableName table to major compact
-   * @throws IOException if a remote or network exception occurs
-   */
-  void majorCompact(TableName tableName) throws IOException;
-
-  /**
-   * Major compact a table or an individual region.
[11/51] [partial] hbase-site git commit: Published site at .
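A sketch contrasting the two contracts documented in the rendered Admin javadoc above: flush is a synchronous operation that returns when done, while compact only submits the request. Both methods appear in the source shown:

  import java.io.IOException;

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;

  class FlushThenCompact {
    static void flushThenCompact(Admin admin, TableName table) throws IOException {
      admin.flush(table);   // synchronous: returns after the flush completes
      admin.compact(table); // asynchronous: returns once the request is submitted
    }
  }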

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
index 13bde46..9aa8673 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
@@ -37,18 +37,18 @@
 029
 030import java.io.IOException;
 031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.HashSet;
-034import java.util.Iterator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.LinkedHashMap;
 035import java.util.List;
 036import java.util.Map;
-037import java.util.Set;
-038import 
java.util.concurrent.CompletableFuture;
-039import 
java.util.concurrent.ConcurrentHashMap;
-040import 
java.util.concurrent.ConcurrentMap;
-041import 
java.util.concurrent.ConcurrentNavigableMap;
-042import 
java.util.concurrent.ConcurrentSkipListMap;
-043import 
java.util.concurrent.ThreadLocalRandom;
+037import java.util.Optional;
+038import java.util.Set;
+039import 
java.util.concurrent.CompletableFuture;
+040import 
java.util.concurrent.ConcurrentHashMap;
+041import 
java.util.concurrent.ConcurrentMap;
+042import 
java.util.concurrent.ConcurrentNavigableMap;
+043import 
java.util.concurrent.ConcurrentSkipListMap;
 044
 045import org.apache.commons.logging.Log;
 046import 
org.apache.commons.logging.LogFactory;
@@ -115,7 +115,7 @@
 107public final Set 
pendingRequests = new HashSet<>();
 108
 109public final Map> allRequests =
-110new HashMap<>();
+110new LinkedHashMap<>();
 111
 112public boolean hasQuota(int max) {
 113  return pendingRequests.size() < 
max;
@@ -128,353 +128,358 @@
 120public void send(LocateRequest req) 
{
 121  pendingRequests.add(req);
 122}
-123  }
-124
-125  
AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
-126this.conn = conn;
-127
this.maxConcurrentLocateRequestPerTable = conn.getConfiguration().getInt(
-128  
MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, 
DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE);
-129  }
-130
-131  private TableCache 
getTableCache(TableName tableName) {
-132return computeIfAbsent(cache, 
tableName, TableCache::new);
-133  }
-134
-135  private void 
removeFromCache(HRegionLocation loc) {
-136TableCache tableCache = 
cache.get(loc.getRegionInfo().getTable());
-137if (tableCache == null) {
-138  return;
-139}
-140
tableCache.cache.computeIfPresent(loc.getRegionInfo().getStartKey(), (k, 
oldLoc) -> {
-141  if (oldLoc.getSeqNum() > 
loc.getSeqNum() ||
-142  
!oldLoc.getServerName().equals(loc.getServerName())) {
-143return oldLoc;
-144  }
-145  return null;
-146});
-147  }
-148
-149  // return whether we add this loc to 
cache
-150  private boolean addToCache(TableCache 
tableCache, HRegionLocation loc) {
-151if (LOG.isTraceEnabled()) {
-152  LOG.trace("Try adding " + loc + " 
to cache");
-153}
-154byte[] startKey = 
loc.getRegionInfo().getStartKey();
-155HRegionLocation oldLoc = 
tableCache.cache.putIfAbsent(startKey, loc);
-156if (oldLoc == null) {
-157  return true;
-158}
-159if (oldLoc.getSeqNum() > 
loc.getSeqNum() ||
-160
oldLoc.getServerName().equals(loc.getServerName())) {
-161  if (LOG.isTraceEnabled()) {
-162LOG.trace("Will not add " + loc + 
" to cache because the old value " + oldLoc +
-163" is newer than us or has the 
same server name");
+123
+124public Optional 
getCandidate() {
+125  return 
allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
+126}
+127
+128public void 
clearCompletedRequests(Optional location) {
+129  for 
(Iterator>> iter = allRequests
+130  .entrySet().iterator(); 
iter.hasNext();) {
+131Map.Entry> entry = iter.next();
+132if (tryComplete(entry.getKey(), 
entry.getValue(), location)) {
+133  iter.remove();
+134}
+135  }
+136}
+137
+138private boolean 
tryComplete(LocateRequest req, CompletableFuture 
future,
+139Optional 
location) {
+140  if (future.isDone()) {
+141return true;
+142  }
+143  if (!location.isPresent()) {
+144return false;
+145  }
+146  HRegionLocation loc = 
location.get();
+147  boolean completed;
+148  if 
(req.locateType.equals(RegionLo

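The HashMap to LinkedHashMap switch in the hunk above is what makes the new getCandidate() sensible: it returns the first non-pending key of allRequests, and LinkedHashMap iterates in insertion order, so the oldest waiting locate request is picked first, where HashMap would hand back an arbitrary one. A tiny standalone demonstration of just that ordering property (illustrative, not HBase code):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class IterationOrderDemo {
      public static void main(String[] args) {
        Map<String, Integer> hash = new HashMap<>();
        Map<String, Integer> linked = new LinkedHashMap<>();
        for (String key : new String[] { "reqC", "reqA", "reqB" }) {
          hash.put(key, 0);
          linked.put(key, 0);
        }
        // LinkedHashMap iterates in insertion order, so the "first" entry is
        // the oldest one added; HashMap's iteration order is unspecified.
        System.out.println(linked.keySet().iterator().next()); // always reqC
        System.out.println(hash.keySet().iterator().next());   // arbitrary
      }
    }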
[07/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl.html
index e7cd47c..d61d346 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerImpl.html


[26/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index 5759774..d0bd3d1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static class BaseLoadBalancer.Cluster
+protected static class BaseLoadBalancer.Cluster
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 An efficient array based implementation similar to 
ClusterState for keeping
  the status of the cluster in terms of region assignment and distribution.
@@ -199,170 +199,166 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 initialRegionIndexToServerIndex 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in java.util">ComparatorInteger>
-localityComparator 
-
-
 (package private) float[]
 localityPerServer 
 
-
+
 (package private) boolean
 multiServersPerHost 
 
-
+
 static BaseLoadBalancer.Cluster.Action
 NullAction 
 
-
+
 (package private) int
 numHosts 
 
-
+
 (package private) int[]
 numMaxRegionsPerTable 
 
-
+
 (package private) int
 numMovedRegions 
 
-
+
 (package private) int
 numRacks 
 
-
+
 (package private) int
 numRegions 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in java.util">ComparatorInteger>
 numRegionsComparator 
 
-
+
 (package private) int[][]
 numRegionsPerServerPerTable 
 
-
+
 (package private) int
 numServers 
 
-
+
 (package private) int
 numTables 
 
-
+
 (package private) int[][]
 primariesOfRegionsPerHost 
 
-
+
 (package private) int[][]
 primariesOfRegionsPerRack 
 
-
+
 (package private) int[][]
 primariesOfRegionsPerServer 
 
-
+
 private float[][]
 rackLocalities 
 
-
+
 protected RackManager
 rackManager 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[]
 racks 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer>
 racksToIndex 
 
-
+
 private RegionLocationFinder
 regionFinder 
 
-
+
 (package private) int[]
 regionIndexToPrimaryIndex 
 
-
+
 (package private) int[]
 regionIndexToServerIndex 
 
-
+
 (package private) int[]
 regionIndexToTableIndex 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true";
 title="class or interface in java.util">Deque[]
 regionLoads 
 
-
+
 (package private) int[][]
 regionLocations 
 
-
+
 (package private) HRegionInfo[]
 regions 
 
-
+
 (package private) int[][]
 regionsPerHost 
 
-
+
 (package private) int[][]
 regionsPerRack 
 
-
+
 (package private) int[][]
 regionsPerServer 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapInteger>
 regionsToIndex 
 
-
+
 private int[][]
 regionsToMostLocalEntities 
 
-
+
 (package private) int[]
 serverIndexToHostInde

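The class summary in the hunk above describes an "efficient array based implementation" of cluster state. The core idea is that regions and servers are interned to dense int indices so the balancer's cost functions can walk flat arrays instead of object maps. A much simplified, illustrative sketch; the two field names are borrowed from the listing, everything else is invented:

    // Illustrative only: regions and servers are interned to dense int indices.
    public class ClusterIndexSketch {
      int[] regionIndexToServerIndex; // region i -> index of its hosting server
      int[][] regionsPerServer;       // server j -> indices of the regions it hosts

      int serverOf(int region) {
        return regionIndexToServerIndex[region];
      }

      // A real move also updates regionsPerServer, per-table counts, locality
      // arrays and so on; this shows only the core index bookkeeping.
      void move(int region, int toServer) {
        regionIndexToServerIndex[region] = toServer;
      }
    }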
[31/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 8935670..ff435ee 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -192,7 +192,9 @@ public interface closeRegion(byte[] regionName,
http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional serverName)
 Deprecated. 
-Since 2.0. Will be removed 
in 3.0. Use unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use unassign(byte[],
 boolean).
 
 
 
@@ -1420,9 +1422,12 @@ public interface 
 
 closeRegion
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureBoolean> closeRegion(byte[] regionName,
-   http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional serverName)
-Deprecated. Since 2.0. Will be removed in 3.0. Use unassign(byte[],
 boolean) instead.
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureBoolean> closeRegion(byte[] regionName,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional serverName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 
3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use unassign(byte[],
 boolean).
Close a region. For expert-admins. Runs close on the 
regionserver. The master will not be
 informed of the close.
 
@@ -1440,7 +1445,7 @@ public interface 
 
 getOnlineRegions
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList> getOnlineRegions(ServerName serverName)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList> getOnlineRegions(ServerName serverName)
 Get all the online regions on a region server.
 
 
@@ -1450,7 +1455,7 @@ public interface 
 
 getTableRegions
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList> getTableRegions(TableName tableName)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList> getTableRegions(TableName tableName)
 Get the regions of a given table.
 
 
@@ -1460,7 +1465,7 @@ public interface 
 
 flush
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid> flush(TableName tableName)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid> flush

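Each AsyncAdmin method above returns a CompletableFuture instead of blocking. A minimal sketch of composing one such call; it assumes an AsyncAdmin instance is already in hand, and the logging is purely illustrative:

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    public class AsyncAdminSketch {
      // Flush a table and report completion without blocking the caller.
      static CompletableFuture<Void> flushAndLog(AsyncAdmin admin, TableName tn) {
        return admin.flush(tn).whenComplete((v, err) -> {
          if (err != null) {
            System.err.println("flush of " + tn + " failed: " + err);
          } else {
            System.out.println("flushed " + tn);
          }
        });
      }
    }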
[23/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index af6823b..42637bb 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -424,16 +424,16 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.ProcedureState
-org.apache.hadoop.hbase.HConstants.Modify
+org.apache.hadoop.hbase.KeyValue.Type
 org.apache.hadoop.hbase.MetaTableAccessor.QueryType
+org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
+org.apache.hadoop.hbase.HConstants.Modify
 org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.ProcedureState
 org.apache.hadoop.hbase.KeepDeletedCells
-org.apache.hadoop.hbase.HConstants.OperationStatusCode
 org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
-org.apache.hadoop.hbase.KeyValue.Type
+org.apache.hadoop.hbase.HConstants.OperationStatusCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/package-use.html
index 8bad7c1..42a31e3 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-use.html
@@ -650,7 +650,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -912,7 +912,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -1006,7 +1006,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -1093,7 +1093,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -1475,7 +1475,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -1586,7 +1586,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -2001,7 +2001,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -2111,7 +2111,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -2242,7 +2242,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -2305,7 +2305,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -2465,7 +2465,7 @@ service.
 HTableDescriptor
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
-  

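All of the deprecation entries above point to TableDescriptorBuilder as the replacement for direct HTableDescriptor construction. A hedged sketch of the builder form; the table and family names are invented, and the method names are as of this snapshot:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDescriptorSketch {
      public static void main(String[] args) {
        // The builder replaces "new HTableDescriptor(...)" plus setters.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestTable"))
            .addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
            .build();
        System.out.println(td.getTableName());
      }
    }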
[49/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/deprecated-list.html
--
diff --git a/apidocs/deprecated-list.html b/apidocs/deprecated-list.html
index 409a3d7..0fd1000 100644
--- a/apidocs/deprecated-list.html
+++ b/apidocs/deprecated-list.html
@@ -107,7 +107,7 @@
 
 org.apache.hadoop.hbase.HTableDescriptor
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 
@@ -259,27 +259,37 @@
 
 
 org.apache.hadoop.hbase.client.AsyncAdmin.closeRegion(byte[],
 Optional)
-Since 2.0. Will be removed 
in 3.0. Use AsyncAdmin.unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use AsyncAdmin.unassign(byte[],
 boolean).
 
 
 
 org.apache.hadoop.hbase.client.Admin.closeRegion(byte[],
 String)
-Since 2.0. Will be removed 
in 3.0. Use Admin.unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use Admin.unassign(byte[],
 boolean).
 
 
 
 org.apache.hadoop.hbase.client.Admin.closeRegion(ServerName,
 HRegionInfo)
-Since 2.0. Will be removed 
in 3.0. Use Admin.unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use Admin.unassign(byte[],
 boolean).
 
 
 
 org.apache.hadoop.hbase.client.Admin.closeRegion(String,
 String)
-Since 2.0. Will be removed 
in 3.0. Use Admin.unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use Admin.unassign(byte[],
 boolean).
 
 
 
 org.apache.hadoop.hbase.client.Admin.closeRegionWithEncodedRegionName(String,
 String)
-Since 2.0. Will be removed 
in 3.0. Use Admin.unassign(byte[],
 boolean) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18231";>HBASE-18231).
+ Use Admin.unassign(byte[],
 boolean).
 
 
 
@@ -440,7 +450,9 @@
 
 
 org.apache.hadoop.hbase.HTableDescriptor.getFamiliesKeys()
-Use HTableDescriptor.getColumnFamilyNames().
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18008";>HBASE-18008).
+ Use HTableDescriptor.getColumnFamilyNames().
 
 
 
@@ -840,14 +852,16 @@
 
 
 org.apache.hadoop.hbase.client.Admin.splitRegion(byte[])
-Since 2.0. Will be removed 
in 3.0. Use
- Admin.splitRegionAsync(byte[],
 byte[]) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18229";>HBASE-18229).
+ Use Admin.splitRegionAsync(byte[],
 byte[]).
 
 
 
 org.apache.hadoop.hbase.client.Admin.splitRegion(byte[],
 byte[])
-Since 2.0. Will be removed 
in 3.0. Use
- Admin.splitRegionAsync(byte[],
 byte[]) instead.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18229";>HBASE-18229).
+ Use Admin.splitRegionAsync(byte[],
 byte[]).
 
 
 
@@ -921,17 +935,23 @@
 
 
 org.apache.hadoop.hbase.HColumnDescriptor(byte[])
-use ColumnFamilyDescriptorBuilder.of(byte[])
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(byte[]).
 
 
 
 org.apache.hadoop.hbase.HColumnDescriptor(HColumnDescriptor)
-use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor).
 
 
 
 org.apache.hadoop.hbase.HColumnDescriptor(String)
-use ColumnFamilyDescriptorBuilder.of(String)
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(String).
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index 7ae0b6a..062caaf 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -1400,31 +1400,41 @@
 closeRegion(String,
 String) - Method in interface org.apache.hadoop.hbase.client.Admin
 
 Deprecated.
-Since 2.0. Will be removed 
in 3.0. Use Admin.unassign(byte[],

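The closeRegion and splitRegion deprecations listed above converge on two replacements, unassign(byte[], boolean) and splitRegionAsync(byte[], byte[]). A hedged before and after sketch; the "before" calls are shown as comments, and passing a null split point (letting the region choose) is an assumption of this example:

    import org.apache.hadoop.hbase.client.Admin;

    public class RegionOpsMigrationSketch {
      static void migrate(Admin admin, byte[] regionName) throws Exception {
        // Before (deprecated): admin.closeRegion(regionName, serverNameString);
        // After: let the master move the region; 'false' means a plain unassign.
        admin.unassign(regionName, false);

        // Before (deprecated): admin.splitRegion(regionName);
        // After: the async variant returns a Future we can wait on to keep the
        // old blocking behaviour; a null split point lets the region choose.
        admin.splitRegionAsync(regionName, null).get();
      }
    }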
[47/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index b39770e..8bc8e3a 100644
--- a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- use TableDescriptorBuilder to 
build HTableDescriptor.
+ Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
@@ -492,7 +492,9 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set
 getFamiliesKeys()
 Deprecated. 
-Use getColumnFamilyNames().
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18008";>HBASE-18008).
+ Use getColumnFamilyNames().
 
 
 
@@ -1217,7 +1219,7 @@ implements 
 
 NAMESPACE_FAMILY_INFO
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String NAMESPACE_FAMILY_INFO
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String NAMESPACE_FAMILY_INFO
 Deprecated. 
 
 See Also:
@@ -1231,7 +1233,7 @@ implements 
 
 NAMESPACE_FAMILY_INFO_BYTES
-public static final byte[] NAMESPACE_FAMILY_INFO_BYTES
+public static final byte[] NAMESPACE_FAMILY_INFO_BYTES
 Deprecated. 
 
 
@@ -1241,7 +1243,7 @@ implements 
 
 NAMESPACE_COL_DESC_BYTES
-public static final byte[] NAMESPACE_COL_DESC_BYTES
+public static final byte[] NAMESPACE_COL_DESC_BYTES
 Deprecated. 
 
 
@@ -1251,7 +1253,7 @@ implements 
 
 NAMESPACE_TABLEDESC
-public static final HTableDescriptor NAMESPACE_TABLEDESC
+public static final HTableDescriptor NAMESPACE_TABLEDESC
 Deprecated. 
 Table descriptor for namespace table
 
@@ -2127,8 +2129,11 @@ public http://docs.oracle.com/javase/8/docs/api/java/util/Collecti
 
 
 getFamiliesKeys
-public http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set getFamiliesKeys()
-Deprecated. Use getColumnFamilyNames().
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
+public http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set getFamiliesKeys()
+Deprecated. As of release 2.0.0, this will be removed in HBase 
3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18008";>HBASE-18008).
+ Use getColumnFamilyNames().
 Returns all the column family names of the current table. 
The map of
  HTableDescriptor contains mapping of family name to HColumnDescriptors.
  This returns all the keys of the family map which represents the column
@@ -2145,7 +2150,7 @@ public http://docs.oracle.com/javase/8/docs/api/java/util/Collecti
 
 
 getColumnFamilyCount
-public int getColumnFamilyCount()
+public int getColumnFamilyCount()
 Deprecated. 
 Returns the count of the column families of the table.
 
@@ -2163,7 +2168,7 @@ public http://docs.oracle.com/javase/8/docs/api/java/util/Collecti
 
 getColumnFamilies
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
-public HColumnDescriptor[] getColumnFamilies()
+public HColumnDescriptor[] getColumnFamilies()
 Deprecated. 
 Returns an array all the HColumnDescriptor of the column 
families
  of the table.
@@ -2184,7 +2189,7 @@ public 
 getFamily
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
-public HColumnDescriptor getFamily(byte[] column)
+public HColumnDescriptor getFamily(byte[] column)
 Deprecated. Use getColumnFamily(byte[]).
 Returns the HColumnDescriptor for a specific column family 
with name as
  specified by the parameter column.
@@ -2203,7 +2208,7 @@ public 
 
 removeFamily
-public HColumnDescriptor removeFamily(byte[] column)
+public HColumnDescriptor removeFamily(byte[] column)
 Deprecated. 
 Removes the HColumnDescriptor with name specified by the 
parameter column
  from the table descriptor
@@ -2222,7 +2227,7 @@ public 
 
 toHColumnDescriptor
-protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc)
+protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc)
 Deprecated. 
 Return a HCo

[29/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
index ab028a2..b738b29 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class AsyncTableResultScanner
+class AsyncTableResultScanner
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ResultScanner, RawScanResultConsumer
 The ResultScanner implementation 
for AsyncTable. It will fetch data 
automatically
@@ -186,6 +186,10 @@ implements resumer 
 
 
+private Scan
+scan 
+
+
 private ScanMetrics
 scanMetrics 
 
@@ -262,8 +266,8 @@ implements 
 void
 onHeartbeat(RawScanResultConsumer.ScanController controller)
-Indicate that there is an heartbeat message but we have not 
cumulated enough cells to call
- onNext.
+Indicate that there is a heartbeat message but we have not 
accumulated enough cells to call
+ RawScanResultConsumer.onNext(Result[],
 ScanController).
 
 
 
@@ -337,7 +341,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -346,7 +350,7 @@ implements 
 
 rawTable
-private final RawAsyncTable rawTable
+private final RawAsyncTable rawTable
 
 
 
@@ -355,7 +359,16 @@ implements 
 
 maxCacheSize
-private final long maxCacheSize
+private final long maxCacheSize
+
+
+
+
+
+
+
+scan
+private final Scan scan
 
 
 
@@ -364,7 +377,7 @@ implements 
 
 queue
-private final http://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true";
 title="class or interface in java.util">Queue queue
+private final http://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true";
 title="class or interface in java.util">Queue queue
 
 
 
@@ -373,7 +386,7 @@ implements 
 
 scanMetrics
-private ScanMetrics scanMetrics
+private ScanMetrics scanMetrics
 
 
 
@@ -382,7 +395,7 @@ implements 
 
 cacheSize
-private long cacheSize
+private long cacheSize
 
 
 
@@ -391,7 +404,7 @@ implements 
 
 closed
-private boolean closed
+private boolean closed
 
 
 
@@ -400,7 +413,7 @@ implements 
 
 error
-private http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable error
+private http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable error
 
 
 
@@ -409,7 +422,7 @@ implements 
 
 resumer
-private RawScanResultConsumer.ScanResumer resumer
+private RawScanResultConsumer.ScanResumer resumer
 
 
 
@@ -426,7 +439,7 @@ implements 
 
 AsyncTableResultScanner
-public AsyncTableResultScanner(RawAsyncTable table,
+public AsyncTableResultScanner(RawAsyncTable table,
Scan scan,
long maxCacheSize)
 
@@ -445,7 +458,7 @@ implements 
 
 addToCache
-private void addToCache(Result result)
+private void addToCache(Result result)
 
 
 
@@ -454,7 +467,7 @@ implements 
 
 stopPrefetch
-private void stopPrefetch(RawScanResultConsumer.ScanController controller)
+private void stopPrefetch(RawScanResultConsumer.ScanController controller)
 
 
 
@@ -463,7 +476,7 @@ implements 
 
 onNext
-public void onNext(Result[] results,
+public void onNext(Result[] results,
RawScanResultConsumer.ScanController controller)
 Description copied from 
interface: RawScanResultConsumer
Indicate that we have received some data.
@@ -484,10 +497,17 @@ implements 
 
 onHeartbeat
-public void onHeartbeat(RawScanResultConsumer.ScanController controller)
+public void onHeartbeat(RawScanResultConsumer.ScanController controller)
 Description copied from 
interface: RawScanResultConsumer
-Indicate that there is an heartbeat message but we have not 
cumulated enough cells to call
- onNext.
+Indicate that there is a heartbeat message but we have not 
accumulated enough cells to call
+ RawScanResultConsumer.onNext(Result[],
 ScanController).
+ 
+ Note that this method will always be called when the RS returns something to us 
but we do not have
+ enough cells to call RawScanResultConsumer.onNext(Result[],
 ScanController). Sometimes it may not be a
+ 'heartbeat' message for the RS; for example, we may have a large row with many cells, 
and the size limit is
+ exceeded before sending all the cells for this row. The RS does send some 
data to us, and the
+ time limit has not been reached, but we cannot return the data to the client, so 
here we call this
+ method to tell the client we have 

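This javadoc describes the RawScanResultConsumer heartbeat contract that the scan-cursor work builds on. A hedged sketch of a consumer that reports progress on heartbeats; it assumes the Optional-returning cursor() accessor on ScanController added by HBASE-18553, and the printing is illustrative:

    import org.apache.hadoop.hbase.client.RawScanResultConsumer;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CursorAwareConsumer implements RawScanResultConsumer {
      @Override
      public void onNext(Result[] results, ScanController controller) {
        // Real work would consume the cells; here we just count them.
        System.out.println("received " + results.length + " results");
      }

      @Override
      public void onHeartbeat(ScanController controller) {
        // No complete rows yet, but the cursor (HBASE-18553) still tells us how
        // far the server side has scanned, which is useful for progress reports.
        controller.cursor().ifPresent(
            c -> System.out.println("scanned up to row " + Bytes.toStringBinary(c.getRow())));
      }

      @Override
      public void onError(Throwable error) {
        error.printStackTrace();
      }

      @Override
      public void onComplete() {
        System.out.println("scan finished");
      }
    }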
[08/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html
index e7cd47c..d61d346 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState.html


[48/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
index 5b9a148..a67672f 100644
--- a/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -502,7 +502,9 @@ implements  
 HColumnDescriptor(byte[] familyName)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.of(byte[])
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(byte[]).
 
 
 
@@ -516,7 +518,9 @@ implements  
 HColumnDescriptor(HColumnDescriptor desc)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor).
 
 
 
@@ -531,7 +535,9 @@ implements  
 HColumnDescriptor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String familyName)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.of(String)
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(String).
 
 
 
@@ -1883,8 +1889,11 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String familyName)
-Deprecated. use ColumnFamilyDescriptorBuilder.of(String)
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
+public HColumnDescriptor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String familyName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 
3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(String).
 Construct a column descriptor specifying only the family 
name
  The other attributes are defaulted.
 
@@ -1900,8 +1909,11 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(byte[] familyName)
-Deprecated. use ColumnFamilyDescriptorBuilder.of(byte[])
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
+public HColumnDescriptor(byte[] familyName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 
3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(byte[]).
 Construct a column descriptor specifying only the family 
name
  The other attributes are defaulted.
 
@@ -1917,8 +1929,11 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(HColumnDescriptor desc)
-Deprecated. use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
+public HColumnDescriptor(HColumnDescriptor desc)
+Deprecated. As of release 2.0.0, this will be removed in HBase 
3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-18433";>HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor).
 Constructor.
  Makes a deep copy of the supplied descriptor.
  Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
@@ -1934,7 +1949,7 @@ implements 
 
 HColumnDescriptor
-protected HColumnDescriptor(HColumnDescriptor desc,
+protected HColumnDescriptor(HColumnDescriptor desc,
 boolean deepClone)
 Deprecated. 
 
@@ -1945,7 +1960,7 @@ implements 
 
 HColumnDescriptor
-protected HColumnDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
+protected HColumnDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
 Deprecated. 
 
 
@@ -1964,7 +1979,7 @@ implements 
 isLegalFamilyName
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
-public static byte[] isLegalFamilyName(byte[] b)
+public static byte[] isLegalFamilyName(byte[] b)
 Deprecated. Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
 
 Parameters:
@@ -1985,7 +2000,7 @@ public static byte[] 
 
 getName
-public byte[] getName()
+public byte[] getNa

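The constructor deprecations above all route through ColumnFamilyDescriptorBuilder. A short hedged sketch; the family name and the tuned attribute are invented for the example:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorSketch {
      public static void main(String[] args) {
        // of(String) covers the defaults-only constructors retired above.
        ColumnFamilyDescriptor plain = ColumnFamilyDescriptorBuilder.of("cf");

        // Anything beyond the defaults goes through the builder.
        ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(3)
            .build();
        System.out.println(plain.getNameAsString() + " / " + tuned.getMaxVersions());
      }
    }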
[05/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
index e7cd47c..d61d346 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
@@ -43,132 +43,132 @@
 035
 036import java.io.IOException;
 037import java.util.ArrayList;
-038import java.util.List;
-039import java.util.Optional;
-040import 
java.util.concurrent.CompletableFuture;
-041import java.util.concurrent.TimeUnit;
-042
-043import org.apache.commons.logging.Log;
-044import 
org.apache.commons.logging.LogFactory;
-045import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-046import 
org.apache.hadoop.hbase.HRegionLocation;
-047import 
org.apache.hadoop.hbase.NotServingRegionException;
-048import 
org.apache.hadoop.hbase.UnknownScannerException;
-049import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-050import 
org.apache.hadoop.hbase.client.RawScanResultConsumer.ScanResumer;
-051import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-052import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-053import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-054import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-055import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-064
-065/**
-066 * Retry caller for scanning a region.
-067 * 

-068 * We will modify the {@link Scan} object passed in directly. The upper layer should store the -069 * reference of this object and use it to open new single region scanners. -070 */ -071@InterfaceAudience.Private -072class AsyncScanSingleRegionRpcRetryingCaller { -073 -074 private static final Log LOG = LogFactory.getLog(AsyncScanSingleRegionRpcRetryingCaller.class); -075 -076 private final HashedWheelTimer retryTimer; -077 -078 private final Scan scan; -079 -080 private final ScanMetrics scanMetrics; -081 -082 private final long scannerId; -083 -084 private final ScanResultCache resultCache; -085 -086 private final RawScanResultConsumer consumer; -087 -088 private final ClientService.Interface stub; -089 -090 private final HRegionLocation loc; -091 -092 private final boolean regionServerRemote; -093 -094 private final long scannerLeaseTimeoutPeriodNs; -095 -096 private final long pauseNs; -097 -098 private final int maxAttempts; -099 -100 private final long scanTimeoutNs; -101 -102 private final long rpcTimeoutNs; -103 -104 private final int startLogErrorsCnt; -105 -106 private final Runnable completeWhenNoMoreResultsInRegion; -107 -108 private final CompletableFuture future; -109 -110 private final HBaseRpcController controller; -111 -112 private byte[] nextStartRowWhenError; -113 -114 private boolean includeNextStartRowWhenError; -115 -116 private long nextCallStartNs; -117 -118 private int tries; -119 -120 private final List exceptions; -121 -122 private long nextCallSeq = -1L; -123 -124 private enum ScanControllerState { -125INITIALIZED, SUSPENDED, TERMINATED, DESTROYED -126 } -127 -128 // Since suspend and terminate should only be called within onNext or onHeartbeat(see the comments -129 // of RawScanResultConsumer.onNext and onHeartbeat), we need to add some check to prevent invalid -130 // usage. We use two things to prevent invalid usage: -131 // 1. Record the thread that construct the ScanControllerImpl instance. We will throw an -132 // IllegalStateException if the caller thread is not this thread. -133 // 2. The ControllerState. The initial state is INITIALIZED, if you call suspend, the state will -134 // be transformed to SUSPENDED, and if you call terminate, the state will be transformed to -135 // TERMINATED. And when we are back from onNext or onHeartbeat in the onComplete method, we will -136 // call destroy to get the current state and set the state t

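The class comment above describes two guards on ScanController.suspend()/terminate(): remembering the constructing thread, and a small state machine over INITIALIZED, SUSPENDED, TERMINATED and DESTROYED. A simplified, illustrative model of that preCheck pattern, not the real class:

    // Simplified model of the guard described above: calls are only legal from
    // the thread running onNext/onHeartbeat, and only in the INITIALIZED state.
    public class ScanControllerSketch {
      enum State { INITIALIZED, SUSPENDED, TERMINATED, DESTROYED }

      private final Thread callerThread = Thread.currentThread();
      private State state = State.INITIALIZED;

      private void preCheck() {
        if (Thread.currentThread() != callerThread) {
          throw new IllegalStateException("must be called from the scan callback thread");
        }
        if (state != State.INITIALIZED) {
          throw new IllegalStateException("suspend/terminate already called");
        }
      }

      public void suspend() {
        preCheck();
        state = State.SUSPENDED;
      }

      public void terminate() {
        preCheck();
        state = State.TERMINATED;
      }

      // Called by the framework after onNext/onHeartbeat returns: records the
      // final decision and makes any further suspend/terminate call illegal.
      State destroy() {
        State old = state;
        state = State.DESTROYED;
        return old;
      }
    }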

[43/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
index f95..1198ee7 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -108,718 +108,729 @@
 100  public static final String 
NEW_VERSION_BEHAVIOR = ColumnFamilyDescriptorBuilder.NEW_VERSION_BEHAVIOR;
 101  public static final boolean 
DEFAULT_NEW_VERSION_BEHAVIOR = 
ColumnFamilyDescriptorBuilder.DEFAULT_NEW_VERSION_BEHAVIOR;
 102  protected final 
ModifyableColumnFamilyDescriptor delegatee;
-103  /**
-104   * Construct a column descriptor 
specifying only the family name
-105   * The other attributes are 
defaulted.
-106   *
-107   * @param familyName Column family 
name. Must be 'printable' -- digit or
-108   * letter -- and may not contain a 
:
-109   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(String)}
-110   */
-111  public HColumnDescriptor(final String 
familyName) {
-112this(Bytes.toBytes(familyName));
-113  }
-114
-115  /**
-116   * Construct a column descriptor 
specifying only the family name
-117   * The other attributes are 
defaulted.
-118   *
-119   * @param familyName Column family 
name. Must be 'printable' -- digit or
-120   * letter -- and may not contain a 
:
-121   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(byte[])}
-122   */
-123  public HColumnDescriptor(final byte [] 
familyName) {
-124this(new 
ModifyableColumnFamilyDescriptor(familyName));
-125  }
-126
-127  /**
-128   * Constructor.
-129   * Makes a deep copy of the supplied 
descriptor.
-130   * Can make a modifiable descriptor 
from an UnmodifyableHColumnDescriptor.
-131   * @param desc The descriptor.
-132   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}
-133   */
-134  public 
HColumnDescriptor(HColumnDescriptor desc) {
-135this(desc, true);
-136  }
-137
-138  protected 
HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
-139this(deepClone ? new 
ModifyableColumnFamilyDescriptor(desc)
-140: desc.delegatee);
-141  }
-142
-143  protected 
HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
-144this.delegatee = delegate;
-145  }
-146
-147  /**
-148   * @param b Family name.
-149   * @return b
-150   * @throws IllegalArgumentException If 
not null and not a legitimate family
-151   * name: i.e. 'printable' and ends in a 
':' (Null passes are allowed because
-152   * b can be 
null when deserializing).  Cannot start with a '.'
-153   * either. Also Family can not be an 
empty value or equal "recovered.edits".
-154   * @deprecated Use {@link 
ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}.
-155   */
-156  @Deprecated
-157  public static byte [] 
isLegalFamilyName(final byte [] b) {
-158return 
ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
-159  }
-160
-161  /**
-162   * @return Name of this column family
-163   */
-164  @Override
-165  public byte [] getName() {
-166return delegatee.getName();
-167  }
-168
-169  /**
-170   * @return The name string of this 
column family
-171   */
-172  @Override
-173  public String getNameAsString() {
-174return delegatee.getNameAsString();
-175  }
-176
-177  /**
-178   * @param key The key.
-179   * @return The value.
-180   */
-181  @Override
-182  public byte[] getValue(byte[] key) {
-183return delegatee.getValue(key);
-184  }
-185
-186  /**
-187   * @param key The key.
-188   * @return The value as a string.
-189   */
-190  public String getValue(String key) {
-191byte[] value = 
getValue(Bytes.toBytes(key));
-192return value == null ? null : 
Bytes.toString(value);
-193  }
-194
-195  @Override
-196  public Map 
getValues() {
-197return delegatee.getValues();
-198  }
-199
-200  /**
-201   * @param key The key.
-202   * @param value The value.
-203   * @return this (for chained 
invocation)
-204   */
-205  public HColumnDescriptor 
setValue(byte[] key, byte[] value) {
-206
getDelegateeForModification().setValue(key, value);
-207return this;
-208  }
-209
-210  /**
-211   * @param key Key whose key and value 
we're to remove from HCD parameters.
-212   */
-213  public HColumnDescriptor remove(final 
byte [] key) {
-214
getDelegateeForModification().removeValue(new Bytes(key));
-215return this;
-216  }
-217
-218  /**
-219   * @param key The key.
-220   * @param value The value.
-221   * @return this (for chained 
invocation)
-222   */
-223  public HColumnDescriptor 
setValue(String key, String value) {
-224
getDelegateeForModification().setValue(key, value);
-225return this;
-226  }
-227
-228  /

[16/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
index f95..1198ee7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -108,718 +108,729 @@
 100  public static final String 
NEW_VERSION_BEHAVIOR = ColumnFamilyDescriptorBuilder.NEW_VERSION_BEHAVIOR;
 101  public static final boolean 
DEFAULT_NEW_VERSION_BEHAVIOR = 
ColumnFamilyDescriptorBuilder.DEFAULT_NEW_VERSION_BEHAVIOR;
 102  protected final 
ModifyableColumnFamilyDescriptor delegatee;
-103  /**
-104   * Construct a column descriptor 
specifying only the family name
-105   * The other attributes are 
defaulted.
-106   *
-107   * @param familyName Column family 
name. Must be 'printable' -- digit or
-108   * letter -- and may not contain a 
:
-109   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(String)}
-110   */
-111  public HColumnDescriptor(final String 
familyName) {
-112this(Bytes.toBytes(familyName));
-113  }
-114
-115  /**
-116   * Construct a column descriptor 
specifying only the family name
-117   * The other attributes are 
defaulted.
-118   *
-119   * @param familyName Column family 
name. Must be 'printable' -- digit or
-120   * letter -- and may not contain a 
:
-121   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(byte[])}
-122   */
-123  public HColumnDescriptor(final byte [] 
familyName) {
-124this(new 
ModifyableColumnFamilyDescriptor(familyName));
-125  }
-126
-127  /**
-128   * Constructor.
-129   * Makes a deep copy of the supplied 
descriptor.
-130   * Can make a modifiable descriptor 
from an UnmodifyableHColumnDescriptor.
-131   * @param desc The descriptor.
-132   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}
-133   */
-134  public 
HColumnDescriptor(HColumnDescriptor desc) {
-135this(desc, true);
-136  }
-137
-138  protected 
HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
-139this(deepClone ? new 
ModifyableColumnFamilyDescriptor(desc)
-140: desc.delegatee);
-141  }
-142
-143  protected 
HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
-144this.delegatee = delegate;
-145  }
-146
-147  /**
-148   * @param b Family name.
-149   * @return b
-150   * @throws IllegalArgumentException If 
not null and not a legitimate family
-151   * name: i.e. 'printable' and ends in a 
':' (Null passes are allowed because
-152   * b can be 
null when deserializing).  Cannot start with a '.'
-153   * either. Also Family can not be an 
empty value or equal "recovered.edits".
-154   * @deprecated Use {@link 
ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}.
-155   */
-156  @Deprecated
-157  public static byte [] 
isLegalFamilyName(final byte [] b) {
-158return 
ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
-159  }
-160
-161  /**
-162   * @return Name of this column family
-163   */
-164  @Override
-165  public byte [] getName() {
-166return delegatee.getName();
-167  }
-168
-169  /**
-170   * @return The name string of this 
column family
-171   */
-172  @Override
-173  public String getNameAsString() {
-174return delegatee.getNameAsString();
-175  }
-176
-177  /**
-178   * @param key The key.
-179   * @return The value.
-180   */
-181  @Override
-182  public byte[] getValue(byte[] key) {
-183return delegatee.getValue(key);
-184  }
-185
-186  /**
-187   * @param key The key.
-188   * @return The value as a string.
-189   */
-190  public String getValue(String key) {
-191byte[] value = 
getValue(Bytes.toBytes(key));
-192return value == null ? null : 
Bytes.toString(value);
-193  }
-194
-195  @Override
-196  public Map 
getValues() {
-197return delegatee.getValues();
-198  }
-199
-200  /**
-201   * @param key The key.
-202   * @param value The value.
-203   * @return this (for chained 
invocation)
-204   */
-205  public HColumnDescriptor 
setValue(byte[] key, byte[] value) {
-206
getDelegateeForModification().setValue(key, value);
-207return this;
-208  }
-209
-210  /**
-211   * @param key Key whose key and value 
we're to remove from HCD parameters.
-212   */
-213  public HColumnDescriptor remove(final 
byte [] key) {
-214
getDelegateeForModification().removeValue(new Bytes(key));
-215return this;
-216  }
-217
-218  /**
-219   * @param key The key.
-220   * @param value The value.
-221   * @return this (for chained 
invocation)
-222   */
-223  public HColumnDescriptor 
setValue(String key, String value) {
-224
getDelegateeForModification().setValue(key, value);
-225return this;
-226 
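
The three deprecated constructors in this diff map directly onto the builder
API. A minimal migration sketch (of and copy are the replacements named in the
deprecation notes above; the return type of copy and the setMaxVersions call
are assumptions for illustration):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnFamilyMigration {
      public static void main(String[] args) {
        // Before (deprecated): new HColumnDescriptor("cf")
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf");

        // Before (deprecated): new HColumnDescriptor(Bytes.toBytes("cf"))
        ColumnFamilyDescriptor cfFromBytes =
            ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"));

        // Before (deprecated): new HColumnDescriptor(desc), the deep-copy constructor
        ColumnFamilyDescriptor copy = ColumnFamilyDescriptorBuilder.copy(cf);

        // Attributes that used to go through HColumnDescriptor setters move to the builder
        ColumnFamilyDescriptor tuned =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(3)
                .build();
        System.out.println(tuned.getNameAsString() + " " + copy.getNameAsString());
      }
    }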

[35/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
index 0f5f496..444c7b2 100644
--- a/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -513,7 +513,9 @@ implements
 HColumnDescriptor(byte[] familyName)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.of(byte[])
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(byte[]).

@@ -527,7 +529,9 @@ implements
 HColumnDescriptor(HColumnDescriptor desc)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor).

@@ -542,7 +546,9 @@ implements
 HColumnDescriptor(String familyName)
 Deprecated. 
-use ColumnFamilyDescriptorBuilder.of(String)
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(String).

 
@@ -1894,8 +1900,11 @@ implements

 HColumnDescriptor
-public HColumnDescriptor(String familyName)
-Deprecated. use ColumnFamilyDescriptorBuilder.of(String)
+@Deprecated
+public HColumnDescriptor(String familyName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(String).
 Construct a column descriptor specifying only the family name
  The other attributes are defaulted.

@@ -1911,8 +1920,11 @@ implements

 HColumnDescriptor
-public HColumnDescriptor(byte[] familyName)
-Deprecated. use ColumnFamilyDescriptorBuilder.of(byte[])
+@Deprecated
+public HColumnDescriptor(byte[] familyName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.of(byte[]).
 Construct a column descriptor specifying only the family name
  The other attributes are defaulted.

@@ -1928,8 +1940,11 @@ implements

 HColumnDescriptor
-public HColumnDescriptor(HColumnDescriptor desc)
-Deprecated. use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+@Deprecated
+public HColumnDescriptor(HColumnDescriptor desc)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18433, https://issues.apache.org/jira/browse/HBASE-18433).
+ Use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor).
 Constructor.
  Makes a deep copy of the supplied descriptor.
  Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.

@@ -1945,7 +1960,7 @@ implements

 HColumnDescriptor
-protected HColumnDescriptor(HColumnDescriptor desc,
+protected HColumnDescriptor(HColumnDescriptor desc,
                             boolean deepClone)
 Deprecated. 

@@ -1956,7 +1971,7 @@ implements

 HColumnDescriptor
-protected HColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
+protected HColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
 Deprecated. 

@@ -1975,7 +1990,7 @@ implements

 isLegalFamilyName
 @Deprecated
-public static byte[] isLegalFamilyName(byte[] b)
+public static byte[] isLegalFamilyName(byte[] b)
 Deprecated. Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).

 Parameters:
@@ -1996,7 +2011,7 @@ public static byte[]

 getName
-public byte[] getName()
+public byte[] getName()
 Deprecated. 

 Specified by:
@@ -2012,7
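
The isLegalFamilyName forwarding above is a one-line drop-in; the builder
method keeps the same contract (returns its argument when legal, throws
IllegalArgumentException otherwise, and lets null pass for deserialization).
A quick sketch:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyNameCheck {
      public static void main(String[] args) {
        // Legal name: returned unchanged
        byte[] ok = ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes("cf1"));
        System.out.println("accepted: " + Bytes.toString(ok));

        try {
          // ':' is not allowed in a column family name
          ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes("bad:name"));
        } catch (IllegalArgumentException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }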

[45/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index efbf566..591b827 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -188,7 +188,9 @@ public interface
 closeRegion(byte[] regionName, Optional<ServerName> serverName)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).

@@ -1416,9 +1418,12 @@ public interface

 closeRegion
-CompletableFuture<Boolean> closeRegion(byte[] regionName,
-                                       Optional<ServerName> serverName)
-Deprecated. Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+@Deprecated
+CompletableFuture<Boolean> closeRegion(byte[] regionName,
+                                       Optional<ServerName> serverName)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).
 Close a region. For expert-admins Runs close on the regionserver. The master will not be
  informed of the close.

@@ -1436,7 +1441,7 @@ public interface

 getOnlineRegions
-CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName)
+CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName)
 Get all the online regions on a region server.

@@ -1446,7 +1451,7 @@ public interface

 getTableRegions
-CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName)
+CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName)
 Get the regions of a given table.

@@ -1456,7 +1461,7 @@ public interface

 flush
-CompletableFuture<Void> flush(TableName tableName)
+CompletableFuture<Void> flush(TableName tableName)

[24/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/monitoring/TaskMonitor.TaskFilter.TaskType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/monitoring/TaskMonitor.TaskFilter.TaskType.html
 
b/devapidocs/org/apache/hadoop/hbase/monitoring/TaskMonitor.TaskFilter.TaskType.html
new file mode 100644
index 000..1ca0acd
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/monitoring/TaskMonitor.TaskFilter.TaskType.html
@@ -0,0 +1,448 @@
+TaskMonitor.TaskFilter.TaskType (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.monitoring
+Enum TaskMonitor.TaskFilter.TaskType
+
+java.lang.Object
+  java.lang.Enum<TaskMonitor.TaskFilter.TaskType>
+    org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
+
+All Implemented Interfaces:
+Serializable, Comparable<TaskMonitor.TaskFilter.TaskType>
+
+Enclosing interface:
+TaskMonitor.TaskFilter
+
+public static enum TaskMonitor.TaskFilter.TaskType
+extends Enum<TaskMonitor.TaskFilter.TaskType>
+
+Enum Constant Summary
+ALL
+GENERAL
+HANDLER
+OPERATION
+RPC
+
+Field Summary
+private String type
+
+Method Summary
+(package private) static TaskMonitor.TaskFilter.TaskType getTaskType(String type)
+String toString()
+static TaskMonitor.TaskFilter.TaskType valueOf(String name)
+Returns the enum constant of this type with the specified name.
+static TaskMonitor.TaskFilter.TaskType[] values()
+Returns an array containing the constants of this enum type, in
+the order they are declared.
+
+Methods inherited from class java.lang.Enum:
+clone, compareTo, equals, finalize,

[46/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Admin.html 
b/apidocs/org/apache/hadoop/hbase/client/Admin.html
index 935bd33..eb53c97 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -261,7 +261,9 @@ extends org.apache.hadoop.hbase.Abortable
 closeRegion(byte[] regionname, String serverName)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).

@@ -270,7 +272,9 @@ extends org.apache.hadoop.hbase.Abortable
 closeRegion(ServerName sn, HRegionInfo hri)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).

@@ -279,7 +283,9 @@ extends org.apache.hadoop.hbase.Abortable
 closeRegion(String regionname, String serverName)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).

@@ -288,7 +294,9 @@ extends org.apache.hadoop.hbase.Abortable
 closeRegionWithEncodedRegionName(String encodedRegionName, String serverName)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18231, https://issues.apache.org/jira/browse/HBASE-18231).
+ Use unassign(byte[], boolean).

@@ -1459,8 +1467,9 @@ extends org.apache.hadoop.hbase.Abortable
 void
 splitRegion(byte[] regionName)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use
- splitRegionAsync(byte[], byte[]) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18229, https://issues.apache.org/jira/browse/HBASE-18229).
+ Use splitRegionAsync(byte[], byte[]).

@@ -1469,8 +1478,9 @@ extends org.apache.hadoop.hbase.Abortable
 splitRegion(byte[] regionName, byte[] splitPoint)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use
- splitRegionAsync(byte[], byte[]) instead.
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (HBASE-18229, https://issues.apache.org/jira/browse/HBASE-18229).
+ Use splitRegionAsync(byte[], byte[]).

@@ -2910,10 +2920,13 @@ void

 closeRegion
-void closeRegion(String regionname,
-                 String serverName)
-          throws IOException
-Deprecated. Since 2.0. Will be removed in 3.0. Use unassign(byte[], boolean) instead.
+@Deprecated
+void closeRegion(String regionname,
+                 String serverName)
+          throws IOException
+Depre
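
On the blocking Admin interface the same migration looks like this (a sketch;
splitRegionAsync returning a waitable Future is the replacement named above,
and the split point is an illustrative value):

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdminMigration {
      static void migrate(Admin admin, byte[] regionName) throws Exception {
        // Before (deprecated): admin.closeRegion(regionName, serverName)
        admin.unassign(regionName, false); // false = not forcible

        // Before (deprecated): admin.splitRegion(regionName, splitPoint)
        admin.splitRegionAsync(regionName, Bytes.toBytes("splitrow")).get(); // block until done
      }
    }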

[50/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 8dfc563..1fc6d59 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20170815144527+00'00')
-/CreationDate (D:20170815144527+00'00')
+/ModDate (D:20170816144532+00'00')
+/CreationDate (D:20170816144532+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4310 0 R
-/PageLabels 4518 0 R
+/Outlines 4311 0 R
+/PageLabels 4519 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -24,7 +24,7 @@ endobj
 3 0 obj
 << /Type /Pages
 /Count 662

[25/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index 06ca604..61c9f2a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class BaseLoadBalancer.DefaultRackManager
+private static class BaseLoadBalancer.DefaultRackManager
 extends RackManager
 
 
@@ -214,7 +214,7 @@ extends 
 
 DefaultRackManager
-private DefaultRackManager()
+private DefaultRackManager()
 
 
 
@@ -231,7 +231,7 @@ extends 
 
 getRack
-public String getRack(ServerName server)
+public String getRack(ServerName server)
 Description copied from class: RackManager
 Get the name of the rack containing a server, according to the DNS to
  switch mapping.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index b42e509..5bcefd1 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public abstract class BaseLoadBalancer
+public abstract class BaseLoadBalancer
 extends Object
 implements LoadBalancer
 The base class for load balancers. It provides the functions used by
@@ -499,7 +499,7 @@ implements 
 
 MIN_SERVER_BALANCE
-protected static final int MIN_SERVER_BALANCE
+protected static final int MIN_SERVER_BALANCE
 
 See Also:
 Constant
 Field Values
@@ -512,7 +512,7 @@ implements 
 
 stopped
-private volatile boolean stopped
+private volatile boolean stopped
 
 
 
@@ -521,7 +521,7 @@ implements 
 
 EMPTY_REGION_LIST
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List EMPTY_REGION_LIST
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List EMPTY_REGION_LIST
 
 
 
@@ -530,7 +530,7 @@ implements 
 
 IDLE_SERVER_PREDICATOR
-static final http://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true";
 title="class or interface in java.util.function">Predicate IDLE_SERVER_PREDICATOR
+static final http://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true";
 title="class or interface in java.util.function">Predicate IDLE_SERVER_PREDICATOR
 
 
 
@@ -539,7 +539,7 @@ implements 
 
 regionFinder
-protected final RegionLocationFinder regionFinder
+protected final RegionLocationFinder regionFinder
 
 
 
@@ -548,7 +548,7 @@ implements 
 
 slop
-protected float slop
+protected float slop
 
 
 
@@ -557,7 +557,7 @@ implements 
 
 overallSlop
-protected float overallSlop
+protected float overallSlop
 
 
 
@@ -566,7 +566,7 @@ implements 
 
 config
-protected org.apache.hadoop.conf.Configuration config
+protected org.apache.hadoop.conf.Configuration config
 
 
 
@@ -575,7 +575,7 @@ implements 
 
 rackManager
-protected RackManager rackManager
+protected RackManager rackManager
 
 
 
@@ -584,7 +584,7 @@ implements 
 
 RANDOM
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random RANDOM
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random RANDOM
 
 
 
@@ -593,7 +593,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -602,7 +602,7 @@ implements 
 
 DEFAULT_TABLES_ON_MASTER
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] D

[13/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 2ab3342..d459974 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -304,824 +304,827 @@
 296   * @param regionName region name to 
close
 297   * @param serverName Deprecated. Not 
used anymore after deprecation.
 298   * @return Deprecated. Always returns 
true now.
-299   * @deprecated Since 2.0. Will be 
removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
-300   */
-301  CompletableFuture 
closeRegion(byte[] regionName, Optional serverName);
-302
-303  /**
-304   * Get all the online regions on a 
region server.
-305   */
-306  
CompletableFuture> getOnlineRegions(ServerName 
serverName);
-307
-308  /**
-309   * Get the regions of a given table.
-310   */
-311  
CompletableFuture> getTableRegions(TableName 
tableName);
-312
-313  /**
-314   * Flush a table.
-315   * @param tableName table to flush
-316   */
-317  CompletableFuture 
flush(TableName tableName);
-318
-319  /**
-320   * Flush an individual region.
-321   * @param regionName region to flush
-322   */
-323  CompletableFuture 
flushRegion(byte[] regionName);
-324
-325  /**
-326   * Compact a table. When the returned 
CompletableFuture is done, it only means the compact request
-327   * was sent to HBase and may need some 
time to finish the compact operation.
-328   * @param tableName table to compact
-329   */
-330  default CompletableFuture 
compact(TableName tableName) {
-331return compact(tableName, 
Optional.empty());
-332  }
-333
-334  /**
-335   * Compact a column family within a 
table. When the returned CompletableFuture is done, it only
-336   * means the compact request was sent 
to HBase and may need some time to finish the compact
-337   * operation.
-338   * @param tableName table to compact
-339   * @param columnFamily column family 
within a table. If not present, compact the table's all
-340   *  column families.
-341   */
-342  CompletableFuture 
compact(TableName tableName, Optional columnFamily);
-343
-344  /**
-345   * Compact an individual region. When 
the returned CompletableFuture is done, it only means the
-346   * compact request was sent to HBase 
and may need some time to finish the compact operation.
-347   * @param regionName region to 
compact
-348   */
-349  default CompletableFuture 
compactRegion(byte[] regionName) {
-350return compactRegion(regionName, 
Optional.empty());
-351  }
-352
-353  /**
-354   * Compact a column family within a 
region. When the returned CompletableFuture is done, it only
-355   * means the compact request was sent 
to HBase and may need some time to finish the compact
-356   * operation.
-357   * @param regionName region to 
compact
-358   * @param columnFamily column family 
within a region. If not present, compact the region's all
-359   *  column families.
-360   */
-361  CompletableFuture 
compactRegion(byte[] regionName, Optional columnFamily);
-362
-363  /**
-364   * Major compact a table. When the 
returned CompletableFuture is done, it only means the compact
-365   * request was sent to HBase and may 
need some time to finish the compact operation.
-366   * @param tableName table to major 
compact
-367   */
-368  default CompletableFuture 
majorCompact(TableName tableName) {
-369return majorCompact(tableName, 
Optional.empty());
-370  }
-371
-372  /**
-373   * Major compact a column family within 
a table. When the returned CompletableFuture is done, it
-374   * only means the compact request was 
sent to HBase and may need some time to finish the compact
-375   * operation.
-376   * @param tableName table to major 
compact
-377   * @param columnFamily column family 
within a table. If not present, major compact the table's all
-378   *  column families.
-379   */
-380  CompletableFuture 
majorCompact(TableName tableName, Optional columnFamily);
-381
-382  /**
-383   * Major compact a region. When the 
returned CompletableFuture is done, it only means the compact
-384   * request was sent to HBase and may 
need some time to finish the compact operation.
-385   * @param regionName region to major 
compact
-386   */
-387  default CompletableFuture 
majorCompactRegion(byte[] regionName) {
-388return majorCompactRegion(regionName, 
Optional.empty());
-389  }
-390
-391  /**
-392   * Major compact a column family within 
region. When the returned CompletableFuture is done, it
-393   * only means the compact request was 
sent to HBase and may 
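
The Optional-typed column family parameter above is what the default
overloads delegate to; both calls below resolve to the same underlying
request path. A short sketch using only the signatures shown in this diff:

    import java.util.Optional;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactSketch {
      static void compactExamples(AsyncAdmin admin) {
        TableName tn = TableName.valueOf("t1");

        // Default overload: compacts all column families of the table.
        // Per the javadoc above, the future completes once the request is
        // sent, not when the compaction itself finishes.
        admin.compact(tn).thenRun(() -> System.out.println("compact request sent"));

        // Restrict to a single family through the Optional parameter
        admin.majorCompact(tn, Optional.of(Bytes.toBytes("cf")));
      }
    }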

[18/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index 63d1c91..7a10320 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -375,6 +375,12 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+private static void
+removePermissionRecord(org.apache.hadoop.conf.Configuration conf,
+  UserPermission userPerm,
+  Table t) 
+
+
 (package private) static void
 removeTablePermissions(org.apache.hadoop.conf.Configuration conf,
   TableName tableName,
@@ -383,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Remove specified table column from the acl table.
 
 
-
+
 (package private) static void
 removeTablePermissions(org.apache.hadoop.conf.Configuration conf,
   TableName tableName,
@@ -391,14 +397,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Remove specified table from the _acl_ table.
 
 
-
+
 private static void
 removeTablePermissions(TableName tableName,
   byte[] column,
   Table table,
   boolean closeTable) 
 
-
+
 (package private) static void
 removeUserPermission(org.apache.hadoop.conf.Configuration conf,
 UserPermission userPerm,
@@ -407,15 +413,15 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  lists.
 
 
-
+
 static byte[]
 toNamespaceEntry(byte[] namespace) 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toNamespaceEntry(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String namespace) 
 
-
+
 (package private) static byte[]
 userPermissionKey(UserPermission userPerm)
 Build qualifier key from user permission:
@@ -424,11 +430,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
   username,family,qualifier
 
 
-
+
 (package private) static byte[]
 userPermissionRowKey(UserPermission userPerm) 
 
-
+
 static byte[]
 writePermissionsAsBytes(org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimapString,TablePermission> perms,
org.apache.hadoop.conf.Configuration conf)
@@ -557,7 +563,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LIST_CODE
-private static final int LIST_CODE
+private static final int LIST_CODE
 
 See Also:
 Constant
 Field Values
@@ -570,7 +576,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WRITABLE_CODE
-private static final int WRITABLE_CODE
+private static final int WRITABLE_CODE
 
 See Also:
 Constant
 Field Values
@@ -583,7 +589,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WRITABLE_NOT_ENCODED
-private static final int WRITABLE_NOT_ENCODED
+private static final int WRITABLE_NOT_ENCODED
 
 See Also:
 Constant
 Field Values
@@ -697,13 +703,29 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+removePermissionRecord
+private static void removePermissionRecord(org.apache.hadoop.conf.Configuration conf,
+   UserPermission userPerm,
+   Table t)
+throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
+
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOExcep

[10/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
index 13bde46..9aa8673 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
@@ -37,18 +37,18 @@
 029
 030import java.io.IOException;
 031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.HashSet;
-034import java.util.Iterator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.LinkedHashMap;
 035import java.util.List;
 036import java.util.Map;
-037import java.util.Set;
-038import 
java.util.concurrent.CompletableFuture;
-039import 
java.util.concurrent.ConcurrentHashMap;
-040import 
java.util.concurrent.ConcurrentMap;
-041import 
java.util.concurrent.ConcurrentNavigableMap;
-042import 
java.util.concurrent.ConcurrentSkipListMap;
-043import 
java.util.concurrent.ThreadLocalRandom;
+037import java.util.Optional;
+038import java.util.Set;
+039import 
java.util.concurrent.CompletableFuture;
+040import 
java.util.concurrent.ConcurrentHashMap;
+041import 
java.util.concurrent.ConcurrentMap;
+042import 
java.util.concurrent.ConcurrentNavigableMap;
+043import 
java.util.concurrent.ConcurrentSkipListMap;
 044
 045import org.apache.commons.logging.Log;
 046import 
org.apache.commons.logging.LogFactory;
@@ -115,7 +115,7 @@
 107public final Set 
pendingRequests = new HashSet<>();
 108
 109public final Map> allRequests =
-110new HashMap<>();
+110new LinkedHashMap<>();
 111
 112public boolean hasQuota(int max) {
 113  return pendingRequests.size() < 
max;
@@ -128,353 +128,358 @@
 120public void send(LocateRequest req) 
{
 121  pendingRequests.add(req);
 122}
-123  }
-124
-125  
AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
-126this.conn = conn;
-127
this.maxConcurrentLocateRequestPerTable = conn.getConfiguration().getInt(
-128  
MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, 
DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE);
-129  }
-130
-131  private TableCache 
getTableCache(TableName tableName) {
-132return computeIfAbsent(cache, 
tableName, TableCache::new);
-133  }
-134
-135  private void 
removeFromCache(HRegionLocation loc) {
-136TableCache tableCache = 
cache.get(loc.getRegionInfo().getTable());
-137if (tableCache == null) {
-138  return;
-139}
-140
tableCache.cache.computeIfPresent(loc.getRegionInfo().getStartKey(), (k, 
oldLoc) -> {
-141  if (oldLoc.getSeqNum() > 
loc.getSeqNum() ||
-142  
!oldLoc.getServerName().equals(loc.getServerName())) {
-143return oldLoc;
-144  }
-145  return null;
-146});
-147  }
-148
-149  // return whether we add this loc to 
cache
-150  private boolean addToCache(TableCache 
tableCache, HRegionLocation loc) {
-151if (LOG.isTraceEnabled()) {
-152  LOG.trace("Try adding " + loc + " 
to cache");
-153}
-154byte[] startKey = 
loc.getRegionInfo().getStartKey();
-155HRegionLocation oldLoc = 
tableCache.cache.putIfAbsent(startKey, loc);
-156if (oldLoc == null) {
-157  return true;
-158}
-159if (oldLoc.getSeqNum() > 
loc.getSeqNum() ||
-160
oldLoc.getServerName().equals(loc.getServerName())) {
-161  if (LOG.isTraceEnabled()) {
-162LOG.trace("Will not add " + loc + 
" to cache because the old value " + oldLoc +
-163" is newer than us or has the 
same server name");
+123
+124public Optional 
getCandidate() {
+125  return 
allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
+126}
+127
+128public void 
clearCompletedRequests(Optional location) {
+129  for 
(Iterator>> iter = allRequests
+130  .entrySet().iterator(); 
iter.hasNext();) {
+131Map.Entry> entry = iter.next();
+132if (tryComplete(entry.getKey(), 
entry.getValue(), location)) {
+133  iter.remove();
+134}
+135  }
+136}
+137
+138private boolean 
tryComplete(LocateRequest req, CompletableFuture 
future,
+139Optional 
location) {
+140  if (future.isDone()) {
+141return true;
+142  }
+143  if (!location.isPresent()) {
+144return false;
+145  }
+146  HRegionLocation loc = 
location.get();
+147  boolean completed;
+148  if 
(req.locateType.equals(RegionLocateType.BEFORE)) {
+149// for locating the row
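
The HashMap-to-LinkedHashMap switch above is what makes getCandidate()
well-defined: iteration follows insertion order, so the first request that is
not already pending is also the oldest one. A standalone sketch of that
selection pattern (plain strings stand in for LocateRequest and its future):

    import java.util.HashSet;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Optional;
    import java.util.Set;

    public class OldestCandidate {
      public static void main(String[] args) {
        Map<String, String> allRequests = new LinkedHashMap<>(); // insertion-ordered
        Set<String> pending = new HashSet<>();

        allRequests.put("req1", "future1");
        allRequests.put("req2", "future2");
        pending.add("req1"); // req1 is already in flight

        // Mirrors getCandidate(): first non-pending key, in arrival order
        Optional<String> candidate =
            allRequests.keySet().stream().filter(r -> !pending.contains(r)).findFirst();
        System.out.println(candidate.orElse("none")); // prints req2
      }
    }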

[42/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index 19ef87b..a4266b6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -52,7 +52,7 @@
 044 * if the table is read only, the maximum 
size of the memstore,
 045 * when the region split should occur, 
coprocessors associated with it etc...
 046 * @deprecated As of release 2.0.0, this 
will be removed in HBase 3.0.0.
-047 * use {@link 
TableDescriptorBuilder} to build {@link HTableDescriptor}.
+047 * Use {@link 
TableDescriptorBuilder} to build {@link HTableDescriptor}.
 048 */
 049@Deprecated
 050@InterfaceAudience.Public
@@ -610,282 +610,286 @@
 602   * HTableDescriptor contains mapping of 
family name to HColumnDescriptors.
 603   * This returns all the keys of the 
family map which represents the column
 604   * family names of the table.
-605   * @return Immutable sorted set of the 
keys of the families.
-606   * @deprecated Use {@link 
#getColumnFamilyNames()}.
-607   */
-608  public Set 
getFamiliesKeys() {
-609return 
delegatee.getColumnFamilyNames();
-610  }
-611
-612  /**
-613   * Returns the count of the column 
families of the table.
-614   *
-615   * @return Count of column families of 
the table
-616   */
-617  @Override
-618  public int getColumnFamilyCount() {
-619return 
delegatee.getColumnFamilyCount();
-620  }
-621
-622  /**
-623   * Returns an array all the {@link 
HColumnDescriptor} of the column families
-624   * of the table.
-625   *
-626   * @return Array of all the 
HColumnDescriptors of the current table
-627   *
-628   * @see #getFamilies()
-629   */
-630  @Deprecated
-631  @Override
-632  public HColumnDescriptor[] 
getColumnFamilies() {
-633return 
Stream.of(delegatee.getColumnFamilies())
-634
.map(this::toHColumnDescriptor)
-635.toArray(size -> new 
HColumnDescriptor[size]);
-636  }
-637
-638  /**
-639   * Returns the HColumnDescriptor for a 
specific column family with name as
-640   * specified by the parameter column.
-641   * @param column Column family name
-642   * @return Column descriptor for the 
passed family name or the family on
-643   * passed in column.
-644   * @deprecated Use {@link 
#getColumnFamily(byte[])}.
-645   */
-646  @Deprecated
-647  public HColumnDescriptor 
getFamily(final byte[] column) {
-648return 
toHColumnDescriptor(delegatee.getColumnFamily(column));
-649  }
-650
-651
-652  /**
-653   * Removes the HColumnDescriptor with 
name specified by the parameter column
-654   * from the table descriptor
-655   *
-656   * @param column Name of the column 
family to be removed.
-657   * @return Column descriptor for the 
passed family name or the family on
-658   * passed in column.
-659   */
-660  public HColumnDescriptor 
removeFamily(final byte [] column) {
-661return 
toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column));
-662  }
-663
-664  /**
-665   * Return a HColumnDescriptor for user 
to keep the compatibility as much as possible.
-666   * @param desc read-only 
ColumnFamilyDescriptor
-667   * @return The older implementation of 
ColumnFamilyDescriptor
-668   */
-669  protected HColumnDescriptor 
toHColumnDescriptor(ColumnFamilyDescriptor desc) {
-670if (desc == null) {
-671  return null;
-672} else if (desc instanceof 
ModifyableColumnFamilyDescriptor) {
-673  return new 
HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
-674} else if (desc instanceof 
HColumnDescriptor) {
-675  return (HColumnDescriptor) desc;
-676} else {
-677  return new HColumnDescriptor(new 
ModifyableColumnFamilyDescriptor(desc));
-678}
-679  }
-680
-681  /**
-682   * Add a table coprocessor to this 
table. The coprocessor
-683   * type must be 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-684   * or Endpoint.
-685   * It won't check if the class can be 
loaded or not.
-686   * Whether a coprocessor is loadable or 
not will be determined when
-687   * a region is opened.
-688   * @param className Full class name.
-689   * @throws IOException
-690   */
-691  public HTableDescriptor 
addCoprocessor(String className) throws IOException {
-692
getDelegateeForModification().addCoprocessor(className);
-693return this;
-694  }
-695
-696  /**
-697   * Add a table coprocessor to this 
table. The coprocessor
-698   * type must be 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-699   * or Endpoint.
-700   * It won't check if the class can be 
loaded or not.
-701   * Whether a coprocessor is loadable or 
not will be determined when
-702   * a region is opened.
-703   * @param jarFilePath Pat
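
The removed HTableDescriptor plumbing above has a direct builder counterpart.
A sketch (the family setter has been renamed across branches, addColumnFamily
versus setColumnFamily, so treat the exact method name as an assumption):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TableDescriptorMigration {
      public static void main(String[] args) {
        // Before (deprecated): new HTableDescriptor(...) plus addFamily(...)
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // name varies by branch
            .build();
        System.out.println(td.getColumnFamilyCount()); // see getColumnFamilyCount() above
      }
    }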

[33/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index bcfd0f1..3e2933d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -855,33 +855,27 @@ service.
 RegionServerCallable.setLocation(HRegionLocation location) 
 
 
-private boolean
-AsyncNonMetaRegionLocator.tryComplete(AsyncNonMetaRegionLocator.LocateRequest req,
-   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture future,
-   HRegionLocation loc) 
-
-
 private static void
 ZKAsyncRegistry.tryComplete(org.apache.commons.lang.mutable.MutableInt remaining,
HRegionLocation[] locs,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture future) 
 
-
+
 (package private) void
 AsyncMetaRegionLocator.updateCachedLocation(HRegionLocation loc,
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in 
java.lang">Throwable exception) 
 
-
+
 (package private) void
 AsyncNonMetaRegionLocator.updateCachedLocation(HRegionLocation loc,
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in 
java.lang">Throwable exception) 
 
-
+
 (package private) void
 AsyncRegionLocator.updateCachedLocation(HRegionLocation loc,
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in 
java.lang">Throwable exception) 
 
-
+
 (package private) static void
 AsyncRegionLocator.updateCachedLocation(HRegionLocation loc,
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable exception,
@@ -889,13 +883,13 @@ service.
 http://docs.oracle.com/javase/8/docs/api/java/util/function/Consumer.html?is-external=true";
 title="class or interface in java.util.function">Consumer addToCache,
 http://docs.oracle.com/javase/8/docs/api/java/util/function/Consumer.html?is-external=true";
 title="class or interface in java.util.function">Consumer removeFromCache) 
 
-
+
 static  T
 ResultStatsUtil.updateStats(T r,
ServerStatisticTracker stats,
HRegionLocation regionLocation) 
 
-
+
 private static  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
 RawAsyncTableImpl.voidMutate(HBaseRpcController controller,
   HRegionLocation loc,
@@ -913,10 +907,14 @@ service.
 
 
 
+void
+AsyncNonMetaRegionLocator.TableCache.clearCompletedRequests(http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional location) 
+
+
 private void
 HTableMultiplexer.HTableMultiplexerStatus.initialize(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map serverToFlushWorkerMap) 
 
-
+
 private  void
 RawAsyncTableImpl.onLocateComplete(http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true";
 title="class or interface in 
java.util.function">Function stubMaker,
 RawAsyncTable.CoprocessorCallable callable,
@@ -929,11 +927,17 @@ service.
 HRegionLocation loc,
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in 
java.lang">Throwable error) 
 
+
+private boolean
+AsyncNonMetaRegionLocator.TableCache.tryComplete(AsyncNonMetaRegionLocator.LocateRequest req,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture future,
+   http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional location) 
+
 
 private boole

[09/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanControllerImpl.html
index e7cd47c..d61d346 100644
@@ -43,132 +43,132 @@

The whole rendered source is re-emitted. The removed (old) side, with the
generated line numbers stripped, read in part:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.DoNotRetryIOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NotServingRegionException;
    import org.apache.hadoop.hbase.UnknownScannerException;
    import org.apache.hadoop.hbase.classification.InterfaceAudience;
    import org.apache.hadoop.hbase.client.RawScanResultConsumer.ScanResumer;
    import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
    import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
    import org.apache.hadoop.hbase.exceptions.ScannerResetException;
    import org.apache.hadoop.hbase.ipc.HBaseRpcController;
    import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    /**
     * Retry caller for scanning a region.
     * <p>
     * We will modify the {@link Scan} object passed in directly. The upper
     * layer should store the reference of this object and use it to open new
     * single region scanners.
     */
    @InterfaceAudience.Private
    class AsyncScanSingleRegionRpcRetryingCaller {

      private static final Log LOG = LogFactory.getLog(AsyncScanSingleRegionRpcRetryingCaller.class);

      private final HashedWheelTimer retryTimer;
      private final Scan scan;
      private final ScanMetrics scanMetrics;
      private final long scannerId;
      private final ScanResultCache resultCache;
      private final RawScanResultConsumer consumer;
      private final ClientService.Interface stub;
      private final HRegionLocation loc;
      private final boolean regionServerRemote;
      private final long scannerLeaseTimeoutPeriodNs;
      private final long pauseNs;
      private final int maxAttempts;
      private final long scanTimeoutNs;
      private final long rpcTimeoutNs;
      private final int startLogErrorsCnt;
      private final Runnable completeWhenNoMoreResultsInRegion;
      private final CompletableFuture<Boolean> future;
      private final HBaseRpcController controller;

      private byte[] nextStartRowWhenError;
      private boolean includeNextStartRowWhenError;
      private long nextCallStartNs;
      private int tries;
      private final List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions;
      private long nextCallSeq = -1L;

      private enum ScanControllerState {
        INITIALIZED, SUSPENDED, TERMINATED, DESTROYED
      }

      // Since suspend and terminate should only be called within onNext or
      // onHeartbeat (see the comments of RawScanResultConsumer.onNext and
      // onHeartbeat), we need to add some check to prevent invalid usage. We
      // use two things to prevent invalid usage:
      // 1. Record the thread that constructs the ScanControllerImpl instance.
      //    We will throw an IllegalStateException if the caller thread is not
      //    this thread.
      // 2. The ControllerState. The initial state is INITIALIZED; if you call
      //    suspend, the state will be transformed to SUSPENDED, and if you
      //    call terminate, the state will be transformed to TERMINATED. And
      //    when we are back from onNext or onHeartbeat in the ... [message truncated]
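The two-guard protocol in that comment is easy to see in isolation. A minimal
stand-alone sketch of the same pattern (illustrative names, not the HBase
implementation itself):

    // Guard 1: pin the constructing thread. Guard 2: gate each transition on
    // the state enum, so suspend/terminate can run at most once, in-callback.
    final class CallbackScopedController {

      enum State { INITIALIZED, SUSPENDED, TERMINATED, DESTROYED }

      private final Thread callerThread = Thread.currentThread();
      private State state = State.INITIALIZED;

      private void preCheck() {
        if (Thread.currentThread() != callerThread) {
          throw new IllegalStateException("must be called from the callback thread");
        }
        if (state != State.INITIALIZED) {
          throw new IllegalStateException("suspend/terminate already called: " + state);
        }
      }

      void suspend()   { preCheck(); state = State.SUSPENDED; }
      void terminate() { preCheck(); state = State.TERMINATED; }

      // The owner flips the controller to DESTROYED once the callback returns,
      // so a stored reference can never be used afterwards.
      void destroy()   { state = State.DESTROYED; }
    }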


[28/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
index ee25eee..960ba59 100644

The deprecation note for postBulkLoadHFile(ObserverContext, List, boolean) is
expanded from a bare cross-reference to the standard form, in both the summary
table and the method detail:

    Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
    (HBASE-17123). Use postBulkLoadHFile(ObserverContext, List, Map, boolean).

and the detail now carries an explicit @Deprecated annotation:

    @Deprecated
    default boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
                                      List<Pair<byte[], String>> stagingFamilyPaths,
                                      boolean hasLoaded)
                             throws IOException

    Called after bulkLoadHFile.

The remaining hunks (preStoreFileReaderOpen, postStoreFileReaderOpen,
postMutationBeforeWAL, postInstantiateDeleteTracker) are anchor-only
re-renderings with unchanged signatures. [message truncated]

[51/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1ada5f22
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1ada5f22
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1ada5f22

Branch: refs/heads/asf-site
Commit: 1ada5f22c5941c25cc74ddad43aa952ef4cd3354
Parents: 4c7741b
Author: jenkins 
Authored: Wed Aug 16 15:06:03 2017 +
Committer: jenkins 
Committed: Wed Aug 16 15:06:03 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 17105 +++--
 apidocs/deprecated-list.html|48 +-
 apidocs/index-all.html  |52 +-
 .../apache/hadoop/hbase/HColumnDescriptor.html  |   205 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |75 +-
 .../hbase/class-use/HColumnDescriptor.html  | 4 +-
 .../hadoop/hbase/class-use/HRegionInfo.html | 4 +-
 .../hadoop/hbase/class-use/ServerName.html  | 8 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |   366 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   207 +-
 .../hbase/client/RawScanResultConsumer.html |23 +-
 .../RawScanResultConsumer.ScanController.html   | 4 +-
 .../hbase/client/class-use/TableDescriptor.html | 2 +-
 .../org/apache/hadoop/hbase/package-use.html| 6 +-
 .../apache/hadoop/hbase/HColumnDescriptor.html  |  1401 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |   552 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |  2832 ++-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |  1639 +-
 .../RawScanResultConsumer.ScanController.html   |65 +-
 .../RawScanResultConsumer.ScanResumer.html  |65 +-
 .../hbase/client/RawScanResultConsumer.html |65 +-
 book.html   |23 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 22578 -
 checkstyle.rss  |30 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 3 +
 devapidocs/allclasses-noframe.html  | 3 +
 devapidocs/constant-values.html | 6 +-
 devapidocs/deprecated-list.html |60 +-
 devapidocs/index-all.html   |   146 +-
 .../apache/hadoop/hbase/HColumnDescriptor.html  |   205 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |75 +-
 .../hadoop/hbase/backup/package-tree.html   | 6 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |   119 +-
 .../hbase/class-use/HColumnDescriptor.html  | 4 +-
 .../hadoop/hbase/class-use/HRegionInfo.html | 4 +-
 .../hadoop/hbase/class-use/HRegionLocation.html |34 +-
 .../hbase/class-use/RegionTooBusyException.html | 2 +-
 .../hadoop/hbase/class-use/ServerName.html  | 8 +-
 .../class-use/InterfaceAudience.Public.html | 2 +-
 .../hbase/classification/package-tree.html  | 8 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |   366 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   207 +-
 .../AsyncNonMetaRegionLocator.TableCache.html   |51 +-
 .../hbase/client/AsyncNonMetaRegionLocator.html |49 +-
 ...ionRpcRetryingCaller.ScanControllerImpl.html |16 +-
 ...onRpcRetryingCaller.ScanControllerState.html |14 +-
 .../AsyncScanSingleRegionRpcRetryingCaller.html |60 +-
 .../hbase/client/AsyncTableResultScanner.html   |76 +-
 .../hbase/client/RawScanResultConsumer.html |23 +-
 ...AsyncNonMetaRegionLocator.LocateRequest.html |17 +-
 .../hadoop/hbase/client/class-use/Cursor.html   |11 +
 .../RawScanResultConsumer.ScanController.html   | 4 +-
 .../hadoop/hbase/client/class-use/Scan.html |16 +-
 .../hadoop/hbase/client/class-use/Table.html|12 +-
 .../hbase/client/class-use/TableDescriptor.html | 2 +-
 .../client/metrics/ServerSideScanMetrics.html   |38 +-
 .../hadoop/hbase/client/package-tree.html   |28 +-
 .../hbase/coprocessor/RegionObserver.html   |25 +-
 .../coprocessor/class-use/ObserverContext.html  | 4 +-
 .../class-use/RegionCoprocessorEnvironment.html | 4 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hfile/bucket/FileIOEngine.FileAccessor.html | 4 +-
 .../bucket/FileIOEngine.FileReadAccessor.html   | 6 +-
 .../bucket/FileIOEngine.FileWriteAccessor.html  | 6 +-
 .../hbase/io/hfile/bucket/FileIOEngine.html |42 +-
 .../had

[34/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index fe98c7c..7ab0e0b 100644

Two wording fixes plus a batch of anchor-only re-renderings:

1. The class-level deprecation is capitalized: "As of release 2.0.0, this will
   be removed in HBase 3.0.0. Use TableDescriptorBuilder to build
   HTableDescriptor." (was "use TableDescriptorBuilder ...").

2. getFamiliesKeys() gets the standard deprecation form, in the summary and in
   the detail:

       @Deprecated
       public Set<byte[]> getFamiliesKeys()

       Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0
       (HBASE-18008). Use getColumnFamilyNames().

       Returns all the column family names of the current table. The map of
       HTableDescriptor contains mapping of family name to HColumnDescriptors.
       This returns all the keys of the family map which represents the column
       families of the table.

The NAMESPACE_FAMILY_INFO, NAMESPACE_FAMILY_INFO_BYTES,
NAMESPACE_COL_DESC_BYTES and NAMESPACE_TABLEDESC constants, plus
getColumnFamilyCount(), getColumnFamilies(), getFamily(byte[]),
removeFamily(byte[]) and toHColumnDescriptor(ColumnFamilyDescriptor), are
re-rendered with new anchors but unchanged declarations.
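The migration the new message asks for is mechanical. Assuming an
HTableDescriptor in scope, both calls return the same Set<byte[]> (per the
source, getFamiliesKeys() simply delegates):

    // Deprecated since 2.0.0, removed in 3.0.0 (HBASE-18008):
    Set<byte[]> families = tableDescriptor.getFamiliesKeys();

    // Replacement with identical semantics:
    Set<byte[]> families2 = tableDescriptor.getColumnFamilyNames();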

[04/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
index 59c2836..c804d26 100644
@@ -103,38 +103,45 @@

The onHeartbeat javadoc is corrected ("an heartbeat" becomes "a heartbeat", the
bare "onNext" becomes a {@link}) and gains a paragraph explaining when the
callback actually fires. The new rendering, line numbers stripped:

    /**
     * Indicate that there is a heartbeat message but we have not cumulated
     * enough cells to call {@link #onNext(Result[], ScanController)}.
     * <p>
     * Note that this method will always be called when RS returns something
     * to us but we do not have enough cells to call {@link #onNext(Result[],
     * ScanController)}. Sometimes it may not be a 'heartbeat' message for RS,
     * for example, we have a large row with many cells and size limit is
     * exceeded before sending all the cells for this row. For RS it does send
     * some data to us and the time limit has not been reached, but we can not
     * return the data to client so here we call this method to tell client we
     * have already received something.
     * <p>
     * This method give you a chance to terminate a slow scan operation.
     * @param controller used to suspend or terminate the scan. Notice that the
     *          {@code controller} instance is only valid within the scope of
     *          onHeartbeat method. You can only call its method in onHeartbeat,
     *          do NOT store it and call it later outside onHeartbeat.
     */
    default void onHeartbeat(ScanController controller) {
    }

The onError, onComplete and onScanMetricsCreated declarations that follow are
only renumbered. The same hunk then repeats for
devapidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.html
(index 59c2836..c804d26). [message truncated]
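The "do NOT store it" rule still leaves room for flow control, because in this
API ScanController.suspend() hands back a ScanResumer that is valid outside the
callback. A minimal backpressure sketch under that contract (the buffer size
and resume delay are arbitrary choices, not anything the API prescribes):

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.RawScanResultConsumer;
    import org.apache.hadoop.hbase.client.Result;

    class BufferingConsumer implements RawScanResultConsumer {
      private static final int MAX_PENDING = 1024;
      private final Queue<Result> pending = new ConcurrentLinkedQueue<>();
      private final ScheduledExecutorService pool =
          Executors.newSingleThreadScheduledExecutor();

      @Override
      public void onNext(Result[] results, ScanController controller) {
        for (Result r : results) {
          pending.add(r);
        }
        if (pending.size() >= MAX_PENDING) {
          // suspend() must happen here, inside onNext...
          ScanResumer resumer = controller.suspend();
          // ...but the resumer may be used later, from any thread.
          pool.schedule(resumer::resume, 1, TimeUnit.SECONDS);
        }
      }

      @Override
      public void onError(Throwable error) {
        error.printStackTrace();
      }

      @Override
      public void onComplete() {
        pool.shutdown();
      }
    }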


[06/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState.html
index e7cd47c..d61d346 100644

(Identical hunk to the ScanControllerImpl page in message [09/51] above: the
src-html pages for a class and its nested types share one rendered body, so the
same re-emitted source repeats here. Omitted.) [message truncated]


[22/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/regionserver/CellFlatMap.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CellFlatMap.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CellFlatMap.html
index 33c8e93..560b3da 100644

A new nested class joins the summary table:

    private static class CellFlatMap.CellFlatMapEntry

alongside the existing CellFlatMap.CellFlatMapCollection and
CellFlatMap.CellFlatMapIterator. The NavigableMap methods lowerEntry(Cell),
higherEntry(Cell), ceilingEntry(Cell), floorEntry(Cell), firstEntry() and
lastEntry(), all returning Map.Entry<Cell, Cell>, are re-rendered with new
anchors but unchanged signatures. [message truncated]

[41/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index f47225a..5a77704 100644
@@ -841,1497 +841,1513 @@

The rendered Admin source is re-emitted from the closeRegion block onward. The
removed (old) side, line numbers stripped, read in part:

    /**
     * ...
     * @param regionname region name to close
     * @param serverName Deprecated. Not used.
     * @throws IOException if a remote or network exception occurs
     * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
     */
    void closeRegion(final String regionname, final String serverName) throws IOException;

    /**
     * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
     * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
     */
    void closeRegion(final byte[] regionname, final String serverName) throws IOException;

    /**
     * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
     * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name
     *   suffix: e.g. if regionname is
     *   TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.,
     *   then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396.
     * @param serverName Deprecated. Not used.
     * @return Deprecated. Returns true always.
     * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
     */
    boolean closeRegionWithEncodedRegionName(final String encodedRegionName, final String serverName)
        throws IOException;

    /**
     * Used {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins.
     * @param sn Deprecated. Not used.
     * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #unassign(byte[], boolean)} instead.
     */
    void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException;

    /** Get all the online regions on a region server. */
    List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException;

    /** Flush a table. Synchronous operation. */
    void flush(final TableName tableName) throws IOException;

    /** Flush an individual region. Synchronous operation. */
    void flushRegion(final byte[] regionName) throws IOException;

    /** Compact a table. Asynchronous operation. */
    void compact(final TableName tableName) throws IOException;

    /** Compact an individual region. Asynchronous operation. */
    void compactRegion(final byte[] regionName) throws IOException;

    /** Compact a column family within a table. Asynchronous operation. */
    void compact(final TableName tableName, final byte[] columnFamily) throws IOException;

    /** Compact a column family within a region. Asynchronous operation. */
    void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException;

    /** Major compact a table. Asynchronous operation. */
    void majorCompact(TableName tableName) throws IOException;

    /** Major compact a table or an individual region. Asynchronous o... [message truncated]
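Migration for admin clients is a one-liner per call site: each deprecated
closeRegion variant maps onto unassign(byte[], boolean). A sketch, assuming an
Admin instance and a String region name in scope (Bytes is
org.apache.hadoop.hbase.util.Bytes):

    // Deprecated since 2.0, removed in 3.0; serverName was already unused:
    admin.closeRegion(regionName, serverName);

    // Replacement; the boolean is the "force" flag:
    admin.unassign(Bytes.toBytes(regionName), false);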

[30/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
index dd354bf..5fff896 100644

The method count grows from three to six (the page's tab index moves from
i0..i2 to i0..i5). The summary table now lists:

    void clearCompletedRequests(Optional<HRegionLocation> location)      [new]
    Optional<AsyncNonMetaRegionLocator.LocateRequest> getCandidate()     [new]
    boolean hasQuota(int max)
    boolean isPending(AsyncNonMetaRegionLocator.LocateRequest req)
    void send(AsyncNonMetaRegionLocator.LocateRequest req)
    private boolean tryComplete(AsyncNonMetaRegionLocator.LocateRequest req,
        CompletableFuture<HRegionLocation> future,
        Optional<HRegionLocation> location)                              [new]

with matching method-detail sections added for getCandidate,
clearCompletedRequests and tryComplete.
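Taken together the new methods suggest a small completion loop: when a location
arrives, every pending locate request it satisfies is completed and then
dropped. An illustrative rendering of that flow (a stand-alone sketch; the
LocateRequest stand-in, the allRequests map and the covers() check are all
invented here, not the HBase internals):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.HRegionLocation;

    final class PendingLocates {
      static final class LocateRequest {
        final byte[] row;
        LocateRequest(byte[] row) { this.row = row; }
      }

      private final Map<LocateRequest, CompletableFuture<HRegionLocation>> allRequests =
          new HashMap<>();

      // Complete and remove every pending request the new location satisfies.
      void clearCompletedRequests(Optional<HRegionLocation> location) {
        for (Iterator<Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>>> it =
            allRequests.entrySet().iterator(); it.hasNext();) {
          Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>> e = it.next();
          if (tryComplete(e.getKey(), e.getValue(), location)) {
            it.remove();
          }
        }
      }

      private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
          Optional<HRegionLocation> location) {
        if (future.isDone()) {
          return true;                                   // satisfied elsewhere
        }
        if (!location.isPresent() || !covers(location.get(), req.row)) {
          return false;                                  // keep it pending
        }
        future.complete(location.get());
        return true;
      }

      private boolean covers(HRegionLocation loc, byte[] row) {
        return loc.getRegionInfo().containsRow(row);     // naive stand-in check
      }
    }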
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
index 1671bb4..192e564 100644

The mirror image of the TableCache change: the outer class's method count drops
from fifteen to fourteen, with

    private boolean tryComplete(AsyncNonMetaRegionLocator.LocateRequest req,
        CompletableFuture<HRegionLocation> future, HRegionLocation loc)

removed from its summary table; the logic now lives on TableCache, taking an
Optional<HRegionLocation> instead of a bare HRegionLocation. removeFromCache
and updateCachedLocation are re-rendered unchanged. [message truncated]

[44/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/client/RawScanResultConsumer.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/RawScanResultConsumer.html b/apidocs/org/apache/hadoop/hbase/client/RawScanResultConsumer.html
index fbf7f4d..1f9ef0b 100644

The public API page picks up the corrected, expanded onHeartbeat javadoc in
both the summary and the detail:

    default void onHeartbeat(RawScanResultConsumer.ScanController controller)

    Indicate that there is a heartbeat message but we have not cumulated
    enough cells to call onNext(Result[], ScanController).

    Note that this method will always be called when RS returns something to
    us but we do not have enough cells to call onNext(Result[],
    ScanController). Sometimes it may not be a 'heartbeat' message for RS, for
    example, we have a large row with many cells and size limit is exceeded
    before sending all the cells for this row. For RS it does send some data
    to us and the time limit has not been reached, but we can not return the
    data to client so here we call this method to tell client we have already
    received something.

    This method give you a chance to terminate a slow scan operation.

onError(Throwable), onComplete() and onScanMetricsCreated(ScanMetrics) are
re-rendered with new anchors but unchanged text.
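This is the consumer-side surface that HBASE-18553 extends: besides the
heartbeat text above, the ScanController can now carry a scan cursor (note the
new class-use/Cursor.html entry in this batch's diffstat), so a client can
track server-side progress even when no rows are ready. A sketch, assuming
ScanController.cursor() returns Optional<Cursor> as in the master-branch API:

    import org.apache.hadoop.hbase.client.RawScanResultConsumer;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    class ProgressLoggingConsumer implements RawScanResultConsumer {
      @Override
      public void onNext(Result[] results, ScanController controller) {
        for (Result r : results) {
          System.out.println("row: " + Bytes.toStringBinary(r.getRow()));
        }
      }

      @Override
      public void onHeartbeat(ScanController controller) {
        // No complete rows yet; the cursor (if present) marks how far the
        // server has scanned, which is exactly what a progress report needs.
        controller.cursor().ifPresent(c ->
            System.out.println("scanned up to: " + Bytes.toStringBinary(c.getRow())));
      }

      @Override
      public void onError(Throwable error) {
        error.printStackTrace();
      }

      @Override
      public void onComplete() {
        System.out.println("scan complete");
      }
    }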

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/client/class-use/RawScanResultConsumer.ScanController.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/RawScanResultConsumer.ScanController.html b/apidocs/org/apache/hadoop/hbase/client/class-use/RawScanResultConsumer.ScanController.html
index 939469e..f5d80c8 100644

The class-use page inherits the same one-line summary fix for
RawScanResultConsumer.onHeartbeat: "an heartbeat" becomes "a heartbeat", and
the bare "onNext" becomes a link to RawScanResultConsumer.onNext(Result[],
ScanController).
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html b/apidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index cba3174..fbce224 100644

The HTableDescriptor row inherits the capitalization fix: "As of release 2.0.0,
this will be removed in HBase 3.0.0. Use TableDescriptorBuilder to ..."
[message truncated]

[02/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
index 803999d..8af4814 100644

The rendered FileIOEngine source is re-emitted. The removed (old) side, line
numbers stripped, read in part:

    /**
     * IO engine that stores data to a file on the local file system.
     */
    @InterfaceAudience.Private
    public class FileIOEngine implements IOEngine {
      private static final Log LOG = LogFactory.getLog(FileIOEngine.class);
      public static final String FILE_DELIMITER = ",";
      private final String[] filePaths;
      private final FileChannel[] fileChannels;
      private final RandomAccessFile[] rafs;

      private final long sizePerFile;
      private final long capacity;

      private FileReadAccessor readAccessor = new FileReadAccessor();
      private FileWriteAccessor writeAccessor = new FileWriteAccessor();

      public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
          throws IOException {
        this.sizePerFile = capacity / filePaths.length;
        this.capacity = this.sizePerFile * filePaths.length;
        this.filePaths = filePaths;
        this.fileChannels = new FileChannel[filePaths.length];
        if (!maintainPersistence) {
          for (String filePath : filePaths) {
            File file = new File(filePath);
            if (file.exists()) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("File " + filePath + " already exists. Deleting!!");
              }
              file.delete();
              // If deletion fails still we can manage with the writes
            }
          }
        }
        this.rafs = new RandomAccessFile[filePaths.length];
        for (int i = 0; i < filePaths.length; i++) {
          String filePath = filePaths[i];
          try {
            rafs[i] = new RandomAccessFile(filePath, "rw");
            long totalSpace = new File(filePath).getTotalSpace();
            if (totalSpace < sizePerFile) {
              // The next setting length will throw exception; logging this
              // message is just used for the detail reason of exception
              String msg = "Only " + StringUtils.byteDesc(totalSpace)
                  + " total space under " + filePath + ", not enough for requested "
                  + StringUtils.byteDesc(sizePerFile);
              LOG.warn(msg);
            }
            rafs[i].setLength(sizePerFile);
            fileChannels[i] = rafs[i].getChannel();
            LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile)
                + ", on the path:" + filePath);
          } catch (IOException fex) {
            LOG.error("Failed allocating cache on " + filePath, fex);
            shutdown();
            throw fex;
          }
        }
      }

      @Override
      public String toString() {
        return "ioengine=" + this.getClass().getSimpleName() + ", paths="
            + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity);
      }

      /**
       * File IO engine is always able to support persistent storage for the cache
       * @return true
       */
      @Override
      public boolean isPersistent() {
        return true;
      }

      /**
       * Transfers data from file to the given byte buffer
       * @param offset The offset in the file where the first byte to be read
       * @param length The length of buffer that should be allocated for reading
       *   from the file channel
       * @return number of bytes read
       */
      @Override
      public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
          throws IOException {
        ByteBuffer dstBuffer = ByteBuffer.allocate(length);
        accessFile(readAccessor, dstBuffer, offset);
        // The buffer created out of the fileChannel is formed by copying the
        // data from the file. Hence in this case there is no shared memory that
        // we point to. Even if the BucketCache evicts this buffer from the file
        // the data is already copied and there is no need to ensure that the
        // results are not corrupted before consuming them.
        if (dstBuffer.limit... [message truncated]
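The constructor above fixes sizePerFile = capacity / filePaths.length, which
makes addressing a simple stripe: a global cache offset decomposes into a file
index plus an offset within that file. Extracted for illustration (the real
accessFile also handles reads that straddle a file boundary):

    // Which backing file a global offset falls into, and where inside it.
    static int fileIndex(long globalOffset, long sizePerFile) {
      return (int) (globalOffset / sizePerFile);
    }

    static long offsetInFile(long globalOffset, long sizePerFile) {
      return globalOffset % sizePerFile;
    }

    // e.g. two 1 GiB files: global offset 1.5 GiB -> file 1, local offset 0.5 GiB.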

[15/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index 19ef87b..a4266b6 100644

Two hunks. First, the class-level deprecation tag is capitalized:

    -047 *             use {@link TableDescriptorBuilder} to build {@link HTableDescriptor}.
    +047 *             Use {@link TableDescriptorBuilder} to build {@link HTableDescriptor}.

Second, the body from getFamiliesKeys() onward is re-rendered; the removed
(old) side, line numbers stripped, read in part:

    /**
     * ...
     * @return Immutable sorted set of the keys of the families.
     * @deprecated Use {@link #getColumnFamilyNames()}.
     */
    public Set<byte[]> getFamiliesKeys() {
      return delegatee.getColumnFamilyNames();
    }

    /**
     * Returns the count of the column families of the table.
     */
    @Override
    public int getColumnFamilyCount() {
      return delegatee.getColumnFamilyCount();
    }

    /**
     * Returns an array all the {@link HColumnDescriptor} of the column
     * families of the table.
     * @see #getFamilies()
     */
    @Deprecated
    @Override
    public HColumnDescriptor[] getColumnFamilies() {
      return Stream.of(delegatee.getColumnFamilies())
          .map(this::toHColumnDescriptor)
          .toArray(size -> new HColumnDescriptor[size]);
    }

    /**
     * Returns the HColumnDescriptor for a specific column family with name as
     * specified by the parameter column.
     * @deprecated Use {@link #getColumnFamily(byte[])}.
     */
    @Deprecated
    public HColumnDescriptor getFamily(final byte[] column) {
      return toHColumnDescriptor(delegatee.getColumnFamily(column));
    }

    /**
     * Removes the HColumnDescriptor with name specified by the parameter
     * column from the table descriptor.
     */
    public HColumnDescriptor removeFamily(final byte[] column) {
      return toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column));
    }

    /**
     * Return a HColumnDescriptor for user to keep the compatibility as much as possible.
     * @param desc read-only ColumnFamilyDescriptor
     * @return The older implementation of ColumnFamilyDescriptor
     */
    protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) {
      if (desc == null) {
        return null;
      } else if (desc instanceof ModifyableColumnFamilyDescriptor) {
        return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
      } else if (desc instanceof HColumnDescriptor) {
        return (HColumnDescriptor) desc;
      } else {
        return new HColumnDescriptor(new ModifyableColumnFamilyDescriptor(desc));
      }
    }

    /**
     * Add a table coprocessor to this table. The coprocessor type must be
     * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't
     * check if the class can be loaded or not. Whether a coprocessor is
     * loadable or not will be determined when a region is opened.
     * @param className Full class name.
     */
    public HTableDescriptor addCoprocessor(String className) throws IOException {
      getDelegateeForModification().addCoprocessor(className);
      return this;
    }
    ... [message truncated]

[36/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 3491fe2..7f06226 100644

Index entries removed:

    addRegionToMETA(HRegion, HRegion) - static method in
        org.apache.hadoop.hbase.regionserver.HRegion
        ("Inserts a new region's meta information into the passed meta region.")
    calculateRegionServerLocalities() - method in
        org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster

Index entries added:

    cell - variable in CellFlatMap.CellFlatMapEntry
    CellFlatMap.CellFlatMapEntry - class in org.apache.hadoop.hbase.regionserver
    CellFlatMapEntry(Cell) - constructor
    clearCompletedRequests(Optional) - method in
        org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.TableCache

The closeRegion and closeRegionWithEncodedRegionName entries on Admin,
AsyncAdmin and AsyncHBaseAdmin swap their terse note ("Since 2.0. Will be
removed in 3.0. Use unassign(byte[], boolean) instead.") for the standard
form: "As of release 2.0.0, this will be removed in HBase 3.0.0 (HBASE-18231).
Use unassign(byte[], boolean)." [message truncated]

[27/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
index 7797f72..bb219ec 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
 public static class BaseLoadBalancer.Cluster.SwapRegionsAction
 extends BaseLoadBalancer.Cluster.Action

 [Every -/+ pair in this regenerated page is textually identical; only the
  source-line anchors behind the links changed. The visible members are
  unchanged: the public int fields fromServer, fromRegion, toServer and
  toRegion; the constructor SwapRegionsAction(int fromServer, int fromRegion,
  int toServer, int toRegion); undoAction(), which per the copied description
  "Returns an Action which would undo this action"; and toString(), which
  overrides toString in class BaseLoadBalancer.Cluster.Action.]


hbase-site git commit: INFRA-10751 Empty commit

2017-08-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1ada5f22c -> 6fa5abd53


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/6fa5abd5
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/6fa5abd5
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/6fa5abd5

Branch: refs/heads/asf-site
Commit: 6fa5abd538e838bf1e1799b5ddd094a52ea4448d
Parents: 1ada5f2
Author: jenkins 
Authored: Wed Aug 16 15:07:43 2017 +
Committer: jenkins 
Committed: Wed Aug 16 15:07:43 2017 +

--

--




hbase git commit: HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are handled by ServerCrashProcedure

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 4c74a73d5 -> acf9b87dc


HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through 
AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are 
handled by ServerCrashProcedure

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acf9b87d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acf9b87d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acf9b87d

Branch: refs/heads/master
Commit: acf9b87dca2cd190f4b5318efd5dc48e19b317f4
Parents: 4c74a73
Author: Umesh Agashe 
Authored: Tue Aug 15 14:00:04 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:12:05 2017 -0700

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/acf9b87d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 54cb1ca..0b23f47 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -480,6 +480,15 @@ public class AssignmentManager implements ServerListener {
     synchronized (checkIfShouldMoveSystemRegionLock) {
       List<RegionPlan> plans = new ArrayList<>();
       for (ServerName server : getExcludedServersForSystemTable()) {
+        if (master.getServerManager().isServerDead(server)) {
+          // TODO: See HBASE-18494 and HBASE-18495. Though getExcludedServersForSystemTable()
+          // considers only online servers, the server could be queued for dead server
+          // processing. As region assignments for crashed server is handled by
+          // ServerCrashProcedure, do NOT handle them here. The goal is to handle this through
+          // regular flow of LoadBalancer as a favored node and not to have this special
+          // handling.
+          continue;
+        }
         List<HRegionInfo> regionsShouldMove = getCarryingSystemTables(server);
         if (!regionsShouldMove.isEmpty()) {
           for (HRegionInfo regionInfo : regionsShouldMove) {



hbase git commit: HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are handled by ServerCrashProcedure

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 770312a8c -> 53c5bcc58


HBASE-18493 [AMv2] Skipped re-assignment of regions on crashed server through 
AssignmentManager.checkIfShouldMoveSystemRegionAsync() as those regions are 
handled by ServerCrashProcedure

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/53c5bcc5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/53c5bcc5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/53c5bcc5

Branch: refs/heads/branch-2
Commit: 53c5bcc5827217bfa05136bd14b78fde93529783
Parents: 770312a
Author: Umesh Agashe 
Authored: Tue Aug 15 14:00:04 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:12:29 2017 -0700

--
 .../hadoop/hbase/master/assignment/AssignmentManager.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/53c5bcc5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 54cb1ca..0b23f47 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -480,6 +480,15 @@ public class AssignmentManager implements ServerListener {
     synchronized (checkIfShouldMoveSystemRegionLock) {
       List<RegionPlan> plans = new ArrayList<>();
       for (ServerName server : getExcludedServersForSystemTable()) {
+        if (master.getServerManager().isServerDead(server)) {
+          // TODO: See HBASE-18494 and HBASE-18495. Though getExcludedServersForSystemTable()
+          // considers only online servers, the server could be queued for dead server
+          // processing. As region assignments for crashed server is handled by
+          // ServerCrashProcedure, do NOT handle them here. The goal is to handle this through
+          // regular flow of LoadBalancer as a favored node and not to have this special
+          // handling.
+          continue;
+        }
         List<HRegionInfo> regionsShouldMove = getCarryingSystemTables(server);
         if (!regionsShouldMove.isEmpty()) {
           for (HRegionInfo regionInfo : regionsShouldMove) {



[19/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/devapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
index c60367c..d03c1de 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
@@ -465,32 +465,36 @@
 CellFlatMap stores a constant number of elements and is immutable after creation stage.

+CellFlatMap.CellFlatMapEntry
+
 [The remainder of this hunk, and the hunks that follow, only re-alternate the
  generated table-row markup for every class listed after the new
  CellFlatMap.CellFlatMapEntry row (CellSet, Chunk, ChunkCreator,
  CompactedHFilesDischargeHandler, CompactedHFilesDischarger, CompactingMemStore,
  CompactionPipeline, the CompactionTool classes, CompactSplit,
  CompositeImmutableSegment, ConstantSizeRegionSplitPolicy, CSLMImmutableSegment,
  the DateTiered and Default store classes, DelimitedKeyPrefixRegionSplitPolicy,
  DisabledRegionSplitPolicy, FifoRpcSchedulerFactory, the Flush* policies,
  HeapMemoryManager and its TunerContext/TunerResult, HMobStore, HRegion and its
  inner classes). Each class's one-line summary is unchanged; the message is
  truncated mid-sentence at HRegion.FlushResultImpl.]

[39/51] [partial] hbase-site git commit: Published site at .

2017-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanController.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanController.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanController.html
index 59c2836..c804d26 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanController.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanController.html
@@ -103,38 +103,45 @@
 095  void onNext(Result[] results, ScanController controller);
 096
 097  /**
-098   * Indicate that there is an heartbeat message but we have not cumulated enough cells to call
-099   * onNext.
+098   * Indicate that there is a heartbeat message but we have not cumulated enough cells to call
+099   * {@link #onNext(Result[], ScanController)}.
 100   *
-101   * This method give you a chance to terminate a slow scan operation.
-102   * @param controller used to suspend or terminate the scan. Notice that the {@code controller}
-103   *          instance is only valid within the scope of onHeartbeat method. You can only call its
-104   *          method in onHeartbeat, do NOT store it and call it later outside onHeartbeat.
-105   */
-106  default void onHeartbeat(ScanController controller) {
-107  }
-108
-109  /**
-110   * Indicate that we hit an unrecoverable error and the scan operation is terminated.
-111   *
-112   * We will not call {@link #onComplete()} after calling {@link #onError(Throwable)}.
-113   */
-114  void onError(Throwable error);
+101   * Note that this method will always be called when RS returns something to us but we do not have
+102   * enough cells to call {@link #onNext(Result[], ScanController)}. Sometimes it may not be a
+103   * 'heartbeat' message for RS, for example, we have a large row with many cells and size limit is
+104   * exceeded before sending all the cells for this row. For RS it does send some data to us and the
+105   * time limit has not been reached, but we can not return the data to client so here we call this
+106   * method to tell client we have already received something.
+107   *
+108   * This method give you a chance to terminate a slow scan operation.
+109   * @param controller used to suspend or terminate the scan. Notice that the {@code controller}
+110   *          instance is only valid within the scope of onHeartbeat method. You can only call its
+111   *          method in onHeartbeat, do NOT store it and call it later outside onHeartbeat.
+112   */
+113  default void onHeartbeat(ScanController controller) {
+114  }
 115
 116  /**
-117   * Indicate that the scan operation is completed normally.
-118   */
-119  void onComplete();
-120
-121  /**
-122   * If {@code scan.isScanMetricsEnabled()} returns true, then this method will be called prior to
-123   * all other methods in this interface to give you the {@link ScanMetrics} instance for this scan
-124   * operation. The {@link ScanMetrics} instance will be updated on-the-fly during the scan, you can
-125   * store it somewhere to get the metrics at any time if you want.
-126   */
-127  default void onScanMetricsCreated(ScanMetrics scanMetrics) {
-128  }
-129}
+117   * Indicate that we hit an unrecoverable error and the scan operation is terminated.
+118   *
+119   * We will not call {@link #onComplete()} after calling {@link #onError(Throwable)}.
+120   */
+121  void onError(Throwable error);
+122
+123  /**
+124   * Indicate that the scan operation is completed normally.
+125   */
+126  void onComplete();
+127
+128  /**
+129   * If {@code scan.isScanMetricsEnabled()} returns true, then this method will be called prior to
+130   * all other methods in this interface to give you the {@link ScanMetrics} instance for this scan
+131   * operation. The {@link ScanMetrics} instance will be updated on-the-fly during the scan, you can
+132   * store it somewhere to get the metrics at any time if you want.
+133   */
+134  default void onScanMetricsCreated(ScanMetrics scanMetrics) {
+135  }
+136}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ada5f22/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html b/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
index 59c2836..c804d26 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RawScanResultConsumer.ScanResumer.html
@@ -103,38 +103,45 @@
 095  void onNext(Result[] results, ScanController controll
 [identical hunk to the one above; both pages render the same source and the message is truncated here]

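The onHeartbeat() contract spelled out in the javadoc above lends itself to a
watchdog-style consumer. A minimal sketch, not taken from the commit: the class
name and idle threshold are illustrative, while the callbacks and
ScanController.terminate() are the interface members shown in the diff:

import org.apache.hadoop.hbase.client.RawScanResultConsumer;
import org.apache.hadoop.hbase.client.Result;

public class SlowScanGuard implements RawScanResultConsumer {
  private static final long MAX_IDLE_MS = 30_000L; // illustrative threshold
  private volatile long lastResultTime = System.currentTimeMillis();

  @Override
  public void onNext(Result[] results, ScanController controller) {
    lastResultTime = System.currentTimeMillis();
    // process results here...
  }

  @Override
  public void onHeartbeat(ScanController controller) {
    // Only heartbeats are arriving; give up if no real data has come for too long.
    // Per the javadoc, the controller may only be used inside this method.
    if (System.currentTimeMillis() - lastResultTime > MAX_IDLE_MS) {
      controller.terminate();
    }
  }

  @Override
  public void onError(Throwable error) {
    // surface the failure to the application
  }

  @Override
  public void onComplete() {
    // scan finished normally
  }
}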

hbase git commit: HBASE-18511 Default no regions on master

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master acf9b87dc -> 473446719


HBASE-18511 Default no regions on master

Changes the configuration hbase.balancer.tablesOnMaster from a list of
table names to a boolean: true if the master carries tables/regions,
false if it does not.

Adds a new configuration hbase.balancer.tablesOnMaster.systemTablesOnly.
If true, hbase.balancer.tablesOnMaster is considered true but only
system tables are put on the master.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
  Master was claiming itself active master though it had stopped. Fix
the activeMaster flag. Set it to false on exit.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
 Add new configs and convenience methods for getting current state of
settings.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 Move the configs up into the super-interface; now that the settings mean
something different, remove the no-longer-needed processing.
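
A short sketch of the new settings from client or test code, assuming the stock
Hadoop Configuration API; only the two property names come from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TablesOnMasterSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Boolean now, no longer a list of table names: should the master carry regions?
    conf.setBoolean("hbase.balancer.tablesOnMaster", true);
    // If true, tablesOnMaster is treated as true but only system tables land on the master.
    conf.setBoolean("hbase.balancer.tablesOnMaster.systemTablesOnly", true);
    System.out.println(conf.getBoolean("hbase.balancer.tablesOnMaster", false));
  }
}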


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/47344671
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/47344671
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/47344671

Branch: refs/heads/master
Commit: 473446719b7b81b56216862bf2a94a576ff90f60
Parents: acf9b87
Author: Michael Stack 
Authored: Wed Aug 2 22:54:21 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:39:36 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  28 ++-
 .../hadoop/hbase/master/LoadBalancer.java   |  31 ++-
 .../hadoop/hbase/master/ServerManager.java  |  30 +--
 .../hbase/master/balancer/BaseLoadBalancer.java | 110 --
 .../balancer/FavoredStochasticBalancer.java |  11 +-
 .../hbase/regionserver/HRegionServer.java   |   5 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   2 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   3 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  16 +-
 .../hbase/client/TestClientClusterStatus.java   |   5 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../hadoop/hbase/fs/TestBlockReorder.java   |   4 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |  19 +-
 .../hbase/master/TestMasterMetricsWrapper.java  |  13 +-
 .../hbase/master/TestMasterNoCluster.java   |   7 +-
 .../master/balancer/TestBaseLoadBalancer.java   |  10 +-
 .../balancer/TestRegionsOnMasterOptions.java| 200 +++
 .../hbase/regionserver/TestClusterId.java   |   4 +-
 .../TestRSKilledWhenInitializing.java   |  15 +-
 .../hbase/regionserver/TestRegionOpen.java  |   5 +-
 .../regionserver/TestRegionServerAbort.java |  14 +-
 .../regionserver/TestRegionServerHostname.java  |  11 +-
 .../regionserver/TestRegionServerMetrics.java   |  57 --
 .../TestRegionServerReadRequestMetrics.java |  12 +-
 .../TestRegionServerReportForDuty.java  |  15 +-
 .../TestSplitTransactionOnCluster.java  |  16 +-
 .../TestFlushWithThroughputController.java  |   8 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 29 files changed, 491 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/47344671/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ce83838..6b4d4e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -530,6 +530,17 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }

+  // Main run loop. Calls through to the regionserver run loop.
+  @Override
+  public void run() {
+    try {
+      super.run();
+    } finally {
+      // If on way out, then we are no longer active master.
+      this.activeMaster = false;
+    }
+  }
+
   // return the actual infoPort, -1 means disable info server.
   private int putUpJettyServer() throws IOException {
     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
@@ -604,9 +615,8 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   @Override
   protected void waitForMasterActive(){
-    boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
-    while (!(tablesOnMaster && activeMaster)
-        && !isStopped() && !isAborted()) {
+    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);
+    while (!(tablesOnMaster && activeMaster) && !isStopped() && !isAborted()) {
      slee

hbase git commit: HBASE-18511 Default no regions on master

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 53c5bcc58 -> 7149f9997


HBASE-18511 Default no regions on master

Changes the configuration hbase.balancer.tablesOnMaster from a list of
table names to a boolean: true if the master carries tables/regions,
false if it does not.

Adds a new configuration hbase.balancer.tablesOnMaster.systemTablesOnly.
If true, hbase.balancer.tablesOnMaster is considered true but only
system tables are put on the master.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
  Master was claiming itself active master though it had stopped. Fix
the activeMaster flag. Set it to false on exit.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
 Add new configs and convenience methods for getting current state of
settings.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 Move the configs up into the super-interface; now that the settings mean
something different, remove the no-longer-needed processing.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7149f999
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7149f999
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7149f999

Branch: refs/heads/branch-2
Commit: 7149f999786b6fd5a3fc1f7aec1214afb738925e
Parents: 53c5bcc
Author: Michael Stack 
Authored: Wed Aug 2 22:54:21 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 08:45:59 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  28 ++-
 .../hadoop/hbase/master/LoadBalancer.java   |  31 ++-
 .../hadoop/hbase/master/ServerManager.java  |  30 +--
 .../hbase/master/balancer/BaseLoadBalancer.java | 110 --
 .../balancer/FavoredStochasticBalancer.java |  11 +-
 .../hbase/regionserver/HRegionServer.java   |   5 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   2 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   3 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  16 +-
 .../hbase/client/TestClientClusterStatus.java   |   5 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../hadoop/hbase/fs/TestBlockReorder.java   |   4 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |  19 +-
 .../hbase/master/TestMasterMetricsWrapper.java  |  13 +-
 .../hbase/master/TestMasterNoCluster.java   |   7 +-
 .../master/balancer/TestBaseLoadBalancer.java   |  10 +-
 .../balancer/TestRegionsOnMasterOptions.java| 200 +++
 .../hbase/regionserver/TestClusterId.java   |   4 +-
 .../TestRSKilledWhenInitializing.java   |  15 +-
 .../hbase/regionserver/TestRegionOpen.java  |   5 +-
 .../regionserver/TestRegionServerAbort.java |  14 +-
 .../regionserver/TestRegionServerHostname.java  |  11 +-
 .../regionserver/TestRegionServerMetrics.java   |  57 --
 .../TestRegionServerReadRequestMetrics.java |  12 +-
 .../TestRegionServerReportForDuty.java  |  15 +-
 .../TestSplitTransactionOnCluster.java  |  16 +-
 .../TestFlushWithThroughputController.java  |   8 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 29 files changed, 491 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7149f999/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ce83838..6b4d4e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -530,6 +530,17 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }

+  // Main run loop. Calls through to the regionserver run loop.
+  @Override
+  public void run() {
+    try {
+      super.run();
+    } finally {
+      // If on way out, then we are no longer active master.
+      this.activeMaster = false;
+    }
+  }
+
   // return the actual infoPort, -1 means disable info server.
   private int putUpJettyServer() throws IOException {
     if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
@@ -604,9 +615,8 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   @Override
   protected void waitForMasterActive(){
-    boolean tablesOnMaster = BaseLoadBalancer.tablesOnMaster(conf);
-    while (!(tablesOnMaster && activeMaster)
-        && !isStopped() && !isAborted()) {
+    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(conf);
+    while (!(tablesOnMaster && activeMaster) && !isStopped() && !isAborted()) {

hbase git commit: HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 473446719 -> 5d2c3ddf5


HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d2c3ddf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d2c3ddf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d2c3ddf

Branch: refs/heads/master
Commit: 5d2c3ddf5238fa0b48b8aa4fb99108eaf69f16aa
Parents: 4734467
Author: Michael Stack 
Authored: Wed Aug 16 10:04:33 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 10:04:56 2017 -0700

--
 hbase-spark/pom.xml | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d2c3ddf/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8137b53..d294835 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -559,9 +559,11 @@
   test
 
 
 [The XML element names in this hunk were stripped in extraction. The change
  removes the bare JVM-argument block "-Xmx1536m -XX:ReservedCodeCacheSize=512m"
  and re-adds it alongside a new test system property whose value is the shaded
  package prefix "org.apache.hadoop.hbase.shaded.".]
   false
 
   



hbase git commit: HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/2 [created] c0b263051


HBASE-18271 Shade netty Purge mention of netty-all; ADDENDUM for sparktest


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c0b26305
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c0b26305
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c0b26305

Branch: refs/heads/2
Commit: c0b263051b64eacddb213d0f2db8410d4d63b7c4
Parents: 7149f99
Author: Michael Stack 
Authored: Wed Aug 16 10:04:33 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 10:05:37 2017 -0700

--
 hbase-spark/pom.xml | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c0b26305/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 1fd458c..9efca4f 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -563,9 +563,11 @@
   test
 
 
 [The XML element names in this hunk were stripped in extraction. The change
  removes the bare JVM-argument block "-Xmx1536m -XX:ReservedCodeCacheSize=512m"
  and re-adds it alongside a new test system property whose value is the shaded
  package prefix "org.apache.hadoop.hbase.shaded.".]
   false
 
   



[hbase] Git Push Summary

2017-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/2 [deleted] c0b263051


hbase git commit: HBASE-18251 Remove unnecessary traversing to the first and last keys in the CellSet (Toshihiro Suzuki)

2017-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a49d43bfb -> 6255dc700


HBASE-18251 Remove unnecessary traversing to the first and last keys in
the CellSet (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6255dc70
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6255dc70
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6255dc70

Branch: refs/heads/branch-1
Commit: 6255dc70014ad3faf70ae43a786e8d4f5b79dd41
Parents: a49d43b
Author: Ramkrishna 
Authored: Wed Aug 16 11:05:43 2017 +0530
Committer: Andrew Purtell 
Committed: Wed Aug 16 10:50:58 2017 -0700

--
 .../org/apache/hadoop/hbase/regionserver/CellSkipListSet.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6255dc70/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
index 4c3ab50..916a428 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
@@ -123,11 +123,11 @@ public class CellSkipListSet implements NavigableSet<Cell> {
   }

   public Cell first() {
-    return this.delegatee.get(this.delegatee.firstKey());
+    return this.delegatee.firstEntry().getValue();
   }

   public Cell last() {
-    return this.delegatee.get(this.delegatee.lastKey());
+    return this.delegatee.lastEntry().getValue();
   }

   public boolean add(Cell e) {
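
Why the one-line change matters, as a standalone sketch against the plain JDK
(not HBase code): get(firstKey()) walks the skip list twice and is not atomic
under concurrent removal, while firstEntry() walks it once and returns a
point-in-time snapshot of the least entry.

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class FirstEntryDemo {
  public static void main(String[] args) {
    ConcurrentSkipListMap<String, String> map = new ConcurrentSkipListMap<>();
    map.put("b", "2");
    map.put("a", "1");

    // Two traversals; firstKey() can also throw if the map empties in between.
    String viaGet = map.get(map.firstKey());

    // One traversal; returns a snapshot Entry (or null on an empty map).
    Map.Entry<String, String> least = map.firstEntry();
    String viaEntry = (least == null) ? null : least.getValue();

    System.out.println(viaGet + " " + viaEntry); // prints: 1 1
  }
}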



[11/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 2661dc1..b3ae957 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -101,11 +101,11 @@ public final class MasterProtos {
     /**
      * required .hbase.pb.TableName table_name = 1;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
     /**
      * required .hbase.pb.TableName table_name = 1;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();

     // required .hbase.pb.ColumnFamilySchema column_families = 2;
     /**
@@ -193,11 +193,11 @@ public final class MasterProtos {
           break;
         }
         case 10: {
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
           if (((bitField0_ & 0x0001) == 0x0001)) {
             subBuilder = tableName_.toBuilder();
           }
-          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
           if (subBuilder != null) {
             subBuilder.mergeFrom(tableName_);
             tableName_ = subBuilder.buildPartial();
@@ -270,7 +270,7 @@ public final class MasterProtos {
     private int bitField0_;
     // required .hbase.pb.TableName table_name = 1;
     public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
     /**
      * required .hbase.pb.TableName table_name = 1;
      */
@@ -280,13 +280,13 @@ public final class MasterProtos {
     /**
      * required .hbase.pb.TableName table_name = 1;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
       return tableName_;
     }
     /**
      * required .hbase.pb.TableName table_name = 1;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }

@@ -345,7 +345,7 @@ public final class MasterProtos {
     }

     private void initFields() {
-      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
       columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
       nonceGroup_ = 0L;
       nonce_ = 0L;
@@ -599,7 +599,7 @@ public final class MasterProtos {
       public Builder clear() {
         super.clear();
         if (tableNameBuilder_ == null) {
-          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
         } else {
           tableNameBuilder_.clear();
         }
@@ -738,9 +738,9 @@ public final class MasterProtos {
       private int bitField0_;

       // required .hbase.pb.TableName table_name = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.proto

[05/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d40c1f7..6a8cd7d 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -3814,11 +3814,11 @@ public final class MasterProcedureProtos {
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();

     // optional .hbase.pb.TableSchema table_schema = 4;
     /**
@@ -3929,11 +3929,11 @@ public final class MasterProcedureProtos {
           break;
         }
         case 26: {
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
           if (((bitField0_ & 0x0004) == 0x0004)) {
             subBuilder = tableName_.toBuilder();
           }
-          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
           if (subBuilder != null) {
             subBuilder.mergeFrom(tableName_);
             tableName_ = subBuilder.buildPartial();
@@ -4045,7 +4045,7 @@ public final class MasterProcedureProtos {

     // optional .hbase.pb.TableName table_name = 3;
     public static final int TABLE_NAME_FIELD_NUMBER = 3;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
@@ -4055,13 +4055,13 @@ public final class MasterProcedureProtos {
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
       return tableName_;
     }
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }

@@ -4126,7 +4126,7 @@ public final class MasterProcedureProtos {
     private void initFields() {
       userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
       preserveSplits_ = false;
-      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
       tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
       regionInfo_ = java.util.Collections.emptyList();
     }
@@ -4416,7 +4416,7 @@ public final class MasterProcedureProtos {
         preserveSplits_ = false;
         bitField0_ = (bitField0_ & ~0x0002);
         if (tableNameBuilder_ == null) {
-          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
         } else {
           tableNameBuilder_.clear();
         }
@@ -4760,9 +4760,9 @@ public final class MasterProcedureProtos {
       }

       // optional .hbase.pb.TableName table_name = 3;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();

[01/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6255dc700 -> 3feb87b00
  refs/heads/branch-1.4 3552c70b5 -> ea5789a7f


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c216995..32f62ee 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -185,7 +186,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -903,7 +903,7 @@ public class MasterRpcServices extends RSRpcServices
       List<TableName> tableNameList = null;
       if (req.getTableNamesCount() > 0) {
         tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
-        for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
+        for (TableProtos.TableName tableNamePB: req.getTableNamesList()) {
           tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
         }
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
index 0f1f495..2f769f3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 
 /**
  * Watch the current snapshot under process

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index ff59ea1..6f8bcd4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

http://git-wip-us.apache.org/repos/asf/hbase/bl

[12/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d40c1f7..6a8cd7d 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -3814,11 +3814,11 @@ public final class MasterProcedureProtos {
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName();
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder();

     // optional .hbase.pb.TableSchema table_schema = 4;
     /**
@@ -3929,11 +3929,11 @@ public final class MasterProcedureProtos {
           break;
         }
         case 26: {
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
           if (((bitField0_ & 0x0004) == 0x0004)) {
             subBuilder = tableName_.toBuilder();
           }
-          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+          tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
           if (subBuilder != null) {
             subBuilder.mergeFrom(tableName_);
             tableName_ = subBuilder.buildPartial();
@@ -4045,7 +4045,7 @@ public final class MasterProcedureProtos {

     // optional .hbase.pb.TableName table_name = 3;
     public static final int TABLE_NAME_FIELD_NUMBER = 3;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_;
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
@@ -4055,13 +4055,13 @@ public final class MasterProcedureProtos {
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() {
       return tableName_;
     }
     /**
      * optional .hbase.pb.TableName table_name = 3;
      */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() {
       return tableName_;
     }

@@ -4126,7 +4126,7 @@ public final class MasterProcedureProtos {
     private void initFields() {
       userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
       preserveSplits_ = false;
-      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
       tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
       regionInfo_ = java.util.Collections.emptyList();
     }
@@ -4416,7 +4416,7 @@ public final class MasterProcedureProtos {
         preserveSplits_ = false;
         bitField0_ = (bitField0_ & ~0x0002);
         if (tableNameBuilder_ == null) {
-          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+          tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
         } else {
           tableNameBuilder_.clear();
         }
@@ -4760,9 +4760,9 @@ public final class MasterProcedureProtos {
       }

       // optional .hbase.pb.TableName table_name = 3;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();

[07/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

- Refactor TableName into its own proto module and fix up users

- Move SnapshotDescription from Snapshot.proto back to HBase.proto

- Restore FastLongHistogram and TestFastLongHistogram;
  deprecate FastLongHistogram

- Move DeleteQueryTracker back to o.a.h.h.regionserver


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea5789a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea5789a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea5789a7

Branch: refs/heads/branch-1.4
Commit: ea5789a7f2953c4cbfa6ce29132e882186d81799
Parents: 3552c70
Author: Andrew Purtell 
Authored: Wed Aug 16 10:52:18 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 16 11:33:49 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |3 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |5 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |9 +-
 .../ClientSnapshotDescriptionUtils.java |2 +-
 .../snapshot/CorruptedSnapshotException.java|2 +-
 .../hbase/snapshot/HBaseSnapshotException.java  |2 +-
 .../snapshot/RestoreSnapshotException.java  |2 +-
 .../snapshot/SnapshotCreationException.java |2 +-
 .../snapshot/SnapshotDoesNotExistException.java |2 +-
 .../hbase/snapshot/SnapshotExistsException.java |2 +-
 .../hbase/client/TestSnapshotFromAdmin.java |2 +-
 .../hadoop/hbase/util/FastLongHistogram.java|  319 +
 .../hbase/util/TestFastLongHistogram.java   |  132 +
 hbase-protocol/pom.xml  |1 +
 .../protobuf/generated/AccessControlProtos.java |   92 +-
 .../hbase/protobuf/generated/ClientProtos.java  |2 +-
 .../hbase/protobuf/generated/HBaseProtos.java   | 5603 ++
 .../generated/MasterProcedureProtos.java|  586 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 1689 +++---
 .../generated/SecureBulkLoadProtos.java |   94 +-
 .../protobuf/generated/SnapshotProtos.java  | 1551 +
 .../hbase/protobuf/generated/TableProtos.java   |  607 ++
 .../hbase/protobuf/generated/WALProtos.java |  145 +-
 .../protobuf/generated/ZooKeeperProtos.java |  171 +-
 .../src/main/protobuf/AccessControl.proto   |2 +-
 hbase-protocol/src/main/protobuf/HBase.proto|   28 +-
 hbase-protocol/src/main/protobuf/Master.proto   |1 +
 .../src/main/protobuf/MasterProcedure.proto |1 +
 .../src/main/protobuf/SecureBulkLoad.proto  |1 +
 hbase-protocol/src/main/protobuf/Snapshot.proto |   19 -
 hbase-protocol/src/main/protobuf/Table.proto|   33 +
 hbase-protocol/src/main/protobuf/WAL.proto  |1 +
 .../src/main/protobuf/ZooKeeper.proto   |1 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|2 +-
 .../BaseMasterAndRegionObserver.java|2 +-
 .../hbase/coprocessor/BaseMasterObserver.java   |2 +-
 .../hbase/coprocessor/BaseRegionObserver.java   |2 +-
 .../hbase/coprocessor/MasterObserver.java   |2 +-
 .../hbase/coprocessor/RegionObserver.java   |2 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |2 +-
 .../hbase/master/MasterCoprocessorHost.java |2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |4 +-
 .../hadoop/hbase/master/SnapshotSentinel.java   |2 +-
 .../master/snapshot/CloneSnapshotHandler.java   |2 +-
 .../snapshot/DisabledTableSnapshotHandler.java  |2 +-
 .../snapshot/EnabledTableSnapshotHandler.java   |2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |2 +-
 .../master/snapshot/RestoreSnapshotHandler.java |2 +-
 .../hbase/master/snapshot/SnapshotManager.java  |4 +-
 .../master/snapshot/TakeSnapshotHandler.java|2 +-
 .../hbase/regionserver/DeleteTracker.java   |  101 +
 .../hadoop/hbase/regionserver/HRegion.java  |2 +-
 .../regionserver/RegionCoprocessorHost.java |2 +-
 .../CompactionScanQueryMatcher.java |1 +
 .../querymatcher/DeleteTracker.java |  101 -
 .../DropDeletesCompactionScanQueryMatcher.java  |1 +
 .../querymatcher/LegacyScanQueryMatcher.java|3 +-
 .../MajorCompactionScanQueryMatcher.java|1 +
 .../MinorCompactionScanQueryMatcher.java|1 +
 .../NormalUserScanQueryMatcher.java |1 +
 .../querymatcher/ScanDeleteTracker.java |1 +
 .../querymatcher/ScanQueryMatcher.java  |3 +-
 .../StripeCompactionScanQueryMatcher.java   |1 +
 .../snapshot/FlushSnapshotSubprocedure.java |2 +-
 .../snapshot/RegionServerSnapshotManager.java   |2 +-
 .../hbase/security/access/AccessController.java |2 +-
 .../visibility/VisibilityController.java|2 +-
 .../hadoop/hbase/snapshot/CreateSnapshot.java   |2 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |2 +-
 .../hba

[03/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
index 538e031..8521ba8 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
@@ -2517,11 +2517,11 @@ public final class SecureBulkLoadProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName();
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder();
   }
   /**
* Protobuf type {@code hbase.pb.PrepareBulkLoadRequest}
@@ -2575,11 +2575,11 @@ public final class SecureBulkLoadProtos {
   break;
 }
 case 10: {
-  
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
subBuilder = null;
+  
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder 
subBuilder = null;
   if (((bitField0_ & 0x0001) == 0x0001)) {
 subBuilder = tableName_.toBuilder();
   }
-  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER,
 extensionRegistry);
+  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER,
 extensionRegistry);
   if (subBuilder != null) {
 subBuilder.mergeFrom(tableName_);
 tableName_ = subBuilder.buildPartial();
@@ -2629,7 +2629,7 @@ public final class SecureBulkLoadProtos {
 private int bitField0_;
 // required .hbase.pb.TableName table_name = 1;
 public static final int TABLE_NAME_FIELD_NUMBER = 1;
-private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_;
+private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_;
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
@@ -2639,18 +2639,18 @@ public final class SecureBulkLoadProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName() {
+public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName() {
   return tableName_;
 }
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
+public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
   return tableName_;
 }
 
 private void initFields() {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -2844,7 +2844,7 @@ public final class SecureBulkLoadProtos {
   public Builder clear() {
 super.clear();
 if (tableNameBuilder_ == null) {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
 } else {
   tableNameBuilder_.clear();
 }
@@ -2940,9 +2940,9 @@ public final class SecureBulkLoadProtos {
   private int bitField0_;
 
   // required .hbase.pb.TableName table_name = 1;
-  private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
   private com.google.protobuf.SingleFieldBuilder<
-  org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBu

[09/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
new file mode 100644
index 000..9507d01
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
@@ -0,0 +1,607 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Table.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class TableProtos {
+  private TableProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface TableNameOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes namespace = 1;
+/**
+ * required bytes namespace = 1;
+ */
+boolean hasNamespace();
+/**
+ * required bytes namespace = 1;
+ */
+com.google.protobuf.ByteString getNamespace();
+
+// required bytes qualifier = 2;
+/**
+ * required bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * required bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableName}
+   *
+   * 
+   **
+   * Table Name
+   * 
+   */
+  public static final class TableName extends
+  com.google.protobuf.GeneratedMessage
+  implements TableNameOrBuilder {
+// Use TableName.newBuilder() to construct.
+private TableName(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private TableName(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final TableName defaultInstance;
+public static TableName getDefaultInstance() {
+  return defaultInstance;
+}
+
+public TableName getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private TableName(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  namespace_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.class, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder.class);
+}
+
+public static com.google.protobuf.Parser<TableName> PARSER =
+new com.google.protobuf.AbstractParser<TableName>() {
+  public TableName parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryL

[10/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
index 538e031..8521ba8 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
@@ -2517,11 +2517,11 @@ public final class SecureBulkLoadProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName();
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder();
   }
   /**
* Protobuf type {@code hbase.pb.PrepareBulkLoadRequest}
@@ -2575,11 +2575,11 @@ public final class SecureBulkLoadProtos {
   break;
 }
 case 10: {
-  
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
subBuilder = null;
+  
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder 
subBuilder = null;
   if (((bitField0_ & 0x0001) == 0x0001)) {
 subBuilder = tableName_.toBuilder();
   }
-  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER,
 extensionRegistry);
+  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER,
 extensionRegistry);
   if (subBuilder != null) {
 subBuilder.mergeFrom(tableName_);
 tableName_ = subBuilder.buildPartial();
@@ -2629,7 +2629,7 @@ public final class SecureBulkLoadProtos {
 private int bitField0_;
 // required .hbase.pb.TableName table_name = 1;
 public static final int TABLE_NAME_FIELD_NUMBER = 1;
-private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_;
+private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_;
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
@@ -2639,18 +2639,18 @@ public final class SecureBulkLoadProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName() {
+public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName() {
   return tableName_;
 }
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
+public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
   return tableName_;
 }
 
 private void initFields() {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -2844,7 +2844,7 @@ public final class SecureBulkLoadProtos {
   public Builder clear() {
 super.clear();
 if (tableNameBuilder_ == null) {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
 } else {
   tableNameBuilder_.clear();
 }
@@ -2940,9 +2940,9 @@ public final class SecureBulkLoadProtos {
   private int bitField0_;
 
   // required .hbase.pb.TableName table_name = 1;
-  private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
   private com.google.protobuf.SingleFieldBuilder<
-  org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBu

[02/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
new file mode 100644
index 000..9507d01
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableProtos.java
@@ -0,0 +1,607 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Table.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class TableProtos {
+  private TableProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface TableNameOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes namespace = 1;
+/**
+ * required bytes namespace = 1;
+ */
+boolean hasNamespace();
+/**
+ * required bytes namespace = 1;
+ */
+com.google.protobuf.ByteString getNamespace();
+
+// required bytes qualifier = 2;
+/**
+ * required bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * required bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableName}
+   *
+   * 
+   **
+   * Table Name
+   * 
+   */
+  public static final class TableName extends
+  com.google.protobuf.GeneratedMessage
+  implements TableNameOrBuilder {
+// Use TableName.newBuilder() to construct.
+private TableName(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private TableName(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final TableName defaultInstance;
+public static TableName getDefaultInstance() {
+  return defaultInstance;
+}
+
+public TableName getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private TableName(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  namespace_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.internal_static_hbase_pb_TableName_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.class, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder.class);
+}
+
+public static com.google.protobuf.Parser<TableName> PARSER =
+new com.google.protobuf.AbstractParser<TableName>() {
+  public TableName parsePartialFrom(
+  com.google.protobuf.CodedInputStream input,
+  com.google.protobuf.ExtensionRegistryL

[14/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

- Refactor TableName into its own proto module and fix up users (see the sketch after this list)

- Move SnapshotDescription from Snapshot.proto back to HBase.proto

- Restore FastLongHistogram and TestFastLongHistogram;
  deprecate FastLongHistogram

- Move DeleteQueryTracker back to o.a.h.h.regionserver
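
A hedged illustration of the first item above (example values, not from the commit): code that previously built HBaseProtos.TableName now builds the same two-field message from the new TableProtos module, and the wire format is unchanged.

    // Illustrative only -- "default" and "mytable" are made-up values.
    TableProtos.TableName pb = TableProtos.TableName.newBuilder()
        .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default")) // required bytes namespace = 1
        .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("mytable")) // required bytes qualifier = 2
        .build();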


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3feb87b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3feb87b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3feb87b0

Branch: refs/heads/branch-1
Commit: 3feb87b0054d23d8452b4fa557e9400781c3440d
Parents: 6255dc7
Author: Andrew Purtell 
Authored: Wed Aug 16 10:52:18 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 16 11:34:18 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |3 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |5 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |9 +-
 .../ClientSnapshotDescriptionUtils.java |2 +-
 .../snapshot/CorruptedSnapshotException.java|2 +-
 .../hbase/snapshot/HBaseSnapshotException.java  |2 +-
 .../snapshot/RestoreSnapshotException.java  |2 +-
 .../snapshot/SnapshotCreationException.java |2 +-
 .../snapshot/SnapshotDoesNotExistException.java |2 +-
 .../hbase/snapshot/SnapshotExistsException.java |2 +-
 .../hbase/client/TestSnapshotFromAdmin.java |2 +-
 .../hadoop/hbase/util/FastLongHistogram.java|  319 +
 .../hbase/util/TestFastLongHistogram.java   |  132 +
 hbase-protocol/pom.xml  |1 +
 .../protobuf/generated/AccessControlProtos.java |   92 +-
 .../hbase/protobuf/generated/ClientProtos.java  |2 +-
 .../hbase/protobuf/generated/HBaseProtos.java   | 5603 ++
 .../generated/MasterProcedureProtos.java|  586 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 1689 +++---
 .../generated/SecureBulkLoadProtos.java |   94 +-
 .../protobuf/generated/SnapshotProtos.java  | 1551 +
 .../hbase/protobuf/generated/TableProtos.java   |  607 ++
 .../hbase/protobuf/generated/WALProtos.java |  145 +-
 .../protobuf/generated/ZooKeeperProtos.java |  171 +-
 .../src/main/protobuf/AccessControl.proto   |2 +-
 hbase-protocol/src/main/protobuf/HBase.proto|   28 +-
 hbase-protocol/src/main/protobuf/Master.proto   |1 +
 .../src/main/protobuf/MasterProcedure.proto |1 +
 .../src/main/protobuf/SecureBulkLoad.proto  |1 +
 hbase-protocol/src/main/protobuf/Snapshot.proto |   19 -
 hbase-protocol/src/main/protobuf/Table.proto|   33 +
 hbase-protocol/src/main/protobuf/WAL.proto  |1 +
 .../src/main/protobuf/ZooKeeper.proto   |1 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|2 +-
 .../BaseMasterAndRegionObserver.java|2 +-
 .../hbase/coprocessor/BaseMasterObserver.java   |2 +-
 .../hbase/coprocessor/BaseRegionObserver.java   |2 +-
 .../hbase/coprocessor/MasterObserver.java   |2 +-
 .../hbase/coprocessor/RegionObserver.java   |2 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |2 +-
 .../hbase/master/MasterCoprocessorHost.java |2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |4 +-
 .../hadoop/hbase/master/SnapshotSentinel.java   |2 +-
 .../master/snapshot/CloneSnapshotHandler.java   |2 +-
 .../snapshot/DisabledTableSnapshotHandler.java  |2 +-
 .../snapshot/EnabledTableSnapshotHandler.java   |2 +-
 .../master/snapshot/MasterSnapshotVerifier.java |2 +-
 .../master/snapshot/RestoreSnapshotHandler.java |2 +-
 .../hbase/master/snapshot/SnapshotManager.java  |4 +-
 .../master/snapshot/TakeSnapshotHandler.java|2 +-
 .../hbase/regionserver/DeleteTracker.java   |  101 +
 .../hadoop/hbase/regionserver/HRegion.java  |2 +-
 .../regionserver/RegionCoprocessorHost.java |2 +-
 .../CompactionScanQueryMatcher.java |1 +
 .../querymatcher/DeleteTracker.java |  101 -
 .../DropDeletesCompactionScanQueryMatcher.java  |1 +
 .../querymatcher/LegacyScanQueryMatcher.java|3 +-
 .../MajorCompactionScanQueryMatcher.java|1 +
 .../MinorCompactionScanQueryMatcher.java|1 +
 .../NormalUserScanQueryMatcher.java |1 +
 .../querymatcher/ScanDeleteTracker.java |1 +
 .../querymatcher/ScanQueryMatcher.java  |3 +-
 .../StripeCompactionScanQueryMatcher.java   |1 +
 .../snapshot/FlushSnapshotSubprocedure.java |2 +-
 .../snapshot/RegionServerSnapshotManager.java   |2 +-
 .../hbase/security/access/AccessController.java |2 +-
 .../visibility/VisibilityController.java|2 +-
 .../hadoop/hbase/snapshot/CreateSnapshot.java   |2 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |2 +-
 .../hbase

[04/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 2661dc1..b3ae957 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -101,11 +101,11 @@ public final class MasterProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName();
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder();
 
 // required .hbase.pb.ColumnFamilySchema column_families = 2;
 /**
@@ -193,11 +193,11 @@ public final class MasterProtos {
   break;
 }
 case 10: {
-  
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
subBuilder = null;
+  
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder 
subBuilder = null;
   if (((bitField0_ & 0x0001) == 0x0001)) {
 subBuilder = tableName_.toBuilder();
   }
-  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER,
 extensionRegistry);
+  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER,
 extensionRegistry);
   if (subBuilder != null) {
 subBuilder.mergeFrom(tableName_);
 tableName_ = subBuilder.buildPartial();
@@ -270,7 +270,7 @@ public final class MasterProtos {
 private int bitField0_;
 // required .hbase.pb.TableName table_name = 1;
 public static final int TABLE_NAME_FIELD_NUMBER = 1;
-private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_;
+private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_;
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
@@ -280,13 +280,13 @@ public final class MasterProtos {
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName() {
+public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName() {
   return tableName_;
 }
 /**
  * required .hbase.pb.TableName table_name = 1;
  */
-public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
+public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder() {
   return tableName_;
 }
 
@@ -345,7 +345,7 @@ public final class MasterProtos {
 }
 
 private void initFields() {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
   columnFamilies_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance();
   nonceGroup_ = 0L;
   nonce_ = 0L;
@@ -599,7 +599,7 @@ public final class MasterProtos {
   public Builder clear() {
 super.clear();
 if (tableNameBuilder_ == null) {
-  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
 } else {
   tableNameBuilder_.clear();
 }
@@ -738,9 +738,9 @@ public final class MasterProtos {
   private int bitField0_;
 
   // required .hbase.pb.TableName table_name = 1;
-  private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+  private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
   private com.google.protobuf.SingleFieldBuilder<
-  org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.proto

[06/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5789a7/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 3c4fb61..b4c6c04 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -266,53 +266,123 @@ public final class HBaseProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.TimeUnit)
   }
 
-  public interface TableNameOrBuilder
+  public interface TableSchemaOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required bytes namespace = 1;
+// optional .hbase.pb.TableName table_name = 1;
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+boolean hasTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+
+// repeated .hbase.pb.BytesBytesPair attributes = 2;
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> 
+getAttributesList();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair 
getAttributes(int index);
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+int getAttributesCount();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
+getAttributesOrBuilderList();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder 
getAttributesOrBuilder(
+int index);
+
+// repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> 
+getColumnFamiliesList();
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema 
getColumnFamilies(int index);
 /**
- * required bytes namespace = 1;
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+int getColumnFamiliesCount();
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
  */
-boolean hasNamespace();
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> 
+getColumnFamiliesOrBuilderList();
 /**
- * required bytes namespace = 1;
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
  */
-com.google.protobuf.ByteString getNamespace();
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder
 getColumnFamiliesOrBuilder(
+int index);
 
-// required bytes qualifier = 2;
+// repeated .hbase.pb.NameStringPair configuration = 4;
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> 
+getConfigurationList();
 /**
- * required bytes qualifier = 2;
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair 
getConfiguration(int index);
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+int getConfigurationCount();
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
  */
-boolean hasQualifier();
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
+getConfigurationOrBuilderList();
 /**
- * required bytes qualifier = 2;
+ * repeated .hbase.pb.NameStringPair configuration = 4;
  */
-com.google.protobuf.ByteString getQualifier();
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder 
getConfigurationOrBuilder(
+int index);
   }
   /**
-   * Protobuf type {@code hbase.pb.TableName}
+   * Protobuf type {@code hbase.pb.TableSchema}
*
* 
**
-   * Table Name
+   * Table Schema
+   * Inspired by the rest TableSchema
* 
*/
-  public static final class TableName extends
+  public static final class TableSchema extends
   com.google.protobuf.GeneratedMessage
-  implements TableNameOrBuilder {
-// Use TableName.newBuilder() to construct.
-private TableName(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  implements TableSchemaOrBuilder {
+// Use TableSchema.newBuilder() to construct.
+private TableSchema(com.google.pro

[13/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 3c4fb61..b4c6c04 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -266,53 +266,123 @@ public final class HBaseProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.TimeUnit)
   }
 
-  public interface TableNameOrBuilder
+  public interface TableSchemaOrBuilder
   extends com.google.protobuf.MessageOrBuilder {
 
-// required bytes namespace = 1;
+// optional .hbase.pb.TableName table_name = 1;
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+boolean hasTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName 
getTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+
+// repeated .hbase.pb.BytesBytesPair attributes = 2;
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair> 
+getAttributesList();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair 
getAttributes(int index);
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+int getAttributesCount();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
+getAttributesOrBuilderList();
+/**
+ * repeated .hbase.pb.BytesBytesPair attributes = 2;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder 
getAttributesOrBuilder(
+int index);
+
+// repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema> 
+getColumnFamiliesList();
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema 
getColumnFamilies(int index);
 /**
- * required bytes namespace = 1;
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
+ */
+int getColumnFamiliesCount();
+/**
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
  */
-boolean hasNamespace();
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> 
+getColumnFamiliesOrBuilderList();
 /**
- * required bytes namespace = 1;
+ * repeated .hbase.pb.ColumnFamilySchema column_families = 3;
  */
-com.google.protobuf.ByteString getNamespace();
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder
 getColumnFamiliesOrBuilder(
+int index);
 
-// required bytes qualifier = 2;
+// repeated .hbase.pb.NameStringPair configuration = 4;
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> 
+getConfigurationList();
 /**
- * required bytes qualifier = 2;
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair 
getConfiguration(int index);
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
+ */
+int getConfigurationCount();
+/**
+ * repeated .hbase.pb.NameStringPair configuration = 4;
  */
-boolean hasQualifier();
+java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
+getConfigurationOrBuilderList();
 /**
- * required bytes qualifier = 2;
+ * repeated .hbase.pb.NameStringPair configuration = 4;
  */
-com.google.protobuf.ByteString getQualifier();
+
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder 
getConfigurationOrBuilder(
+int index);
   }
   /**
-   * Protobuf type {@code hbase.pb.TableName}
+   * Protobuf type {@code hbase.pb.TableSchema}
*
* 
**
-   * Table Name
+   * Table Schema
+   * Inspired by the rest TableSchema
* 
*/
-  public static final class TableName extends
+  public static final class TableSchema extends
   com.google.protobuf.GeneratedMessage
-  implements TableNameOrBuilder {
-// Use TableName.newBuilder() to construct.
-private TableName(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+  implements TableSchemaOrBuilder {
+// Use TableSchema.newBuilder() to construct.
+private TableSchema(com.google.pro

[08/14] hbase git commit: HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c216995..32f62ee 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -60,6 +60,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -185,7 +186,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
 import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -903,7 +903,7 @@ public class MasterRpcServices extends RSRpcServices
   List<TableName> tableNameList = null;
   if (req.getTableNamesCount() > 0) {
 tableNameList = new ArrayList<TableName>(req.getTableNamesCount());
-for (HBaseProtos.TableName tableNamePB: req.getTableNamesList()) {
+for (TableProtos.TableName tableNamePB: req.getTableNamesList()) {
   tableNameList.add(ProtobufUtil.toTableName(tableNamePB));
 }
   }
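
For context, the conversion used in the loop above is untouched by the module move; a minimal sketch of what ProtobufUtil.toTableName amounts to, assuming a TableName.valueOf(byte[], byte[]) factory (the real helper may differ in detail):

    // Sketch, not the verbatim HBase implementation.
    public static TableName toTableName(TableProtos.TableName tableNamePB) {
      return TableName.valueOf(tableNamePB.getNamespace().toByteArray(),
          tableNamePB.getQualifier().toByteArray());
    }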

http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
index 0f1f495..2f769f3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SnapshotSentinel.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 
 /**
  * Watch the current snapshot under process

http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
index ff59ea1..6f8bcd4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3feb87b0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
-

[hbase] Git Push Summary

2017-08-16 Thread stack
Repository: hbase
Updated Tags:  refs/tags/2.0.0-alpha-2RC0 [created] c8cbba635


svn commit: r21196 - /dev/hbase/hbase-2.0.0-alpha-2RC0/

2017-08-16 Thread stack
Author: stack
Date: Wed Aug 16 21:00:31 2017
New Revision: 21196

Log:
Added 2.0.0-alpha-2RC0

Added:
dev/hbase/hbase-2.0.0-alpha-2RC0/
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz   (with props)
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.asc
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.md5
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.mds
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.sha
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-src.tar.gz   (with props)
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-src.tar.gz.asc
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-src.tar.gz.md5
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-src.tar.gz.mds
dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-src.tar.gz.sha

Added: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.asc
==
--- dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.asc Wed Aug 
16 21:00:31 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQIcBAABCAAGBQJZlKcwAAoJEJgWx/yKzJPSPGwP/3eZVnV4YSMhXTeui/4ZuYYt
+00IZpaMp2jLDUhv9qin6JBOJWQsryU1Jfwu0y/9sF1sByJ/WrpDbq6rtrfVyh/sx
+XrJmaP0Q8C2yC8xPj/DujaR2iu0itRVhhX98XzpNnhIvJmg9OGiiCsg3Xhd6v2F3
+re/fJUkcZOYDoTCwl3LJo00qGcdUnoN02p7r+3M4hXblleHvhYEEMj4Bk9Oxv/JW
++FXESx9LZuaIq7jAEPpxG+2oK0LgmSBmRJlEnY+npvtRmFCB4CZYUjUur+TwtMHg
+bBs3vLPPpPltdPbF99C4yBYo6/YHyo137FJwnKN0OPJj3RdlEsAAPYS4FSsOGXwh
+8T2MYlIdSK4Syfu0AXg37NPhRl8qd+/pp+HtyyvOMdWbSCSBR5O+59NV0OrnY5ef
+kdD5pP++QZVn6L6fXMYvwesRWzGBslChfhsIcW359fK8vy6cTIzdHqPMrFK0A96U
+3btfQzDckvmQfcVwiy/rdMhXyNtznTr1Bm+5sRSD8i4eu1h5CRepuWjm3Gm1EtDe
+guMBHVORtxoE+3RVNWE0KClV5JWDKomwbkc3hjoeAi0UJ+ZLXwsf03ERQSBppyN2
+pRnoqv/mDiBc9aWiWSNVwk1KtUiPw/kpoMmfvryrOQSfz9w84ey/OQX84+XihiBa
+3xf/bTwsQkBFPg4JXHUh
+=7UdA
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.md5
==
--- dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.md5 (added)
+++ dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.md5 Wed Aug 
16 21:00:31 2017
@@ -0,0 +1 @@
+hbase-2.0.0-alpha2-bin.tar.gz: 2F 6E 58 C8 37 D3 51 99  FC 8F DC C2 D8 63 22 D5

Added: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.mds
==
--- dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.mds Wed Aug 
16 21:00:31 2017
@@ -0,0 +1,17 @@
+hbase-2.0.0-alpha2-bin.tar.gz:MD5 = 2F 6E 58 C8 37 D3 51 99  FC 8F DC C2 D8
+63 22 D5
+hbase-2.0.0-alpha2-bin.tar.gz:   SHA1 = 8412 A3DD 0907 EE86 81DA  337A 1285 
9F2C
+CDC1 8F42
+hbase-2.0.0-alpha2-bin.tar.gz: RMD160 = 050A 028B 0AE4 832D 2E09  CC47 C037 
D4B1
+979F 88EF
+hbase-2.0.0-alpha2-bin.tar.gz: SHA224 = E31466A5 C7E79B03 0EF62953 89BF4FCB
+59207F0D 2D3460AB 27159F0B
+hbase-2.0.0-alpha2-bin.tar.gz: SHA256 = C3553FCD 6CFA8909 5916681A C46D18C2
+11ADD29E D1CA1A94 3F64BCD9 E7C75FD9
+hbase-2.0.0-alpha2-bin.tar.gz: SHA384 = 4E5CA4AE C24B6FB9 0A81D36D F77E116B
+23F683D3 BBB2EC8B E1FDCAF0 A23F4C3E
+E494CF70 57D4DB60 2CF469D0 7089A6F7
+hbase-2.0.0-alpha2-bin.tar.gz: SHA512 = DD42B9D3 AE001E2B 0BC93691 0160986A
+073A3EFD FFC8EDDC DD94404D B13E97DB
+555EB425 D121DEDF C457E0B9 FE2C0FB2
+E8CCACBB 405E5A93 41D9469A D62A73CB

Added: dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.sha
==
--- dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.sha (added)
+++ dev/hbase/hbase-2.0.0-alpha-2RC0/hbase-2.0.0-alpha2-bin.tar.gz.sha Wed Aug 
16 21:00:31 2017
@@ -0,0 +1,4 @@
+hbase-2.0.0-alpha2-bin.tar.gz: DD42B9D3 AE001E2B 0BC93691 0160986A 073A3EFD
+   FFC8EDDC DD94404D B13E97DB 555EB425 D121DEDF
+   C457E0B9 FE2C0FB2 
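
A self-contained way to check the staged tarball against the SHA-512 posted above (the filename comes from the listing; everything else here is an illustrative sketch, not part of the commit):

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    public class VerifyRC {
      public static void main(String[] args) throws Exception {
        // Hash the downloaded artifact and print it in the .sha file's hex style.
        byte[] data = Files.readAllBytes(Paths.get("hbase-2.0.0-alpha2-bin.tar.gz"));
        byte[] digest = MessageDigest.getInstance("SHA-512").digest(data);
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
          hex.append(String.format("%02X ", b));
        }
        System.out.println(hex.toString().trim());
      }
    }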

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7149f9997 -> 4057552ed


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4057552e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4057552e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4057552e

Branch: refs/heads/branch-2
Commit: 4057552ed6192c7b2e61035636dc8c7a897917c6
Parents: 7149f99
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 14:45:33 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4057552e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 2bd0860..6947313 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -134,11 +134,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-List<Cell> list = this.familyMap.get(family);
-if (list == null) {
-  list  = new ArrayList<>(1);
-  this.familyMap.put(family, list);
-}
+
+// Get cell list for the family
+List<Cell> list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;
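
Both hunks in this commit swap the same five-line get-or-create block for Mutation#getCellList(family). A sketch of the helper's likely shape, reconstructed from the code being deleted (the actual Mutation implementation may differ):

    // Returns the cell list for a family, creating and registering an empty
    // one on first use -- the pattern Append/Delete previously inlined.
    List<Cell> getCellList(byte[] family) {
      List<Cell> list = this.familyMap.get(family);
      if (list == null) {
        list = new ArrayList<>(1);
        this.familyMap.put(family, list);
      }
      return list;
    }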

http://git-wip-us.apache.org/repos/asf/hbase/blob/4057552e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index bf5241c..66b6cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -180,11 +180,7 @@ public class Delete extends Mutation implements Comparable<Row> {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-List<Cell> list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List<Cell> list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -216,11 +212,8 @@ public class Delete extends Mutation implements Comparable<Row> {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List<Cell> list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-} else if(!list.isEmpty()) {
+List<Cell> list = getCellList(family);
+if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -236,11 +229,7 @@ public class Delete extends Mutation implements Comparable<Row> {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List<Cell> list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List<Cell> list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -269,11 +258,7 @@ public class Delete extends Mutation implements Comparable<Row> {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List<Cell> list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List<Cell> list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -304,11 +289,7 @@ public class Delete extends Mutation implements Comparable<Row> {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List<Cell> list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List<Cell> list = getCell

hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 3feb87b00 -> 1f1ab8c87


HBASE-18587 Fix flaky TestFileIOEngine

This short circuits reads and writes with 0 length and also removes flakiness 
in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f1ab8c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f1ab8c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f1ab8c8

Branch: refs/heads/branch-1
Commit: 1f1ab8c873b193675766969df83db91213137d72
Parents: 3feb87b
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 14:50:11 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |   8 +-
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 122 +++
 2 files changed, 79 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1ab8c8/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a7d6956..3419587 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -102,7 +102,10 @@ public class FileIOEngine implements IOEngine {
*/
   @Override
   public int read(ByteBuffer dstBuffer, long offset) throws IOException {
-return accessFile(readAccessor, dstBuffer, offset);
+if (dstBuffer.remaining() != 0) {
+  return accessFile(readAccessor, dstBuffer, offset);
+}
+return 0;
   }
 
   /**
@@ -113,6 +116,9 @@ public class FileIOEngine implements IOEngine {
*/
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+if (!srcBuffer.hasRemaining()) {
+  return;
+}
 accessFile(writeAccessor, srcBuffer, offset);
   }
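
A minimal assumed usage showing the guarded behavior (engine setup elided):

    // Zero-length buffers no longer reach accessFile():
    ByteBuffer empty = ByteBuffer.allocate(0);
    fileIOEngine.write(empty, 0L);                // silently a no-op
    int bytesRead = fileIOEngine.read(empty, 0L); // returns 0 immediately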
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f1ab8c8/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index 8c71c09..a03818b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,6 +27,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -35,61 +37,81 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestFileIOEngine {
-  @Test
-  public void testFileIOEngine() throws IOException {
-long totalCapacity = 6 * 1024 * 1024; // 6 MB
-String[] filePaths = { "testFileIOEngine1", "testFileIOEngine2",
-"testFileIOEngine3" };
-long sizePerFile = totalCapacity / filePaths.length; // 2 MB per File
-List<Long> boundaryStartPositions = new ArrayList<Long>();
+
+  private static final long TOTAL_CAPACITY = 6 * 1024 * 1024; // 6 MB
+  private static final String[] FILE_PATHS = {"testFileIOEngine1", 
"testFileIOEngine2",
+  "testFileIOEngine3"};
+  private static final long SIZE_PER_FILE = TOTAL_CAPACITY / 
FILE_PATHS.length; // 2 MB per File
+  private final static List<Long> boundaryStartPositions = new ArrayList<Long>();
+  private final static List<Long> boundaryStopPositions = new ArrayList<Long>();
+
+  private FileIOEngine fileIOEngine;
+
+  static {
 boundaryStartPositions.add(0L);
-for (int i = 1; i < filePaths.length; i++) {
-  boundaryStartPositions.add(sizePerFile * i - 1);
-  boundaryStartPositions.add(sizePerFile * i);
-  boundaryStartPositions.add(sizePerFile * i + 1);
+for (int i = 1; i < FILE_PATHS.length; i++) {
+  boundaryStartPositions.add(SIZE_PER_FILE * i - 1);
+  boundaryStartPositions.add(SIZE_PER_FILE * i);
+  boundaryStartPositions.add(SIZE_PER_FILE * i + 1);
 }
-List<Long> boundaryStopPositions = new ArrayList<Long>();
-for (int i = 1; i < filePaths.length; i++) {
-  
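
The hunk above is cut off in this digest; the new org.junit.Before/After imports imply per-test setup and cleanup roughly like the following sketch (assumed, not verbatim; constructor and shutdown() per the branch-1 FileIOEngine API):

    @Before
    public void setUp() throws IOException {
      fileIOEngine = new FileIOEngine(TOTAL_CAPACITY, FILE_PATHS);
    }

    @After
    public void cleanUp() {
      fileIOEngine.shutdown();
      for (String filePath : FILE_PATHS) {
        new File(filePath).delete();
      }
    }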

hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 ea5789a7f -> 255319a0e


HBASE-18587 Fix flaky TestFileIOEngine

This short circuits reads and writes with 0 length and also removes flakiness 
in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/255319a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/255319a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/255319a0

Branch: refs/heads/branch-1.4
Commit: 255319a0e680ce15c8179c54743e9ad4d343cadb
Parents: ea5789a
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 14:50:48 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |   8 +-
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 122 +++
 2 files changed, 79 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/255319a0/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a7d6956..3419587 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -102,7 +102,10 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public int read(ByteBuffer dstBuffer, long offset) throws IOException {
-    return accessFile(readAccessor, dstBuffer, offset);
+    if (dstBuffer.remaining() != 0) {
+      return accessFile(readAccessor, dstBuffer, offset);
+    }
+    return 0;
   }
 
   /**
@@ -113,6 +116,9 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+    if (!srcBuffer.hasRemaining()) {
+      return;
+    }
     accessFile(writeAccessor, srcBuffer, offset);
   }
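
For illustration, a minimal sketch of the behavior the two guards above introduce (the FileIOEngine constructor and shutdown() shapes are assumed from the test code, not quoted from this diff): a buffer with nothing remaining now returns immediately instead of reaching the FileChannel accessors.

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine;

    public class ZeroLengthAccessSketch {
      public static void main(String[] args) throws Exception {
        // 2 MB engine backed by a single file; constructor signature assumed.
        FileIOEngine engine = new FileIOEngine(2 * 1024 * 1024, "testZeroLengthSketch");
        try {
          ByteBuffer empty = ByteBuffer.allocate(0);
          int n = engine.read(empty, 0); // short-circuits: returns 0, never hits the channel
          engine.write(empty, 0);        // short-circuits: plain no-op
          System.out.println("zero-length read returned " + n);
        } finally {
          engine.shutdown();             // assumed cleanup method
        }
      }
    }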
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/255319a0/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index 8c71c09..a03818b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,6 +27,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -35,61 +37,81 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestFileIOEngine {
-  @Test
-  public void testFileIOEngine() throws IOException {
-    long totalCapacity = 6 * 1024 * 1024; // 6 MB
-    String[] filePaths = { "testFileIOEngine1", "testFileIOEngine2",
-        "testFileIOEngine3" };
-    long sizePerFile = totalCapacity / filePaths.length; // 2 MB per File
-    List<Long> boundaryStartPositions = new ArrayList<Long>();
+
+  private static final long TOTAL_CAPACITY = 6 * 1024 * 1024; // 6 MB
+  private static final String[] FILE_PATHS = {"testFileIOEngine1", "testFileIOEngine2",
+      "testFileIOEngine3"};
+  private static final long SIZE_PER_FILE = TOTAL_CAPACITY / FILE_PATHS.length; // 2 MB per File
+  private final static List<Long> boundaryStartPositions = new ArrayList<Long>();
+  private final static List<Long> boundaryStopPositions = new ArrayList<Long>();
+
+  private FileIOEngine fileIOEngine;
+
+  static {
     boundaryStartPositions.add(0L);
-    for (int i = 1; i < filePaths.length; i++) {
-      boundaryStartPositions.add(sizePerFile * i - 1);
-      boundaryStartPositions.add(sizePerFile * i);
-      boundaryStartPositions.add(sizePerFile * i + 1);
+    for (int i = 1; i < FILE_PATHS.length; i++) {
+      boundaryStartPositions.add(SIZE_PER_FILE * i - 1);
+      boundaryStartPositions.add(SIZE_PER_FILE * i);
+      boundaryStartPositions.add(SIZE_PER_FILE * i + 1);
     }
-    List<Long> boundaryStopPositions = new ArrayList<Long>();
-    for (int i = 1; i < filePaths.length; i++) {
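
The digest cuts the rest of this diff off. Given the @Before/@After imports added above, the per-test lifecycle presumably looks roughly like the sketch below (method bodies are an assumption for illustration, not quoted from the commit):

    @Before
    public void setUp() throws IOException {
      fileIOEngine = new FileIOEngine(TOTAL_CAPACITY, FILE_PATHS);
    }

    @After
    public void cleanUp() {
      fileIOEngine.shutdown();
      for (String filePath : FILE_PATHS) {
        File file = new File(filePath);
        if (file.exists()) {
          file.delete();  // remove the backing files between tests
        }
      }
    }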

hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 7800fa152 -> 2fd2b3f9b


HBASE-18587 Fix flaky TestFileIOEngine

This short-circuits reads and writes with 0 length and also removes flakiness in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2fd2b3f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2fd2b3f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2fd2b3f9

Branch: refs/heads/branch-1.3
Commit: 2fd2b3f9b86509c37a7ed22eb93bf1c493711739
Parents: 7800fa1
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 16 14:51:11 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |   8 +-
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 122 +++
 2 files changed, 79 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2fd2b3f9/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a7d6956..3419587 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -102,7 +102,10 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public int read(ByteBuffer dstBuffer, long offset) throws IOException {
-    return accessFile(readAccessor, dstBuffer, offset);
+    if (dstBuffer.remaining() != 0) {
+      return accessFile(readAccessor, dstBuffer, offset);
+    }
+    return 0;
   }
 
   /**
@@ -113,6 +116,9 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+    if (!srcBuffer.hasRemaining()) {
+      return;
+    }
     accessFile(writeAccessor, srcBuffer, offset);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2fd2b3f9/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index 8c71c09..a03818b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,6 +27,8 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -35,61 +37,81 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestFileIOEngine {
-  @Test
-  public void testFileIOEngine() throws IOException {
-    long totalCapacity = 6 * 1024 * 1024; // 6 MB
-    String[] filePaths = { "testFileIOEngine1", "testFileIOEngine2",
-        "testFileIOEngine3" };
-    long sizePerFile = totalCapacity / filePaths.length; // 2 MB per File
-    List<Long> boundaryStartPositions = new ArrayList<Long>();
+
+  private static final long TOTAL_CAPACITY = 6 * 1024 * 1024; // 6 MB
+  private static final String[] FILE_PATHS = {"testFileIOEngine1", "testFileIOEngine2",
+      "testFileIOEngine3"};
+  private static final long SIZE_PER_FILE = TOTAL_CAPACITY / FILE_PATHS.length; // 2 MB per File
+  private final static List<Long> boundaryStartPositions = new ArrayList<Long>();
+  private final static List<Long> boundaryStopPositions = new ArrayList<Long>();
+
+  private FileIOEngine fileIOEngine;
+
+  static {
     boundaryStartPositions.add(0L);
-    for (int i = 1; i < filePaths.length; i++) {
-      boundaryStartPositions.add(sizePerFile * i - 1);
-      boundaryStartPositions.add(sizePerFile * i);
-      boundaryStartPositions.add(sizePerFile * i + 1);
+    for (int i = 1; i < FILE_PATHS.length; i++) {
+      boundaryStartPositions.add(SIZE_PER_FILE * i - 1);
+      boundaryStartPositions.add(SIZE_PER_FILE * i);
+      boundaryStartPositions.add(SIZE_PER_FILE * i + 1);
     }
-    List<Long> boundaryStopPositions = new ArrayList<Long>();
-    for (int i = 1; i < filePaths.length; i++) {

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 5d2c3ddf5 -> 4c3a64db1


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c3a64db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c3a64db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c3a64db

Branch: refs/heads/master
Commit: 4c3a64db13b086ad3d8a6ffa1be8ba2f5a24719c
Parents: 5d2c3dd
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 14:50:46 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 2bd0860..6947313 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -134,11 +134,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
     // Presume it is KeyValue for now.
     byte [] family = CellUtil.cloneFamily(cell);
-    List<Cell> list = this.familyMap.get(family);
-    if (list == null) {
-      list  = new ArrayList<>(1);
-      this.familyMap.put(family, list);
-    }
+
+    // Get cell list for the family
+    List<Cell> list = getCellList(family);
+
     // find where the new entry should be placed in the List
     list.add(cell);
     return this;
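
For reference, the get-or-create pattern that getCellList(family) centralizes looks roughly like the sketch below. This is a presumed shape of the Mutation method, which this diff does not show; the exact body may differ:

    // In Mutation (sketch only):
    List<Cell> getCellList(byte[] family) {
      List<Cell> list = familyMap.get(family);
      if (list == null) {
        list = new ArrayList<>();
        familyMap.put(family, list); // register the new list so the mutation sees it
      }
      return list;
    }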

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index bf5241c..66b6cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -180,11 +180,7 @@ public class Delete extends Mutation implements Comparable<Row> {
         " doesn't match the original one " +  Bytes.toStringBinary(this.row));
     }
     byte [] family = CellUtil.cloneFamily(kv);
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(kv);
     return this;
   }
@@ -216,11 +212,8 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    } else if(!list.isEmpty()) {
+    List<Cell> list = getCellList(family);
+    if(!list.isEmpty()) {
       list.clear();
     }
     KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily);
@@ -236,11 +229,7 @@ public class Delete extends Mutation implements Comparable<Row> {
    * @return this for invocation chaining
    */
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(row, family, null, timestamp,
       KeyValue.Type.DeleteFamilyVersion));
     return this;
@@ -269,11 +258,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(this.row, family, qualifier, timestamp,
         KeyValue.Type.DeleteColumn));
     return this;
@@ -304,11 +289,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<>(1);
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1f1ab8c87 -> 54aaf6bfb


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/54aaf6bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/54aaf6bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/54aaf6bf

Branch: refs/heads/branch-1
Commit: 54aaf6bfb72815c343885f7ec40edbe20e4bc394
Parents: 1f1ab8c
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 15:12:56 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/54aaf6bf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 0741a0d..efc958d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -122,11 +122,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
     // Presume it is KeyValue for now.
     byte [] family = CellUtil.cloneFamily(cell);
-    List<Cell> list = this.familyMap.get(family);
-    if (list == null) {
-      list  = new ArrayList<Cell>();
-      this.familyMap.put(family, list);
-    }
+
+    // Get cell list for the family
+    List<Cell> list = getCellList(family);
+
     // find where the new entry should be placed in the List
     list.add(cell);
     return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/54aaf6bf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8682eae..e45ae59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -170,11 +170,7 @@ public class Delete extends Mutation implements Comparable<Row> {
         " doesn't match the original one " +  Bytes.toStringBinary(this.row));
     }
     byte [] family = CellUtil.cloneFamily(kv);
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(kv);
     return this;
   }
@@ -236,11 +232,8 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    } else if(!list.isEmpty()) {
+    List<Cell> list = getCellList(family);
+    if(!list.isEmpty()) {
       list.clear();
     }
     KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily);
@@ -269,11 +262,7 @@ public class Delete extends Mutation implements Comparable<Row> {
    * @return this for invocation chaining
    */
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(row, family, null, timestamp,
       KeyValue.Type.DeleteFamilyVersion));
     return this;
@@ -328,11 +317,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(this.row, family, qualifier, timestamp,
         KeyValue.Type.DeleteColumn));
     return this;
@@ -391,11 +376,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);

hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 4c3a64db1 -> a17ed0356


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a17ed035
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a17ed035
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a17ed035

Branch: refs/heads/master
Commit: a17ed0356f12c6f7a682557d92cc401b7a4297f1
Parents: 4c3a64d
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:14:12 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a17ed035/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4b0de5f..2aacd7f 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -386,7 +386,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip)
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end
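
The distinction, restated in Java for illustration (not code from the commit): Ruby's String#strip trims all leading and trailing whitespace, so a split key that legitimately ends in spaces is silently altered, while String#chomp removes only the trailing line terminator.

    public class ChompVsStrip {
      public static void main(String[] args) {
        String line = "splitkey  \n";                   // key with meaningful trailing spaces
        String stripped = line.strip();                 // "splitkey" -- like Ruby strip, spaces lost (Java 11+)
        String chomped = line.replaceAll("\\R\\z", ""); // "splitkey  " -- like Ruby chomp, newline only
        System.out.println("[" + stripped + "] vs [" + chomped + "]");
      }
    }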



hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4057552ed -> 242ccd588


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/242ccd58
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/242ccd58
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/242ccd58

Branch: refs/heads/branch-2
Commit: 242ccd5881c23ae55f95e1db54dbcf1b0934f521
Parents: 4057552
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:14:39 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/242ccd58/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 4b0de5f..2aacd7f 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -386,7 +386,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip)
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 54aaf6bfb -> f1c6f1642


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1c6f164
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1c6f164
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1c6f164

Branch: refs/heads/branch-1
Commit: f1c6f164288f1a6b81aa17428a7ce795ff088d31
Parents: 54aaf6b
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:16:32 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1c6f164/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 502abe2..b225e1a 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -357,7 +357,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip())
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 255319a0e -> 60f88a970


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60f88a97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60f88a97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60f88a97

Branch: refs/heads/branch-1.4
Commit: 60f88a97084656ad18e4b890d311cb110d4b6fa8
Parents: 255319a
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 15:13:43 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60f88a97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 0741a0d..efc958d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -122,11 +122,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
     // Presume it is KeyValue for now.
     byte [] family = CellUtil.cloneFamily(cell);
-    List<Cell> list = this.familyMap.get(family);
-    if (list == null) {
-      list  = new ArrayList<Cell>();
-      this.familyMap.put(family, list);
-    }
+
+    // Get cell list for the family
+    List<Cell> list = getCellList(family);
+
     // find where the new entry should be placed in the List
     list.add(cell);
     return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/60f88a97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8682eae..e45ae59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -170,11 +170,7 @@ public class Delete extends Mutation implements Comparable<Row> {
         " doesn't match the original one " +  Bytes.toStringBinary(this.row));
     }
     byte [] family = CellUtil.cloneFamily(kv);
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(kv);
     return this;
   }
@@ -236,11 +232,8 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    } else if(!list.isEmpty()) {
+    List<Cell> list = getCellList(family);
+    if(!list.isEmpty()) {
       list.clear();
     }
     KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily);
@@ -269,11 +262,7 @@ public class Delete extends Mutation implements Comparable<Row> {
    * @return this for invocation chaining
    */
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(row, family, null, timestamp,
       KeyValue.Type.DeleteFamilyVersion));
     return this;
@@ -328,11 +317,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if (list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);
     list.add(new KeyValue(this.row, family, qualifier, timestamp,
         KeyValue.Type.DeleteColumn));
     return this;
@@ -391,11 +376,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     if (timestamp < 0) {
       throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
     }
-    List<Cell> list = familyMap.get(family);
-    if(list == null) {
-      list = new ArrayList<Cell>();
-      familyMap.put(family, list);
-    }
+    List<Cell> list = getCellList(family);

hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 60f88a970 -> f936b6305


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f936b630
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f936b630
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f936b630

Branch: refs/heads/branch-1.4
Commit: f936b630530f7ed528c4f6b2feec80efa258abe3
Parents: 60f88a9
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:17:40 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f936b630/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 502abe2..b225e1a 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -357,7 +357,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip())
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 2fd2b3f9b -> 1fdc24764


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1fdc2476
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1fdc2476
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1fdc2476

Branch: refs/heads/branch-1.3
Commit: 1fdc2476464e78f76b2c6e5f32fc253c51e26d63
Parents: 2fd2b3f
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:18:10 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1fdc2476/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 7950ca6..e6dd672 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -336,7 +336,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip())
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 d61254b90 -> 4f639e7d8


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f639e7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f639e7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f639e7d

Branch: refs/heads/branch-1.2
Commit: 4f639e7d8a3ed10851cf2529e5f83493b40ea08e
Parents: d61254b
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:18:34 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f639e7d/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index a755a32..a9e49c5 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -304,7 +304,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip())
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



hbase git commit: HBASE-18125 shell disregards spaces at the end of a split key in a split file

2017-08-16 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 2550236ac -> a566f3352


HBASE-18125 shell disregards spaces at the end of a split key in a split file

Signed-off-by: fchenxi 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a566f335
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a566f335
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a566f335

Branch: refs/heads/branch-1.1
Commit: a566f33528d0f93f1de5418c6d7b45ab48a95373
Parents: 2550236
Author: Chenxi Tong 
Authored: Wed Aug 9 12:00:53 2017 +0800
Committer: Michael Stack 
Committed: Wed Aug 16 15:18:56 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a566f335/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 32885d9..b0252f8 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -268,7 +268,7 @@ module Hbase
   end
   arg[SPLITS] = []
   File.foreach(splits_file) do |line|
-arg[SPLITS].push(line.strip())
+arg[SPLITS].push(line.chomp)
   end
   htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
 end



[2/2] hbase git commit: Amend HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
Amend HBASE-18431 Mitigate compatibility concerns between branch-1.3 and 
branch-1.4

Fix NameError in admin.rb


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a1a9742
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a1a9742
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a1a9742

Branch: refs/heads/branch-1
Commit: 6a1a97422ccdc1a6b9aeadda9a2eaf89ac548870
Parents: f1c6f16
Author: Andrew Purtell 
Authored: Wed Aug 16 18:05:55 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 16 18:06:34 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6a1a9742/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index b225e1a..0727f7c 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -24,7 +24,7 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.ServerName
 java_import org.apache.hadoop.hbase.TableName
-java_import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos::SnapshotDescription
+java_import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos::SnapshotDescription
 
 # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
 



[1/2] hbase git commit: Amend HBASE-18431 Mitigate compatibility concerns between branch-1.3 and branch-1.4

2017-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f1c6f1642 -> 6a1a97422
  refs/heads/branch-1.4 f936b6305 -> ee87edd89


Amend HBASE-18431 Mitigate compatibility concerns between branch-1.3 and 
branch-1.4

Fix NameError in admin.rb


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ee87edd8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ee87edd8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ee87edd8

Branch: refs/heads/branch-1.4
Commit: ee87edd89049cd215b7df91a4a0b98941a47ba63
Parents: f936b63
Author: Andrew Purtell 
Authored: Wed Aug 16 18:05:55 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 16 18:06:00 2017 -0700

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ee87edd8/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index b225e1a..0727f7c 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -24,7 +24,7 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.ServerName
 java_import org.apache.hadoop.hbase.TableName
-java_import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos::SnapshotDescription
+java_import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos::SnapshotDescription
 
 # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
 



hbase git commit: HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the implementation

2017-08-16 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master a17ed0356 -> 092dc6de8


HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/092dc6de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/092dc6de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/092dc6de

Branch: refs/heads/master
Commit: 092dc6de8483eea1b4e0d960cf22e65359379da1
Parents: a17ed03
Author: Guanghao Zhang 
Authored: Wed Aug 16 18:00:53 2017 +0800
Committer: Guanghao Zhang 
Committed: Thu Aug 17 09:47:39 2017 +0800

--
 .../hadoop/hbase/client/AsyncAdminBuilder.java  | 16 
 .../hadoop/hbase/client/AsyncAdminBuilderBase.java  | 12 ++--
 .../apache/hadoop/hbase/client/AsyncConnection.java |  4 ++--
 .../hadoop/hbase/client/AsyncConnectionImpl.java| 12 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  2 +-
 .../hadoop/hbase/client/TestAsyncAdminBuilder.java  |  6 +++---
 6 files changed, 26 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/092dc6de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
index d706949..fb0aefd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * create a new AsyncAdmin instance.
  */
 @InterfaceAudience.Public
-public interface AsyncAdminBuilder<T extends AsyncAdmin> {
+public interface AsyncAdminBuilder {
 
   /**
    * Set timeout for a whole admin operation. Operation timeout and max attempt times(or max retry
@@ -39,7 +39,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setOperationTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setOperationTimeout(long timeout, TimeUnit unit);
 
   /**
    * Set timeout for each rpc request.
@@ -47,7 +47,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setRpcTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRpcTimeout(long timeout, TimeUnit unit);
 
   /**
    * Set the base pause time for retrying. We use an exponential policy to generate sleep time when
@@ -56,7 +56,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setRetryPause(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit);
 
   /**
    * Set the max retry times for an admin operation. Usually it is the max attempt times minus 1.
@@ -65,7 +65,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param maxRetries
    * @return this for invocation chaining
    */
-  default AsyncAdminBuilder<T> setMaxRetries(int maxRetries) {
+  default AsyncAdminBuilder setMaxRetries(int maxRetries) {
     return setMaxAttempts(retries2Attempts(maxRetries));
   }
 
@@ -76,18 +76,18 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param maxAttempts
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setMaxAttempts(int maxAttempts);
+  AsyncAdminBuilder setMaxAttempts(int maxAttempts);
 
   /**
    * Set the number of retries that are allowed before we start to log.
    * @param startLogErrorsCnt
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setStartLogErrorsCnt(int startLogErrorsCnt);
+  AsyncAdminBuilder setStartLogErrorsCnt(int startLogErrorsCnt);
 
   /**
    * Create a {@link AsyncAdmin} instance.
    * @return a {@link AsyncAdmin} instance
   */
-  T build();
+  AsyncAdmin build();
 }
\ No newline at end of file
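
A typical call-site shape after this change, sketched for illustration (getAdminBuilder() as the AsyncConnection accessor is an assumption, not shown in this diff): callers now receive the AsyncAdmin interface directly, with no implementation type parameter to carry around.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;

    class AdminBuilderUsage {
      static AsyncAdmin makeAdmin(AsyncConnection conn) {
        return conn.getAdminBuilder()                  // plain AsyncAdminBuilder now
            .setOperationTimeout(30, TimeUnit.SECONDS)
            .setMaxRetries(5)                          // default method, delegates to setMaxAttempts
            .build();                                  // returns the AsyncAdmin interface
      }
    }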

http://git-wip-us.apache.org/repos/asf/hbase/blob/092dc6de/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
index 013e8d7..77ff88d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * Base class for all asynchrono

hbase git commit: HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the implementation

2017-08-16 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 242ccd588 -> c606a565c


HBASE-18608 AsyncConnection should return AsyncAdmin interface instead of the implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c606a565
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c606a565
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c606a565

Branch: refs/heads/branch-2
Commit: c606a565c19b98fcf21e4e9c4e301ae6640170c8
Parents: 242ccd5
Author: Guanghao Zhang 
Authored: Wed Aug 16 18:00:53 2017 +0800
Committer: Guanghao Zhang 
Committed: Thu Aug 17 09:56:06 2017 +0800

--
 .../hadoop/hbase/client/AsyncAdminBuilder.java  | 16 
 .../hadoop/hbase/client/AsyncAdminBuilderBase.java  | 12 ++--
 .../apache/hadoop/hbase/client/AsyncConnection.java |  4 ++--
 .../hadoop/hbase/client/AsyncConnectionImpl.java| 12 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  2 +-
 .../hadoop/hbase/client/TestAsyncAdminBuilder.java  |  6 +++---
 6 files changed, 26 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c606a565/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
index d706949..fb0aefd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * create a new AsyncAdmin instance.
  */
 @InterfaceAudience.Public
-public interface AsyncAdminBuilder<T extends AsyncAdmin> {
+public interface AsyncAdminBuilder {
 
   /**
    * Set timeout for a whole admin operation. Operation timeout and max attempt times(or max retry
@@ -39,7 +39,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setOperationTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setOperationTimeout(long timeout, TimeUnit unit);
 
   /**
    * Set timeout for each rpc request.
@@ -47,7 +47,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setRpcTimeout(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRpcTimeout(long timeout, TimeUnit unit);
 
   /**
    * Set the base pause time for retrying. We use an exponential policy to generate sleep time when
@@ -56,7 +56,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param unit
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setRetryPause(long timeout, TimeUnit unit);
+  AsyncAdminBuilder setRetryPause(long timeout, TimeUnit unit);
 
   /**
    * Set the max retry times for an admin operation. Usually it is the max attempt times minus 1.
@@ -65,7 +65,7 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param maxRetries
    * @return this for invocation chaining
    */
-  default AsyncAdminBuilder<T> setMaxRetries(int maxRetries) {
+  default AsyncAdminBuilder setMaxRetries(int maxRetries) {
     return setMaxAttempts(retries2Attempts(maxRetries));
   }
 
@@ -76,18 +76,18 @@ public interface AsyncAdminBuilder<T extends AsyncAdmin> {
    * @param maxAttempts
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setMaxAttempts(int maxAttempts);
+  AsyncAdminBuilder setMaxAttempts(int maxAttempts);
 
   /**
    * Set the number of retries that are allowed before we start to log.
    * @param startLogErrorsCnt
    * @return this for invocation chaining
    */
-  AsyncAdminBuilder<T> setStartLogErrorsCnt(int startLogErrorsCnt);
+  AsyncAdminBuilder setStartLogErrorsCnt(int startLogErrorsCnt);
 
   /**
    * Create a {@link AsyncAdmin} instance.
   * @return a {@link AsyncAdmin} instance
   */
-  T build();
+  AsyncAdmin build();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/c606a565/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
index 013e8d7..77ff88d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * Base class for all asynch