hbase git commit: HBASE-15407 Add SASL support for fan out OutputStream

2016-04-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master e450d94a2 -> 6ea499456


HBASE-15407 Add SASL support for fan out OutputStream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ea49945
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ea49945
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ea49945

Branch: refs/heads/master
Commit: 6ea4994569e05ff44e0fa571e053cef828ab57ed
Parents: e450d94
Author: zhangduo 
Authored: Sun Mar 27 19:01:05 2016 +0800
Committer: zhangduo 
Committed: Fri Apr 8 21:46:47 2016 +0800

--
 .../util/FanOutOneBlockAsyncDFSOutput.java  |   38 +-
 .../FanOutOneBlockAsyncDFSOutputHelper.java |  230 ++--
 .../FanOutOneBlockAsyncDFSOutputSaslHelper.java | 1032 ++
 .../util/TestFanOutOneBlockAsyncDFSOutput.java  |   13 +-
 .../TestSaslFanOutOneBlockAsyncDFSOutput.java   |  192 
 5 files changed, 1385 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6ea49945/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
index b10f180..bdbf865 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
@@ -17,11 +17,26 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static io.netty.handler.timeout.IdleState.READER_IDLE;
+import static io.netty.handler.timeout.IdleState.WRITER_IDLE;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.EventLoop;
+import io.netty.channel.SimpleChannelInboundHandler;
+import io.netty.handler.codec.protobuf.ProtobufDecoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+import io.netty.handler.timeout.IdleStateEvent;
+import io.netty.handler.timeout.IdleStateHandler;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.FutureListener;
+import io.netty.util.concurrent.Promise;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -36,6 +51,8 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Supplier;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -52,23 +69,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.DataChecksum;
 
-import com.google.common.base.Supplier;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.EventLoop;
-import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.timeout.IdleState;
-import io.netty.handler.timeout.IdleStateEvent;
-import io.netty.handler.timeout.IdleStateHandler;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.FutureListener;
-import io.netty.util.concurrent.Promise;
-
 /**
  * An asynchronous HDFS output stream implementation which fans out data to 
datanode and only
  * supports writing file with only one block.
@@ -278,7 +278,7 @@ public class FanOutOneBlockAsyncDFSOutput implements 
Closeable {
   public void userEventTriggered(ChannelHandlerContext ctx, Object evt) 
throws Exception {
 if (evt instanceof IdleStateEvent) {
   IdleStateEvent e = (IdleStateEvent) evt;
-  if (e.state() == IdleState.READER_IDLE) {
+  if (e.state() == READER_IDLE) {
 failed(ctx.channel(), new Supplier() {
 
   @Override
@@ -286,7 +286,7 @@ public class FanOutOneBlockAsyncDFSOutput implements 
Closeable {

[25/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
index 62cbf17..3c1e791 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/classification/class-use/InterfaceAudience.Private.html
@@ -5159,44 +5159,51 @@ service.
 
 
 
+class 
+DateTieredStoreEngine
+HBASE-15400 This store engine allows us to store data in 
date tiered layout with exponential
+ sizing so that the more recent data has more granularity.
+
+
+
 (package private) class 
 DefaultHeapMemoryTuner
 The default implementation for the HeapMemoryTuner.
 
 
-
+
 class 
 DefaultMemStore
 The MemStore holds in-memory modifications to the 
Store.
 
 
-
+
 (package private) class 
 DefaultStoreFileManager
 Default implementation of StoreFileManager.
 
 
-
+
 class 
 DefaultStoreFlusher
 Default implementation of StoreFlusher.
 
 
-
+
 interface 
 DeleteTracker
 This interface is used for the tracking and enforcement of 
Deletes
  during the course of a Get or Scan operation.
 
 
-
+
 class 
 DelimitedKeyPrefixRegionSplitPolicy
 A custom RegionSplitPolicy implementing a SplitPolicy that 
groups
  rows by a prefix of the row-key with a delimiter.
 
 
-
+
 class 
 ExplicitColumnTracker
 This class is used for the tracking and enforcement of 
columns and numbers
@@ -5204,85 +5211,85 @@ service.
  column qualifiers have been asked for in the query.
 
 
-
+
 class 
 FlushAllStoresPolicy
 A FlushPolicy that always 
flushes all stores for a given region.
 
 
-
+
 class 
 FlushPolicy
 A flush policy determines the stores that need to be 
flushed when flushing a region.
 
 
-
+
 interface 
 FlushRequester
 Request a flush.
 
 
-
+
 interface 
 FlushRequestListener
 Listener which will get notified regarding flush requests 
of regions.
 
 
-
+
 class 
 HeapMemoryManager
 Manages tuning of Heap memory using 
HeapMemoryTuner.
 
 
-
+
 interface 
 HeapMemoryTuner
 Makes the decision regarding proper sizing of the heap 
memory.
 
 
-
+
 class 
 HeapMemStoreLAB
 A memstore-local allocation buffer.
 
 
-
+
 class 
 HMobStore
 The store implementation to save MOBs (medium objects), it 
extends the HStore.
 
 
-
+
 class 
 HRegion 
 
-
+
 class 
 HRegionFileSystem
 View to an on-disk Region.
 
 
-
+
 class 
 HRegionServerCommandLine
 Class responsible for parsing the command line and starting 
the
  RegionServer.
 
 
-
+
 class 
 HStore
 A Store holds a column family in a Region.
 
 
-
+
 class 
 ImmutableSegment
 ImmutableSegment is an abstract class that extends the API 
supported by a Segment,
  and is not needed for a MutableSegment.
 
 
-
+
 class 
 IncreasingToUpperBoundRegionSplitPolicy
 Split size is the number of regions that are on this server 
that all are
@@ -5290,14 +5297,14 @@ service.
  region split size, whichever is smaller.
 
 
-
+
 interface 
 InternalScanner
 Internal scanners differ from client-side scanners in that 
they operate on
  HStoreKeys and byte[] instead of RowResults.
 
 
-
+
 class 
 KeyPrefixRegionSplitPolicy
 A custom RegionSplitPolicy implementing a SplitPolicy that 
groups
@@ -5306,32 +5313,32 @@ service.
  This ensures that a region is not split "inside" a prefix of a row key.
 
 
-
+
 class 
 KeyValueHeap
 Implements a heap merge across any number of 
KeyValueScanners.
 
 
-
+
 interface 
 KeyValueScanner
 Scanner that returns the next KeyValue.
 
 
-
+
 interface 
 LastSequenceId
 Last flushed sequence Ids for the regions and their stores 
on region server
 
 
-
+
 interface 
 LeaseListener
 LeaseListener is an interface meant to be implemented by 
users of the Leases
  class.
 
 
-
+
 class 
 Leases
 Leases
@@ -5340,32 +5347,32 @@ service.
  clients that occasionally send heartbeats.
 
 
-
+
 class 
 LogRoller
 Runs periodically to determine if the WAL should be 
rolled.
 
 
-
+
 class 
 LruHashMap
 The LruHashMap is a memory-aware HashMap with a 
configurable maximum
  memory footprint.
 
 
-
+
 interface 
 MemStore
 The MemStore holds in-memory modifications to the 
Store.
 
 
-
+
 class 
 MemStoreChunkPool
 A pool of HeapMemStoreLAB.Chunk 
instances.
 
 
-
+
 (package private) class 
 MemStoreFlusher
 Thread that flushes cache on request
@@ -5375,36 +5382,36 @@ service.
  sleep time which is invariant.
 
 
-
+
 interface 
 MemStoreLAB
 A memstore-local allocation buffer.
 
 
-
+
 class 
 MemStoreScanner
 This is the scanner for any MemStore implementation, 
derived from MemStore.
 
 
-
+
 class 
 MemStoreSnapshot
 Holds details of the snapshot taken on a MemStore.
 
 
-
+
 class 
 MetricsRegion
 This is the glue between the HReg

[17/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
index 039ccf6..4254a1e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
@@ -152,15 +152,15 @@
 
 
 private RegionCoprocessorEnvironment
-AggregateImplementation.env 
+MultiRowMutationEndpoint.env 
 
 
 private RegionCoprocessorEnvironment
-BaseRowProcessorEndpoint.env 
+AggregateImplementation.env 
 
 
 private RegionCoprocessorEnvironment
-MultiRowMutationEndpoint.env 
+BaseRowProcessorEndpoint.env 
 
 
 
@@ -1897,14 +1897,14 @@
 
 
 void
-DefaultVisibilityLabelServiceImpl.init(RegionCoprocessorEnvironment e) 
-
-
-void
 VisibilityLabelService.init(RegionCoprocessorEnvironment e)
 System calls this after opening of regions.
 
 
+
+void
+DefaultVisibilityLabelServiceImpl.init(RegionCoprocessorEnvironment e) 
+
 
 private void
 VisibilityController.initVisibilityLabelService(RegionCoprocessorEnvironment env) 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
index 69f04c6..24c82ac 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
@@ -102,25 +102,32 @@
 
 
 void
-WALObserver.postWALWrite(ObserverContext ctx,
+BaseWALObserver.postWALWrite(ObserverContext ctx,
 HRegionInfo info,
 WALKey logKey,
 WALEdit logEdit)
-Called after a WALEdit
- is writen to WAL.
+Implementers should override this method and leave the 
deprecated version as-is.
 
 
 
 void
-BaseWALObserver.postWALWrite(ObserverContext ctx,
+WALObserver.postWALWrite(ObserverContext ctx,
 HRegionInfo info,
 WALKey logKey,
 WALEdit logEdit)
-Implementers should override this method and leave the 
deprecated version as-is.
+Called after a WALEdit
+ is writen to WAL.
 
 
 
 void
+BaseWALObserver.postWALWrite(ObserverContext ctx,
+HRegionInfo info,
+HLogKey logKey,
+WALEdit logEdit) 
+
+
+void
 WALObserver.postWALWrite(ObserverContext ctx,
 HRegionInfo info,
 HLogKey logKey,
@@ -130,34 +137,34 @@
 
 
 
-
-void
-BaseWALObserver.postWALWrite(ObserverContext ctx,
-HRegionInfo info,
-HLogKey logKey,
-WALEdit logEdit) 
-
 
 boolean
-WALObserver.preWALWrite(ObserverContext ctx,
+BaseWALObserver.preWALWrite(ObserverContext ctx,
   HRegionInfo info,
   WALKey logKey,
   WALEdit logEdit)
-Called before a WALEdit
- is writen to WAL.
+Implementers should override this method and leave the 
deprecated version as-is.
 
 
 
 boolean
-BaseWALObserver.preWALWrite(ObserverContext ctx,
+WALObserver.preWALWrite(ObserverContext ctx,
   HRegionInfo info,
   WALKey logKey,
   WALEdit logEdit)
-Implementers should override this method and leave the 
deprecated version as-is.
+Called before a WALEdit
+ is writen to WAL.
 
 
 
 boolean
+BaseWALObserver.preWALWrite(ObserverContext ctx,
+  HRegionInfo info,
+  HLogKey logKey,
+  WALEdit logEdit) 
+
+
+boolean
 WALObserver.preWALWrite(ObserverContext ctx,
   HRegionInfo info,
   HLogKey logKey,
@@ -167,13 +174,6 @@
 
 
 
-
-boolean
-BaseWALObserver.preWALWrite(ObserverContext ctx,
-  HRegionInfo info,
-  HLogKey logKey,

[14/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockEncodingContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockEncodingContext.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockEncodingContext.html
index de71447..c480add 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockEncodingContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockEncodingContext.html
@@ -181,18 +181,18 @@
 
 
 HFileBlockEncodingContext
+BufferedDataBlockEncoder.newDataBlockEncodingContext(DataBlockEncoding encoding,
+  byte[] header,
+  HFileContext meta) 
+
+
+HFileBlockEncodingContext
 DataBlockEncoder.newDataBlockEncodingContext(DataBlockEncoding encoding,
   byte[] headerBytes,
   HFileContext meta)
 Creates a encoder specific encoding context
 
 
-
-HFileBlockEncodingContext
-BufferedDataBlockEncoder.newDataBlockEncodingContext(DataBlockEncoding encoding,
-  byte[] header,
-  HFileContext meta) 
-
 
 
 
@@ -204,44 +204,44 @@
 
 
 int
-DataBlockEncoder.encode(Cell cell,
+BufferedDataBlockEncoder.encode(Cell cell,
 HFileBlockEncodingContext encodingCtx,
-http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out)
-Encodes a KeyValue.
-
+http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in 
java.io">DataOutputStream out) 
 
 
 int
-BufferedDataBlockEncoder.encode(Cell cell,
+DataBlockEncoder.encode(Cell cell,
 HFileBlockEncodingContext encodingCtx,
-http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in 
java.io">DataOutputStream out) 
+http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out)
+Encodes a KeyValue.
+
 
 
 void
+BufferedDataBlockEncoder.endBlockEncoding(HFileBlockEncodingContext encodingCtx,
+http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out,
+
byte[] uncompressedBytesWithHeader) 
+
+
+void
 DataBlockEncoder.endBlockEncoding(HFileBlockEncodingContext encodingCtx,
 http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out,
 byte[] uncompressedBytesWithHeader)
 Ends encoding for a block of KeyValues.
 
 
-
+
 void
-BufferedDataBlockEncoder.endBlockEncoding(HFileBlockEncodingContext encodingCtx,
-http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out,
-
byte[] uncompressedBytesWithHeader) 
+BufferedDataBlockEncoder.startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx,
+http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in 
java.io">DataOutputStream out) 
 
-
+
 void
 DataBlockEncoder.startBlockEncoding(HFileBlockEncodingContext encodingCtx,
 http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out)
 Starts encoding for a block of KeyValues.
 
 
-
-void
-BufferedDataBlockEncoder.startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx,
-http://docs.oracle.com/javase/7/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in 
java.io">DataOutputStream out) 
-
 
 
 
@@ -271,13 +271,13 @@
 
 
 HFileBlockEncodingContext
-HFileDataBlockEncoderImpl.newDataBlockEncodingContext(byte[] dummyHeader,
-  HFileContext fileContext) 
+NoOpDataBlockEncoder.newDataBlockEncodingContext(byte[] dummyHeader,
+  HFileContext meta) 
 
 
 HFileBlockEncodingContext
-NoOpDataBlockEncoder.newDataBlockEncodingContext(byte[] dummyHeader,
-  HFileContext meta) 
+HFil

[47/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/util/class-use/Order.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/util/class-use/Order.html 
b/apidocs/org/apache/hadoop/hbase/util/class-use/Order.html
index 5ea1c1e..5532fb3 100644
--- a/apidocs/org/apache/hadoop/hbase/util/class-use/Order.html
+++ b/apidocs/org/apache/hadoop/hbase/util/class-use/Order.html
@@ -108,11 +108,11 @@
 
 
 protected Order
-OrderedBytesBase.order 
+RawBytes.order 
 
 
 protected Order
-RawBytes.order 
+OrderedBytesBase.order 
 
 
 
@@ -125,63 +125,66 @@
 
 
 Order
-FixedLengthWrapper.getOrder() 
+DataType.getOrder()
+Retrieve the sort Order imposed by this data type, 
or null when
+ natural ordering is not preserved.
+
 
 
 Order
-RawByte.getOrder() 
+Union3.getOrder() 
 
 
 Order
-RawInteger.getOrder() 
+RawLong.getOrder() 
 
 
 Order
-RawString.getOrder() 
+RawShort.getOrder() 
 
 
 Order
-RawDouble.getOrder() 
+Struct.getOrder() 
 
 
 Order
-PBType.getOrder() 
+Union2.getOrder() 
 
 
 Order
-Union3.getOrder() 
+FixedLengthWrapper.getOrder() 
 
 
 Order
-Union4.getOrder() 
+RawByte.getOrder() 
 
 
 Order
-OrderedBytesBase.getOrder() 
+RawString.getOrder() 
 
 
 Order
-Union2.getOrder() 
+Union4.getOrder() 
 
 
 Order
-RawShort.getOrder() 
+RawBytes.getOrder() 
 
 
 Order
-RawLong.getOrder() 
+TerminatedWrapper.getOrder() 
 
 
 Order
-TerminatedWrapper.getOrder() 
+OrderedBytesBase.getOrder() 
 
 
 Order
-RawBytes.getOrder() 
+RawInteger.getOrder() 
 
 
 Order
-Struct.getOrder() 
+RawDouble.getOrder() 
 
 
 Order
@@ -189,10 +192,7 @@
 
 
 Order
-DataType.getOrder()
-Retrieve the sort Order imposed by this data type, 
or null when
- natural ordering is not preserved.
-
+PBType.getOrder() 
 
 
 



[32/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
index 3a8659a..8656b1e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
@@ -533,75 +533,67 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 HTableDescriptor[]
-HBaseAdmin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
+Admin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
 Delete tables matching the passed in pattern and wait on 
completion.
 
 
 
 HTableDescriptor[]
-Admin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
+HBaseAdmin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
 Delete tables matching the passed in pattern and wait on 
completion.
 
 
 
 HTableDescriptor[]
-HBaseAdmin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
-
-
-HTableDescriptor[]
 Admin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String regex)
 Deletes tables matching the passed in pattern and wait on 
completion.
 
 
-
+
 HTableDescriptor[]
-HBaseAdmin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
+HBaseAdmin.deleteTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
 
-
+
 HTableDescriptor[]
 Admin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
 Disable tables matching the passed in pattern and wait on 
completion.
 
 
-
+
 HTableDescriptor[]
-HBaseAdmin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
+HBaseAdmin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
 
-
+
 HTableDescriptor[]
 Admin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String regex)
 Disable tables matching the passed in pattern and wait on 
completion.
 
 
-
+
 HTableDescriptor[]
-HBaseAdmin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
+HBaseAdmin.disableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
 
-
+
 HTableDescriptor[]
 Admin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
 Enable tables matching the passed in pattern and wait on 
completion.
 
 
-
+
 HTableDescriptor[]
-HBaseAdmin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
+HBaseAdmin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
 
-
+
 HTableDescriptor[]
 Admin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String regex)
 Enable tables matching the passed in pattern and wait on 
completion.
 
 
-
-HTableDescriptor
-HConnection.getHTableDescriptor(byte[] tableName)
-Deprecated. 
-internal method, do not use through HConnection
-
-
-
 
+HTableDescriptor[]
+HBaseAdmin.enableTables(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
+
+
 HTableDescriptor
 ConnectionImplementation.getHTableDescriptor(byte[] tableName)
 Deprecated. 
@@ -610,15 +602,15 @@ Input/OutputFormats, a table indexing Map

[03/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetFileSize.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetFileSize.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetFileSize.html
index aea9077..1a4d619 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetFileSize.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetFileSize.html
@@ -36,7 +36,7 @@
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -103,7 +103,7 @@
 
 
 
-private static class StoreFile.Comparators.GetFileSize
+private static class StoreFile.Comparators.GetFileSize
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements com.google.common.base.FunctionLong>
 
@@ -182,7 +182,7 @@ implements com.google.common.base.Function<
 
 StoreFile.Comparators.GetFileSize
-private StoreFile.Comparators.GetFileSize()
+private StoreFile.Comparators.GetFileSize()
 
 
 
@@ -199,7 +199,7 @@ implements com.google.common.base.Function<
 
 apply
-public http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long apply(StoreFile sf)
+public http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long apply(StoreFile sf)
 
 Specified by:
 apply in 
interface com.google.common.base.FunctionLong>
@@ -233,7 +233,7 @@ implements com.google.common.base.Function<
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
new file mode 100644
index 000..37126e3
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
@@ -0,0 +1,278 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+StoreFile.Comparators.GetMaxTimestamp (Apache HBase 2.0.0-SNAPSHOT 
API)
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No 
Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver
+Class 
StoreFile.Comparators.GetMaxTimestamp
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.StoreFile.Comparators.GetMaxTimestamp
+
+
+
+
+
+
+
+All Implemented Interfaces:
+com.google.common.base.FunctionLong>
+
+
+Enclosing class:
+StoreFile.Comparators
+
+
+
+private static class StoreFile.Comparators.GetMaxTimestamp
+extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+implements com.google.common.base.FunctionLong>
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Modifier
+Constructor and Description
+
+
+private 
+StoreFile.Comparators.GetMaxTimestamp() 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+Methods 
+
+Modifier and Type
+Method and Description
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long
+apply(StoreFile sf) 
+
+
+
+
+
+
+Methods inherited from class java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true"

[13/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
index 9cb7271..62df01b 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/Cacheable.html
@@ -175,12 +175,10 @@
 
 
 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey,
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Fetch block from cache.
-
+boolean updateCacheMetrics) 
 
 
 Cacheable
@@ -193,10 +191,12 @@
 
 
 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+BlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics) 
+boolean updateCacheMetrics)
+Fetch block from cache.
+
 
 
 Cacheable
@@ -245,10 +245,8 @@
 
 
 void
-BlockCache.cacheBlock(BlockCacheKey cacheKey,
-Cacheable buf)
-Add block to cache (defaults to not in-memory).
-
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+Cacheable buf) 
 
 
 void
@@ -259,8 +257,10 @@
 
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-Cacheable buf) 
+BlockCache.cacheBlock(BlockCacheKey cacheKey,
+Cacheable buf)
+Add block to cache (defaults to not in-memory).
+
 
 
 void
@@ -276,12 +276,10 @@
 
 
 void
-BlockCache.cacheBlock(BlockCacheKey cacheKey,
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
 Cacheable buf,
 boolean inMemory,
-boolean cacheDataInL1)
-Add block to cache.
-
+boolean cacheDataInL1) 
 
 
 void
@@ -294,10 +292,12 @@
 
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+BlockCache.cacheBlock(BlockCacheKey cacheKey,
 Cacheable buf,
 boolean inMemory,
-boolean cacheDataInL1) 
+boolean cacheDataInL1)
+Add block to cache.
+
 
 
 void
@@ -313,11 +313,8 @@
 
 
 void
-BlockCache.returnBlock(BlockCacheKey cacheKey,
-  Cacheable block)
-Called when the scanner using the block decides to return 
the block once its usage
- is over.
-
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
+  Cacheable block) 
 
 
 void
@@ -326,8 +323,11 @@
 
 
 void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
-  Cacheable block) 
+BlockCache.returnBlock(BlockCacheKey cacheKey,
+  Cacheable block)
+Called when the scanner using the block decides to return 
the block once its usage
+ is over.
+
 
 
 void
@@ -417,17 +417,17 @@
 
 
 Cacheable
-FileIOEngine.read(long offset,
+FileMmapEngine.read(long offset,
 int length,
-CacheableDeserializer deserializer)
-Transfers data from file to the given byte buffer
-
+CacheableDeserializer deserializer) 
 
 
 Cacheable
-FileMmapEngine.read(long offset,
+FileIOEngine.read(long offset,
 int length,
-CacheableDeserializer deserializer) 
+CacheableDeserializer deserializer)
+Transfers data from file to the given byte buffer
+
 
 
 Cacheable
@@ -506,17 +506,17 @@
 
 
 Cacheable
-FileIOEngine.read(long offset,
+FileMmapEngine.read(long offset,
 int length,
-CacheableDeserializer deserializer)
-Transfers data from file to the given byte buffer
-
+CacheableDeserializer deserializer) 
 
 
 Cacheable
-FileMmapEngine.read(long offset,
+FileIOEngine.read(long offset,
 int length,
-CacheableDeserializer deserializer) 
+CacheableDeserializer deserializer)
+Transfers data from file to the given byte buffer
+
 
 
 Cacheable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheableDeserializer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheableDeserializer.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheableDeserializer.html
index 3c50de1..8490881 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheableDeserializer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/CacheableDeserializer.html
@@ -199,17 +199,17 @@
 
 
 Cacheable
-FileIOEngine.read(long offset,
+FileMmapEngine.read(long offset,
 int length,
-CacheableDeserializer deserializer)
-Transfers data f

[08/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
index 9862823..eee57d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
@@ -588,18 +588,18 @@
 
 
 protected Procedure[]
-StateMachineProcedure.execute(TEnvironment env) 
-
-
-protected Procedure[]
 ProcedureExecutor.CompletedProcedureCleaner.execute(TEnvironment env) 
 
-
+
 protected abstract Procedure[]
 Procedure.execute(TEnvironment env)
 The main code of the procedure.
 
 
+
+protected Procedure[]
+StateMachineProcedure.execute(TEnvironment env) 
+
 
 Procedure
 ProcedureExecutor.getProcedure(long procId) 
@@ -840,6 +840,16 @@
 
 
 void
+NoopProcedureStore.insert(Procedure proc,
+Procedure[] subprocs) 
+
+
+void
+NoopProcedureStore.insert(Procedure proc,
+Procedure[] subprocs) 
+
+
+void
 ProcedureStore.insert(Procedure proc,
 Procedure[] subprocs)
 When a procedure is submitted to the executor insert(proc, 
null) will be called.
@@ -854,25 +864,15 @@
 
 
 void
-NoopProcedureStore.insert(Procedure proc,
-Procedure[] subprocs) 
+NoopProcedureStore.update(Procedure proc) 
 
 
 void
-NoopProcedureStore.insert(Procedure proc,
-Procedure[] subprocs) 
-
-
-void
 ProcedureStore.update(Procedure proc)
 The specified procedure was executed,
  and the new state should be written to the store.
 
 
-
-void
-NoopProcedureStore.update(Procedure proc) 
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
index 1d73b9a..b991385 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
@@ -114,9 +114,11 @@
 
 
 void
-BaseMasterObserver.preAbortProcedure(ObserverContext ctx,
+MasterObserver.preAbortProcedure(ObserverContext ctx,
   ProcedureExecutor procEnv,
-  long procId) 
+  long procId)
+Called before a abortProcedure request has been 
processed.
+
 
 
 void
@@ -126,11 +128,9 @@
 
 
 void
-MasterObserver.preAbortProcedure(ObserverContext ctx,
+BaseMasterObserver.preAbortProcedure(ObserverContext ctx,
   ProcedureExecutor procEnv,
-  long procId)
-Called before a abortProcedure request has been 
processed.
-
+  long procId) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureYieldException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureYieldException.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureYieldException.html
index 8ba6204..cea611f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureYieldException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureYieldException.html
@@ -133,15 +133,15 @@
 
 
 
-protected Procedure[]
-StateMachineProcedure.execute(TEnvironment env) 
-
-
 protected abstract Procedure[]
 Procedure.execute(TEnvironment env)
 The main code of the procedure.
 
 
+
+protected Procedure[]
+StateMachineProcedure.execute(TEnvironment env) 
+
 
 protected abstract StateMachineProcedure.Flow
 StateMachineProcedure.executeFromState(TEnvironment env,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/RemoteProcedureException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/RemoteProcedureException.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/RemoteProcedureException.html
index 05fe21c..f31260d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/RemoteProcedureException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/RemoteProcedureException.html
@@ -118,13 +118,13 @@
 RemoteProcedureExc

[05/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 044589d..11dbfd2 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -1004,48 +1004,42 @@ implements User user) 
 
 
-void
-rollback(Cell cell)
-Removes a Cell from the memstore.
-
-
-
 (package private) void
 setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder)
 Should be used only in tests.
 
 
-
+
 (package private) void
 setScanInfo(ScanInfo scanInfo)
 Set scan info, used by test
 
 
-
+
 (package private) void
 snapshot()
 Snapshot this stores memstore.
 
 
-
+
 boolean
 throttleCompaction(long compactionSize) 
 
-
+
 long
 timeOfOldestEdit()
 When was the last edit done in the memstore
 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString() 
 
-
+
 void
 triggerMajorCompaction() 
 
-
+
 long
 updateColumnValue(byte[] row,
   byte[] f,
@@ -1054,29 +1048,29 @@ implements Updates the value for the given row/family/qualifier.
 
 
-
+
 private boolean
 updateStorefiles(http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List sfs,
 long snapshotId) 
 
-
+
 long
 upsert(http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
 long readpoint)
 Adds or replaces the specified KeyValues.
 
 
-
+
 private void
 validateStoreFile(org.apache.hadoop.fs.Path path)
 Validates a store file by opening and closing it.
 
 
-
+
 (package private) int
 versionsToReturn(int wantedVersions) 
 
-
+
 private void
 writeCompactionWalRecord(http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection filesCompacted,
 http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection newFiles)
@@ -1512,7 +1506,7 @@ implements 
 
 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD
 
 
 
@@ -1521,7 +1515,7 @@ implements 
 
 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD
 
 
 
@@ -2004,30 +1998,13 @@ public static org.apache.hadoop.fs.Path Returns:memstore size delta
 
 
-
-
-
-
-
-rollback
-public void rollback(Cell cell)
-Description copied from interface: Store
-Removes a Cell from the memstore. The Cell is removed only 
if its key
- & memstoreTS match the key & memstoreTS value of the cell
- parameter.
-
-Specified by:
-rollback in
 interface Store
-
-
-
 
 
 
 
 
 getStorefiles
-public http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getStorefiles()
+public http://docs.oracle.com/javase/7/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getStorefiles()
 
 Specified by:
 getStorefiles in
 interface Store
@@ -2040,7 +2017,7 @@ public static org.apache.hadoop.fs.Path 
 
 assertBulkLoadHFileOk
-public void assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath)
+public void assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath)
throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from interface: Store
 This throws a WrongRegionException if the HFile does not 
fit in this region, or an
@@ -2058,7 +2035,7 @@ public static org.apache.hadoop.fs.Path 
 
 bulkLoadHFile
-public org.apache.hadoop.fs.Path bulkLoadHFile(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String srcPathStr,
+public org.apache.hadoop.fs.Path bulkLoadHFile(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String srcPathStr,
   long seqNum)
 throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from interface: Store
@@ -2078,7 +2055,7 @@ public static org.apache.hadoop.fs.Path 
 
 bulkLoadHFile
-public void bulkLoadHFile(StoreFileInfo fileInfo)
+public void bulkLoadHFi

[50/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 20abfb7..9ed6623 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -913,9 +913,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-Increment
-Increment.add(Cell cell)
-Add the specified KeyValue to this operation.
+Put
+Put.add(Cell kv)
+Add the specified KeyValue to this Put operation.
 
 
 
@@ -925,9 +925,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-Put
-Put.add(Cell kv)
-Add the specified KeyValue to this Put operation.
+Increment
+Increment.add(Cell cell)
+Add the specified KeyValue to this operation.
 
 
 
@@ -1007,6 +1007,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 boolean partial) 
 
 
+Put
+Put.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+
+
+Append
+Append.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+
+
 Increment
 Increment.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
 
@@ -1020,14 +1028,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Delete
 Delete.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
 
-
-Append
-Append.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
-
-
-Put
-Put.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
-
 
 
 
@@ -1044,21 +1044,29 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 Cell
-MultipleColumnPrefixFilter.getNextCellHint(Cell cell) 
+MultiRowRangeFilter.getNextCellHint(Cell currentKV) 
 
 
 Cell
-TimestampsFilter.getNextCellHint(Cell currentCell)
-Pick the next cell that the scanner should seek to.
-
+ColumnPrefixFilter.getNextCellHint(Cell cell) 
 
 
 Cell
-FilterList.getNextCellHint(Cell currentCell) 
+FuzzyRowFilter.getNextCellHint(Cell currentCell) 
 
 
 Cell
-FuzzyRowFilter.getNextCellHint(Cell currentCell) 
+MultipleColumnPrefixFilter.getNextCellHint(Cell cell) 
+
+
+Cell
+TimestampsFilter.getNextCellHint(Cell currentCell)
+Pick the next cell that the scanner should seek to.
+
+
+
+Cell
+FilterList.getNextCellHint(Cell currentCell) 
 
 
 Cell
@@ -1066,7 +1074,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 Cell
-ColumnPrefixFilter.getNextCellHint(Cell cell) 
+ColumnRangeFilter.getNextCellHint(Cell cell) 
 
 
 abstract Cell
@@ -1077,11 +1085,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 Cell
-MultiRowRangeFilter.getNextCellHint(Cell currentKV) 
+SkipFilter.transformCell(Cell v) 
 
 
 Cell
-ColumnRangeFilter.getNextCellHint(Cell cell) 
+WhileMatchFilter.transformCell(Cell v) 
 
 
 Cell
@@ -1089,22 +1097,14 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 Cell
-WhileMatchFilter.transformCell(Cell v) 
-
-
-Cell
 FilterList.transformCell(Cell c) 
 
-
+
 abstract Cell
 Filter.transformCell(Cell v)
 Give the filter a chance to transform the passed 
KeyValue.
 
 
-
-Cell
-SkipFilter.transformCell(Cell v) 
-
 
 
 
@@ -1140,215 +1140,223 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filte

[11/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/class-use/MasterServices.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MasterServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MasterServices.html
index bfacb81..2c47429 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/MasterServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/MasterServices.html
@@ -242,15 +242,15 @@
 
 
 private MasterServices
-TableNamespaceManager.masterServices 
+MasterCoprocessorHost.masterServices 
 
 
 private MasterServices
-MasterCoprocessorHost.masterServices 
+MasterCoprocessorHost.MasterEnvironment.masterServices 
 
 
 private MasterServices
-MasterCoprocessorHost.MasterEnvironment.masterServices 
+TableNamespaceManager.masterServices 
 
 
 private MasterServices
@@ -423,16 +423,16 @@
 StochasticLoadBalancer.LocalityBasedCandidateGenerator.masterServices 
 
 
-protected MasterServices
-BaseLoadBalancer.services 
+private MasterServices
+StochasticLoadBalancer.LocalityCostFunction.services 
 
 
 private MasterServices
 RegionLocationFinder.services 
 
 
-private MasterServices
-StochasticLoadBalancer.LocalityCostFunction.services 
+protected MasterServices
+BaseLoadBalancer.services 
 
 
 
@@ -445,24 +445,24 @@
 
 
 void
-BaseLoadBalancer.setMasterServices(MasterServices masterServices) 
+StochasticLoadBalancer.setMasterServices(MasterServices masterServices) 
 
 
 void
-StochasticLoadBalancer.setMasterServices(MasterServices masterServices) 
+BaseLoadBalancer.setMasterServices(MasterServices masterServices) 
 
 
-void
-RegionLocationFinder.setServices(MasterServices services) 
-
-
 (package private) void
 StochasticLoadBalancer.LocalityBasedCandidateGenerator.setServices(MasterServices services) 
 
-
+
 (package private) void
 StochasticLoadBalancer.LocalityCostFunction.setServices(MasterServices srvc) 
 
+
+void
+RegionLocationFinder.setServices(MasterServices services) 
+
 
 
 
@@ -562,13 +562,13 @@
 
 
 void
-SimpleRegionNormalizer.setMasterServices(MasterServices masterServices)
+RegionNormalizer.setMasterServices(MasterServices masterServices)
 Set the master service.
 
 
 
 void
-RegionNormalizer.setMasterServices(MasterServices masterServices)
+SimpleRegionNormalizer.setMasterServices(MasterServices masterServices)
 Set the master service.
 
 
@@ -629,13 +629,13 @@
 
 
 
-private MasterServices
-SnapshotManager.master 
-
-
 protected MasterServices
 TakeSnapshotHandler.master 
 
+
+private MasterServices
+SnapshotManager.master 
+
 
 private MasterServices
 MasterSnapshotVerifier.services 
@@ -739,17 +739,17 @@
 
 
 
-void
-MasterProcedureManagerHost.initialize(MasterServices master,
-MetricsMaster metricsMaster) 
-
-
 abstract void
 MasterProcedureManager.initialize(MasterServices master,
 MetricsMaster metricsMaster)
 Initialize a globally barriered procedure for master.
 
 
+
+void
+MasterProcedureManagerHost.initialize(MasterServices master,
+MetricsMaster metricsMaster) 
+
 
 
 
@@ -831,11 +831,11 @@
 
 
 private MasterServices
-RSGroupAdminEndpoint.master 
+RSGroupAdminServer.master 
 
 
 private MasterServices
-RSGroupAdminServer.master 
+RSGroupAdminEndpoint.master 
 
 
 private MasterServices

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
index 704e3a3..dfd627b 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
@@ -192,17 +192,17 @@
 
 
 
-void
-MasterProcedureManagerHost.initialize(MasterServices master,
-MetricsMaster metricsMaster) 
-
-
 abstract void
 MasterProcedureManager.initialize(MasterServices master,
 MetricsMaster metricsMaster)
 Initialize a globally barriered procedure for master.
 
 
+
+void
+MasterProcedureManagerHost.initialize(MasterServices master,
+MetricsMaster metricsMaster) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
index fadbb20..f156778 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWra

[31/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
index 92a396e..fd8c1b1 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
@@ -814,28 +814,22 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
-DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey)
 Gets initial, full list of candidate store files to check 
for row-key-before.
 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey)
 See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
  for details on this methods.
 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
-DefaultStoreFileManager.updateCandidateFilesForRowKeyBefore(http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator candidateFiles,
-  KeyValue targetKey,
-  Cell candidate) 
+DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey) 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StoreFileManager.updateCandidateFilesForRowKeyBefore(http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator candidateFiles,
   KeyValue targetKey,
@@ -843,7 +837,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Updates the candidate list for finding row key before.
 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StripeStoreFileManager.updateCandidateFilesForRowKeyBefore(http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator candidateFiles,
   KeyValue targetKey,
@@ -853,6 +847,12 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
  for details on this methods.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+DefaultStoreFileManager.updateCandidateFilesForRowKeyBefore(http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator candidateFiles,
+  KeyValue targetKey,
+  Cell candidate) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
index 2c1e560..20a8091 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
@@ -120,13 +120,17 @@
 
 
 MasterKeepAliveConnection
+ConnectionImplementation.getKeepAliveMasterService() 
+
+
+MasterKeepAliveConnection
 ClusterConnection.getKeepAliveMasterService()
 Deprecated. 
 Since 0.96.0
 
 
 
-
+
 MasterKeepAliveConnection
 HConnection.getKeepAliveMasterService()
 Deprecated. 
@@ -134,17 +138,13 @@
 
 
 
-
-MasterKeepAliveConnection
-ConnectionImplementation.getKeepAliveMasterService() 
-
 
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface
 ConnectionImplementation.getMaster() 
 
 
 boolean
-ClusterConnection.i

[18/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
index 5c41e95..d0bdf04 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
@@ -185,7 +185,9 @@
 
 
 void
-BaseMasterObserver.postAbortProcedure(ObserverContext ctx) 
+MasterObserver.postAbortProcedure(ObserverContext ctx)
+Called after a abortProcedure request has been 
processed.
+
 
 
 void
@@ -193,16 +195,18 @@
 
 
 void
-MasterObserver.postAbortProcedure(ObserverContext ctx)
-Called after a abortProcedure request has been 
processed.
-
+BaseMasterObserver.postAbortProcedure(ObserverContext ctx) 
 
 
 void
-BaseMasterObserver.postAddColumn(ObserverContext ctx,
+MasterObserver.postAddColumn(ObserverContext ctx,
   TableName tableName,
   HColumnDescriptor columnFamily)
-Deprecated. 
+Deprecated. 
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645).
+ Use MasterObserver.postAddColumnFamily(ObserverContext,
 TableName, HColumnDescriptor).
+
 
 
 
@@ -215,21 +219,19 @@
 
 
 void
-MasterObserver.postAddColumn(ObserverContext ctx,
+BaseMasterObserver.postAddColumn(ObserverContext ctx,
   TableName tableName,
   HColumnDescriptor columnFamily)
-Deprecated. 
-As of release 2.0.0, this will be removed in HBase 3.0.0
- (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645).
- Use MasterObserver.postAddColumnFamily(ObserverContext,
 TableName, HColumnDescriptor).
-
+Deprecated. 
 
 
 
 void
-BaseMasterObserver.postAddColumnFamily(ObserverContext ctx,
+MasterObserver.postAddColumnFamily(ObserverContext ctx,
   TableName tableName,
-  HColumnDescriptor columnFamily) 
+  HColumnDescriptor columnFamily)
+Called after the new column family has been created.
+
 
 
 void
@@ -239,17 +241,17 @@
 
 
 void
-MasterObserver.postAddColumnFamily(ObserverContext ctx,
+BaseMasterObserver.postAddColumnFamily(ObserverContext ctx,
   TableName tableName,
-  HColumnDescriptor columnFamily)
-Called after the new column family has been created.
-
+  HColumnDescriptor columnFamily) 
 
 
 void
-BaseMasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
+MasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
 TableName tableName,
-HColumnDescriptor columnFamily) 
+HColumnDescriptor columnFamily)
+Called after the new column family has been created.
+
 
 
 void
@@ -259,18 +261,20 @@
 
 
 void
-MasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
+BaseMasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
 TableName tableName,
-HColumnDescriptor columnFamily)
-Called after the new column family has been created.
-
+HColumnDescriptor columnFamily) 
 
 
 void
-BaseMasterObserver.postAddColumnHandler(ObserverContext ctx,
+MasterObserver.postAddColumnHandler(ObserverContext ctx,
 TableName tableName,
 HColumnDescriptor columnFamily)
-Deprecated. 
+Deprecated. 
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645). Use
+ MasterObserver.postAddColumnFamilyHandler(ObserverContext,
 TableName, HColumnDescriptor).
+
 
 
 
@@ -283,20 +287,18 @@
 
 
 void
-MasterObserver.postAddColumnHandler(ObserverContext ctx,
+BaseMasterObserver.postAddColum

[29/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index ea3b6f3..d00e78c 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -248,11 +248,11 @@
 
 
 ServerName
-SplitLogTask.getServerName() 
+Server.getServerName() 
 
 
 ServerName
-Server.getServerName() 
+SplitLogTask.getServerName() 
 
 
 static ServerName
@@ -698,16 +698,16 @@
 
 
 
-void
-MetaCache.cacheLocation(TableName tableName,
+private void
+ConnectionImplementation.cacheLocation(TableName tableName,
   ServerName source,
   HRegionLocation location)
 Put a newly discovered HRegionLocation into the cache.
 
 
 
-private void
-ConnectionImplementation.cacheLocation(TableName tableName,
+void
+MetaCache.cacheLocation(TableName tableName,
   ServerName source,
   HRegionLocation location)
 Put a newly discovered HRegionLocation into the cache.
@@ -736,11 +736,15 @@
 
 
 void
+ConnectionImplementation.clearCaches(ServerName serverName) 
+
+
+void
 ClusterConnection.clearCaches(ServerName sn)
 Clear any caches that pertain to server name 
sn.
 
 
-
+
 void
 HConnection.clearCaches(ServerName sn)
 Deprecated. 
@@ -748,22 +752,18 @@
 
 
 
-
-void
-ConnectionImplementation.clearCaches(ServerName serverName) 
-
 
 void
-HBaseAdmin.closeRegion(ServerName sn,
-  HRegionInfo hri) 
-
-
-void
 Admin.closeRegion(ServerName sn,
   HRegionInfo hri)
 Close a region.
 
 
+
+void
+HBaseAdmin.closeRegion(ServerName sn,
+  HRegionInfo hri) 
+
 
 private void
 HBaseAdmin.compact(ServerName sn,
@@ -773,29 +773,29 @@
 
 
 void
-HBaseAdmin.compactRegionServer(ServerName sn,
+Admin.compactRegionServer(ServerName sn,
   boolean major)
 Compact all regions on the region server
 
 
 
 void
-Admin.compactRegionServer(ServerName sn,
+HBaseAdmin.compactRegionServer(ServerName sn,
   boolean major)
 Compact all regions on the region server
 
 
 
 CoprocessorRpcChannel
-HBaseAdmin.coprocessorService(ServerName sn) 
-
-
-CoprocessorRpcChannel
 Admin.coprocessorService(ServerName sn)
 Creates and returns a RpcChannel instance
  connected to the passed region server.
 
 
+
+CoprocessorRpcChannel
+HBaseAdmin.coprocessorService(ServerName sn) 
+
 
 protected MultiServerCallable
 AsyncProcess.createCallable(ServerName server,
@@ -838,11 +838,15 @@
 
 
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
+ConnectionImplementation.getAdmin(ServerName serverName) 
+
+
+org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 ClusterConnection.getAdmin(ServerName serverName)
 Establishes a connection to the region server at the 
specified address.
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 HConnection.getAdmin(ServerName serverName)
 Deprecated. 
@@ -850,11 +854,12 @@
 
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-ConnectionImplementation.getAdmin(ServerName serverName) 
+ConnectionImplementation.getAdmin(ServerName serverName,
+boolean master) 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
 HConnection.getAdmin(ServerName serverName,
 boolean getMaster)
@@ -863,11 +868,6 @@
 
 
 
-
-org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-ConnectionImplementation.getAdmin(ServerName serverName,
-boolean master) 
-
 
 private http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long
 AsyncProcess.AsyncRequestFutureImpl.getBackoff(ServerName server,
@@ -875,12 +875,16 @@
 
 
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
+ConnectionImplementation.getClient(ServerName sn) 
+
+
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
 ClusterConnection.getClient(ServerName serverName)
 Establishes a connection to the region server at the 
specified address, and returns
  a region client protocol.
 
 
-
+
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
 HConnection.getClient(ServerName serverName)
 Deprecated. 
@@ -888,10 +892,6 @@
 
 
 
-
-org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
-ConnectionImplementation.getClient(ServerName sn) 
-
 
 org.apache.hadoop.hbase.protobuf.generated.Clie

[10/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 60c1334..e5f8bce 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -114,9 +114,11 @@
 
 
 void
-BaseMasterObserver.preAbortProcedure(ObserverContext ctx,
+MasterObserver.preAbortProcedure(ObserverContext ctx,
   ProcedureExecutor procEnv,
-  long procId) 
+  long procId)
+Called before a abortProcedure request has been 
processed.
+
 
 
 void
@@ -126,11 +128,9 @@
 
 
 void
-MasterObserver.preAbortProcedure(ObserverContext ctx,
+BaseMasterObserver.preAbortProcedure(ObserverContext ctx,
   ProcedureExecutor procEnv,
-  long procId)
-Called before a abortProcedure request has been 
processed.
-
+  long procId) 
 
 
 
@@ -197,123 +197,123 @@
 
 
 boolean
-CloneSnapshotProcedure.abort(MasterProcedureEnv env) 
+DeleteColumnFamilyProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-DeleteTableProcedure.abort(MasterProcedureEnv env) 
+CreateNamespaceProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-ModifyTableProcedure.abort(MasterProcedureEnv env) 
+DeleteNamespaceProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-EnableTableProcedure.abort(MasterProcedureEnv env) 
+CloneSnapshotProcedure.abort(MasterProcedureEnv env) 
 
 
-protected boolean
-ServerCrashProcedure.abort(MasterProcedureEnv env) 
+boolean
+DeleteTableProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-DeleteNamespaceProcedure.abort(MasterProcedureEnv env) 
+CreateTableProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-CreateTableProcedure.abort(MasterProcedureEnv env) 
+EnableTableProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-ModifyNamespaceProcedure.abort(MasterProcedureEnv env) 
+TruncateTableProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-RestoreSnapshotProcedure.abort(MasterProcedureEnv env) 
+ModifyColumnFamilyProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-ModifyColumnFamilyProcedure.abort(MasterProcedureEnv env) 
+RestoreSnapshotProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-AddColumnFamilyProcedure.abort(MasterProcedureEnv env) 
+ModifyTableProcedure.abort(MasterProcedureEnv env) 
 
 
-boolean
-DisableTableProcedure.abort(MasterProcedureEnv env) 
+protected boolean
+ServerCrashProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-DeleteColumnFamilyProcedure.abort(MasterProcedureEnv env) 
+ModifyNamespaceProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-CreateNamespaceProcedure.abort(MasterProcedureEnv env) 
+DisableTableProcedure.abort(MasterProcedureEnv env) 
 
 
 boolean
-TruncateTableProcedure.abort(MasterProcedureEnv env) 
+AddColumnFamilyProcedure.abort(MasterProcedureEnv env) 
 
 
 protected boolean
-CloneSnapshotProcedure.acquireLock(MasterProcedureEnv env) 
+DeleteColumnFamilyProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-DeleteTableProcedure.acquireLock(MasterProcedureEnv env) 
+CreateNamespaceProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-ModifyTableProcedure.acquireLock(MasterProcedureEnv env) 
+DeleteNamespaceProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-EnableTableProcedure.acquireLock(MasterProcedureEnv env) 
+CloneSnapshotProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-ServerCrashProcedure.acquireLock(MasterProcedureEnv env) 
+DeleteTableProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-DeleteNamespaceProcedure.acquireLock(MasterProcedureEnv env) 
+CreateTableProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-CreateTableProcedure.acquireLock(MasterProcedureEnv env) 
+EnableTableProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-ModifyNamespaceProcedure.acquireLock(MasterProcedureEnv env) 
+TruncateTableProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-RestoreSnapshotProcedure.acquireLock(MasterProcedureEnv env) 
+ModifyColumnFamilyProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-ModifyColumnFamilyProcedure.acquireLock(MasterProcedureEnv env) 
+RestoreSnapshotProcedure.acquireLock(MasterProcedureEnv env) 
 
 
 protected boolean
-AddColumnFamilyProcedure.acquireLock(MasterProcedure

[34/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 4d75f47..9b26d8c 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -871,11 +871,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 HRegionInfo
-MultiServerCallable.getHRegionInfo() 
+ScannerCallableWithReplicas.getHRegionInfo() 
 
 
 HRegionInfo
-ScannerCallableWithReplicas.getHRegionInfo() 
+MultiServerCallable.getHRegionInfo() 
 
 
 private HRegionInfo
@@ -905,28 +905,28 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-HBaseAdmin.getOnlineRegions(ServerName sn) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 Admin.getOnlineRegions(ServerName sn)
 Get all the online regions on a region server.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HBaseAdmin.getOnlineRegions(ServerName sn) 
+
 
 (package private) Pair
 HBaseAdmin.getRegion(byte[] regionName) 
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-HBaseAdmin.getTableRegions(TableName tableName) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 Admin.getTableRegions(TableName tableName)
 Get the regions of a given table.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HBaseAdmin.getTableRegions(TableName tableName) 
+
 
 
 
@@ -944,16 +944,16 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-HBaseAdmin.closeRegion(ServerName sn,
-  HRegionInfo hri) 
-
-
-void
 Admin.closeRegion(ServerName sn,
   HRegionInfo hri)
 Close a region.
 
 
+
+void
+HBaseAdmin.closeRegion(ServerName sn,
+  HRegionInfo hri) 
+
 
 private void
 HBaseAdmin.compact(ServerName sn,
@@ -1071,17 +1071,17 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 void
-SplitLogManagerCoordination.markRegionsRecovering(ServerName serverName,
+ZKSplitLogManagerCoordination.markRegionsRecovering(ServerName serverName,
   http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set userRegions)
-Mark regions in recovering state for distributed log 
replay
+Create znodes 
/hbase/recovering-regions/[region_ids...]/[failed region server names ...] for
+ all regions of the passed in region servers
 
 
 
 void
-ZKSplitLogManagerCoordination.markRegionsRecovering(ServerName serverName,
+SplitLogManagerCoordination.markRegionsRecovering(ServerName serverName,
   http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set userRegions)
-Create znodes 
/hbase/recovering-regions/[region_ids...]/[failed region server names ...] for
- all regions of the passed in region servers
+Mark regions in recovering state for distributed log 
replay
 
 
 
@@ -1113,8 +1113,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-BaseMasterObserver.postAssign(ObserverContext ctx,
-HRegionInfo regionInfo) 
+MasterObserver.postAssign(ObserverContext ctx,
+HRegionInfo regionInfo)
+Called after the region assignment has been requested.
+
 
 
 void
@@ -1123,16 +1125,16 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 void
-MasterObserver.postAssign(ObserverContext ctx,
-HRegionInfo regionInfo)
-Called after the region assignment has been requested.
-
+BaseMasterObserver.postAssign(ObserverContext ctx,
+HRegionInfo regionInfo) 
 
 
 void
-BaseMasterObserver.postCreateTable(ObserverContext ctx,
+MasterObserver.postCreateTable(ObserverContext ctx,
   HTableDescriptor desc,
- 

[01/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b0a048625 -> db94a6390


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
index df4490d..d84b21a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Region.html
@@ -411,22 +411,22 @@
 SplitTransactionImpl.DaughterOpener.r 
 
 
-(package private) Region
-RegionCoprocessorHost.region
-The region
-
+private Region
+MemStoreFlusher.FlushRegionEntry.region 
 
 
 private Region
-RegionCoprocessorHost.RegionEnvironment.region 
+RegionServerServices.PostOpenDeployContext.region 
 
 
-private Region
-MemStoreFlusher.FlushRegionEntry.region 
+(package private) Region
+RegionCoprocessorHost.region
+The region
+
 
 
 private Region
-RegionServerServices.PostOpenDeployContext.region 
+RegionCoprocessorHost.RegionEnvironment.region 
 
 
 
@@ -518,11 +518,11 @@
 
 
 Region
-RegionCoprocessorHost.RegionEnvironment.getRegion() 
+RegionServerServices.PostOpenDeployContext.getRegion() 
 
 
 Region
-RegionServerServices.PostOpenDeployContext.getRegion() 
+RegionCoprocessorHost.RegionEnvironment.getRegion() 
 
 
 protected Region
@@ -569,6 +569,11 @@
 
 
 PairOfSameType
+SplitTransactionImpl.execute(Server server,
+  RegionServerServices services) 
+
+
+PairOfSameType
 SplitTransaction.execute(Server server,
   RegionServerServices services)
 Deprecated. 
@@ -576,12 +581,13 @@
 
 
 
-
+
 PairOfSameType
-SplitTransactionImpl.execute(Server server,
-  RegionServerServices services) 
+SplitTransactionImpl.execute(Server server,
+  RegionServerServices services,
+  User user) 
 
-
+
 PairOfSameType
 SplitTransaction.execute(Server server,
   RegionServerServices services,
@@ -589,12 +595,6 @@
 Run the transaction.
 
 
-
-PairOfSameType
-SplitTransactionImpl.execute(Server server,
-  RegionServerServices services,
-  User user) 
-
 
 (package private) http://docs.oracle.com/javase/7/docs/api/java/util/SortedMap.html?is-external=true";
 title="class or interface in java.util">SortedMapLong,Region>
 HRegionServer.getCopyOfOnlineRegionsSortedBySize() 
@@ -759,16 +759,16 @@
 
 
 void
+HeapMemoryManager.HeapMemoryTunerChore.flushRequested(FlushType type,
+Region region) 
+
+
+void
 FlushRequestListener.flushRequested(FlushType type,
 Region region)
 Callback which will get called when a flush request is made 
for a region.
 
 
-
-void
-HeapMemoryManager.HeapMemoryTunerChore.flushRequested(FlushType type,
-Region region) 
-
 
 (package private) WAL
 RSRpcServices.getWAL(Region region) 
@@ -859,71 +859,71 @@
 
 
 CompactionRequest
-CompactSplitThread.requestCompaction(Region r,
+CompactionRequestor.requestCompaction(Region r,
   Store s,
   http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why,
   CompactionRequest request) 
 
 
 CompactionRequest
-CompactionRequestor.requestCompaction(Region r,
+CompactSplitThread.requestCompaction(Region r,
   Store s,
   http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why,
   CompactionRequest request) 
 
 
 CompactionRequest
-CompactSplitThread.requestCompaction(Region r,
+CompactionRequestor.requestCompaction(Region r,
   Store s,
   http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why,
-  int priority,
+  int pri,
   CompactionRequest request,
   User user) 
 
 
 CompactionRequest
-CompactionRequestor.requestCompaction(Region r,
+CompactSplitThread.requestCompaction(Region r,
   Store s,
   http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why,
-  int pri,
+  int priority,
  

[09/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureScheduler.ProcedureEvent.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureScheduler.ProcedureEvent.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureScheduler.ProcedureEvent.html
index e590988..56159a6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureScheduler.ProcedureEvent.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureScheduler.ProcedureEvent.html
@@ -144,11 +144,11 @@
 
 
 void
-MasterProcedureScheduler.suspend(MasterProcedureScheduler.ProcedureEvent event) 
+MasterProcedureEnv.suspend(MasterProcedureScheduler.ProcedureEvent event) 
 
 
 void
-MasterProcedureEnv.suspend(MasterProcedureScheduler.ProcedureEvent event) 
+MasterProcedureScheduler.suspend(MasterProcedureScheduler.ProcedureEvent event) 
 
 
 boolean
@@ -175,11 +175,11 @@
 
 
 void
-MasterProcedureScheduler.wake(MasterProcedureScheduler.ProcedureEvent event) 
+MasterProcedureEnv.wake(MasterProcedureScheduler.ProcedureEvent event) 
 
 
 void
-MasterProcedureEnv.wake(MasterProcedureScheduler.ProcedureEvent event) 
+MasterProcedureScheduler.wake(MasterProcedureScheduler.ProcedureEvent event) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
index 8588d5d..bae14c2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
@@ -117,11 +117,11 @@
 
 
 private ProcedurePrepareLatch
-EnableTableProcedure.syncLatch 
+CreateTableProcedure.syncLatch 
 
 
 private ProcedurePrepareLatch
-CreateTableProcedure.syncLatch 
+EnableTableProcedure.syncLatch 
 
 
 private ProcedurePrepareLatch

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
index 6a68522..dd93329 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
@@ -96,29 +96,29 @@
 
 
 TableProcedureInterface.TableOperationType
-CloneSnapshotProcedure.getTableOperationType() 
+DeleteColumnFamilyProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-TableProcedureInterface.getTableOperationType()
-Given an operation type we can take decisions about what to 
do with pending operations.
-
+CreateNamespaceProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-DeleteTableProcedure.getTableOperationType() 
+DeleteNamespaceProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-ModifyTableProcedure.getTableOperationType() 
+CloneSnapshotProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-EnableTableProcedure.getTableOperationType() 
+TableProcedureInterface.getTableOperationType()
+Given an operation type we can take decisions about what to 
do with pending operations.
+
 
 
 TableProcedureInterface.TableOperationType
-DeleteNamespaceProcedure.getTableOperationType() 
+DeleteTableProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
@@ -126,11 +126,11 @@
 
 
 TableProcedureInterface.TableOperationType
-ModifyNamespaceProcedure.getTableOperationType() 
+EnableTableProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-RestoreSnapshotProcedure.getTableOperationType() 
+TruncateTableProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
@@ -138,23 +138,23 @@
 
 
 TableProcedureInterface.TableOperationType
-AddColumnFamilyProcedure.getTableOperationType() 
+RestoreSnapshotProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-DisableTableProcedure.getTableOperationType() 
+ModifyTableProcedure.getTableOperationType() 
 
 
 TableProcedureInterface.TableOperationType
-DeleteColumnFamilyProcedure.getTabl

[41/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index 96098ea..eda0700 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -299,12 +299,6 @@
 
 
 
-org.apache.hadoop.hbase.client.HBaseAdmin.addColumn(TableName,
 HColumnDescriptor)
-Since 2.0. Will be removed in 3.0. Use
- HBaseAdmin.addColumnFamily(TableName,
 HColumnDescriptor) instead.
-
-
-
 org.apache.hadoop.hbase.client.Admin.addColumn(TableName,
 HColumnDescriptor)
 As of release 2.0.0.
  (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
@@ -312,6 +306,12 @@
  Use Admin.addColumnFamily(TableName,
 HColumnDescriptor).
 
 
+
+org.apache.hadoop.hbase.client.HBaseAdmin.addColumn(TableName,
 HColumnDescriptor)
+Since 2.0. Will be removed in 3.0. Use
+ HBaseAdmin.addColumnFamily(TableName,
 HColumnDescriptor) instead.
+
+
 
 org.apache.hadoop.hbase.security.visibility.VisibilityClient.addLabel(Configuration,
 String)
 Use VisibilityClient.addLabel(Connection,String)
 instead.
@@ -404,12 +404,6 @@
 
 
 
-org.apache.hadoop.hbase.client.HBaseAdmin.deleteColumn(TableName,
 byte[])
-Since 2.0. Will be removed in 3.0. Use
- HBaseAdmin.deleteColumnFamily(TableName,
 byte[]) instead.
-
-
-
 org.apache.hadoop.hbase.client.Admin.deleteColumn(TableName,
 byte[])
 As of release 2.0.0.
  (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
@@ -417,6 +411,12 @@
  Use Admin.deleteColumnFamily(TableName,
 byte[])}.
 
 
+
+org.apache.hadoop.hbase.client.HBaseAdmin.deleteColumn(TableName,
 byte[])
+Since 2.0. Will be removed in 3.0. Use
+ HBaseAdmin.deleteColumnFamily(TableName,
 byte[]) instead.
+
+
 
 org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(Cell)
 without any replacement.
@@ -426,13 +426,13 @@
 org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValues(List)
 
 
-org.apache.hadoop.hbase.regionserver.SplitTransaction.execute(Server,
 RegionServerServices)
-use #execute(Server, RegionServerServices, User);  as of 
1.0.2, remove in 3.0
+org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.execute(Server,
 RegionServerServices)
+use #execute(Server, RegionServerServices, 
User)
 
 
 
-org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.execute(Server,
 RegionServerServices)
-use #execute(Server, RegionServerServices, 
User)
+org.apache.hadoop.hbase.regionserver.SplitTransaction.execute(Server,
 RegionServerServices)
+use #execute(Server, RegionServerServices, User);  as of 
1.0.2, remove in 3.0
 
 
 
@@ -539,49 +539,49 @@
 
 
 
-org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[])
-internal method, do not use through HConnection
-
-
-
 org.apache.hadoop.hbase.client.ConnectionImplementation.getHTableDescriptor(byte[])
 Use Admin.getTableDescriptor(org.apache.hadoop.hbase.TableName)
   instead
 
 
-
-org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(TableName)
+
+org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[])
 internal method, do not use through HConnection
 
 
-
+
 org.apache.hadoop.hbase.client.ConnectionImplementation.getHTableDescriptor(TableName)
 Use Admin.getTableDescriptor(org.apache.hadoop.hbase.TableName)
   instead
 
 
-
-org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List)
-since 0.96.0
+
+org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(TableName)
+internal method, do not use through HConnection
 
 
-
+
 org.apache.hadoop.hbase.client.ConnectionImplementation.getHTableDescriptors(List)
 Use
   Admin.getTableDescriptorsByTableName(java.util.List)
   instead
 
 
-
-org.apache.hadoop.hbase.client.HConnection.getHTableDescriptorsByTableName(List)
-Use Admin.getTableDescriptor(TableName)
 instead.
+
+org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List)
+since 0.96.0
 
 
-
+
 org.apache.hadoop.hbase.client.ConnectionImplementation.getHTableDescriptorsByTableName(List)
 Use Admin.getTableDescriptorsByTableName(java.util.List)
 instead
 
 
+
+org.apache.hadoop.hbase.client.HConnection.getHTableDescriptorsByTableName(List)
+Use Admin.getTableDescriptor(TableName)
 instead.
+
+
 
 org.apache.hadoop.hbase.client.ClusterConnection.getKeepAliveMasterService()
 Since 0.96.0
@@ -654,22 +654,22 @@
 
 
 
-org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(byte[])
+org.apache.hadoop.hbase.client.ConnectionImplementation.getRegionCachePrefetch(byte[])
 always return false since 0.99
 
 
 
-org.apache.hadoop.hbase.client.ConnectionImplementation.getRegionCachePrefetch(byte[])
+org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(byte[])
 always return false since 0.99
 
 
 
-org.apache.hadoop.hbase.client.HConnection.getRegionCachePrefetch(TableName)
+org.apache.hadoop.hbase.client.ConnectionImplementation.getRegionCa

[07/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
index 11306ee..537e2a4 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
@@ -315,39 +315,33 @@ implements resetCellSet() 
 
 
-void
-rollback(Cell cell)
-Remove n key from the memstore.
-
-
-
 private void
 setOldestEditTimeToNow() 
 
-
+
 protected AbstractMemStore
 setSnapshot(ImmutableSegment snapshot) 
 
-
+
 protected void
 setSnapshotSize(long snapshotSize) 
 
-
+
 MemStoreSnapshot
 snapshot()
 An override on snapshot so the no arg version of the method 
implies zero seq num,
  like for cases without wal
 
 
-
+
 long
 timeOfOldestEdit() 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString() 
 
-
+
 long
 updateColumnValue(byte[] row,
   byte[] family,
@@ -358,13 +352,13 @@ implements 
+
 abstract void
 updateLowestUnflushedSequenceIdInWal(boolean onlyIfMoreRecent)
 Updates the wal with the lowest sequence id (oldest entry) 
that is still in memory
 
 
-
+
 private long
 upsert(Cell cell,
 long readpoint)
@@ -372,7 +366,7 @@ implements 
+
 long
 upsert(http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
 long readpoint)
@@ -711,31 +705,13 @@ implements Returns:size of the memstore 
snapshot
 
 
-
-
-
-
-
-rollback
-public void rollback(Cell cell)
-Remove n key from the memstore. Only cells that have the 
same key and the
- same memstoreTS are removed.  It is ok to not update timeRangeTracker
- in this call. It is possible that we can optimize this method by using
- tailMap/iterator, but since this method is called rarely (only for
- error recovery), we can leave those optimization for the future.
-
-Specified by:
-rollback in
 interface MemStore
-Parameters:cell - 

-
-
 
 
 
 
 
 toString
-public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
 
 Overrides:
 http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString()"
 title="class or interface in java.lang">toString in 
class http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
@@ -748,7 +724,7 @@ implements 
 
 getConfiguration
-protected org.apache.hadoop.conf.Configuration getConfiguration()
+protected org.apache.hadoop.conf.Configuration getConfiguration()
 
 
 
@@ -757,7 +733,7 @@ implements 
 
 dump
-protected void dump(org.apache.commons.logging.Log log)
+protected void dump(org.apache.commons.logging.Log log)
 
 
 
@@ -766,7 +742,7 @@ implements 
 
 upsert
-private long upsert(Cell cell,
+private long upsert(Cell cell,
   long readpoint)
 Inserts the specified Cell into MemStore and deletes any 
existing
  versions of the same row/family/qualifier as the specified Cell.
@@ -787,7 +763,7 @@ implements 
 
 getLowest
-protected Cell getLowest(Cell a,
+protected Cell getLowest(Cell a,
  Cell b)
 
 
@@ -797,7 +773,7 @@ implements 
 
 getNextRow
-protected Cell getNextRow(Cell key,
+protected Cell getNextRow(Cell key,
   http://docs.oracle.com/javase/7/docs/api/java/util/NavigableSet.html?is-external=true";
 title="class or interface in java.util">NavigableSet set)
 
 
@@ -807,7 +783,7 @@ implements 
 
 updateColumnValue
-public long updateColumnValue(byte[] row,
+public long updateColumnValue(byte[] row,
  byte[] family,
  byte[] qualifier,
  long newValue,
@@ -830,7 +806,7 @@ implements 
 
 maybeCloneWithAllocator
-private Cell maybeCloneWithAllocator(Cell cell)
+private Cell maybeCloneWithAllocator(Cell cell)
 
 
 
@@ -839,7 +815,7 @@ implements 
 
 internalAdd
-private long internalAdd(Cell toAdd)
+private long internalAdd(Cell toAdd)
 Internal version of add() that doesn't clone Cells with the
  allocator, and doesn't take the lock.
 
@@ -852,7 +828,7 @@ implements 
 
 setOldestEditTimeToNow
-private void setOldestEditTimeToNow()
+private void setOldestEditTimeToNow()
 
 
 
@@ -861,7 +837,7 @@ implements 
 
 keySize
-protected long keySize()
+protected long keySize()
 
 
 
@@ -870,7 +846,7 @@ implements 
 
 getComparator
-protected CellComparator getComparator()
+protected CellComparator getComparat

[37/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
index eab4d9a..69864c1 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
@@ -234,16 +234,16 @@
 
 
 int
+BufferedDataBlockEncoder.BufferedEncodedSeeker.compareKey(CellComparator comparator,
+Cell key) 
+
+
+int
 DataBlockEncoder.EncodedSeeker.compareKey(CellComparator comparator,
 Cell key)
 Compare the given key against the current key
 
 
-
-int
-BufferedDataBlockEncoder.BufferedEncodedSeeker.compareKey(CellComparator comparator,
-Cell key) 
-
 
 DataBlockEncoder.EncodedSeeker
 CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
@@ -298,9 +298,9 @@
 
 
 
-private CellComparator
-HFileBlockIndex.CellBasedKeyBlockIndexReader.comparator
-Needed doing lookup on blocks.
+protected CellComparator
+HFileWriterImpl.comparator
+Key comparator.
 
 
 
@@ -308,23 +308,23 @@
 HFile.WriterFactory.comparator 
 
 
+private CellComparator
+HFileBlockIndex.CellBasedKeyBlockIndexReader.comparator
+Needed doing lookup on blocks.
+
+
+
 protected CellComparator
 CompoundBloomFilterBase.comparator
 Comparator used to compare Bloom filter keys
 
 
-
+
 private CellComparator
 HFileReaderImpl.comparator
 Key comparator
 
 
-
-protected CellComparator
-HFileWriterImpl.comparator
-Key comparator.
-
-
 
 
 
@@ -499,24 +499,24 @@
 StripeStoreFileManager.cellComparator 
 
 
-protected CellComparator
-StripeMultiFileWriter.comparator 
+private CellComparator
+StoreFile.WriterBuilder.comparator 
 
 
 private CellComparator
-Segment.comparator 
+AbstractMemStore.comparator 
 
 
-private CellComparator
-ScanInfo.comparator 
+protected CellComparator
+StripeMultiFileWriter.comparator 
 
 
 private CellComparator
-StoreFile.WriterBuilder.comparator 
+Segment.comparator 
 
 
 private CellComparator
-AbstractMemStore.comparator 
+ScanInfo.comparator 
 
 
 private CellComparator
@@ -531,13 +531,13 @@
 StripeStoreFlusher.StripeFlushRequest.comparator 
 
 
-private CellComparator
-DefaultStoreFileManager.kvComparator 
-
-
 protected CellComparator
 KeyValueHeap.KVScannerComparator.kvComparator 
 
+
+private CellComparator
+DefaultStoreFileManager.kvComparator 
+
 
 private CellComparator
 ScanQueryMatcher.rowComparator
@@ -564,38 +564,38 @@
 HRegion.getCellCompartor() 
 
 
-(package private) CellComparator
-StoreFileScanner.getComparator() 
+CellComparator
+StoreFile.Reader.getComparator() 
 
 
 protected CellComparator
+AbstractMemStore.getComparator() 
+
+
+protected CellComparator
 Segment.getComparator()
 Returns the Cell comparator used by this segment
 
 
-
-CellComparator
-ScanInfo.getComparator() 
-
 
 CellComparator
-StoreFile.Reader.getComparator() 
+ScanInfo.getComparator() 
 
 
 CellComparator
-KeyValueHeap.KVScannerComparator.getComparator() 
+Store.getComparator() 
 
 
-protected CellComparator
-AbstractMemStore.getComparator() 
+CellComparator
+KeyValueHeap.KVScannerComparator.getComparator() 
 
 
-CellComparator
-HStore.getComparator() 
+(package private) CellComparator
+StoreFileScanner.getComparator() 
 
 
 CellComparator
-Store.getComparator() 
+HStore.getComparator() 
 
 
 
@@ -608,18 +608,18 @@
 
 
 protected void
-ReversedStoreScanner.checkScanOrder(Cell prevKV,
-Cell kv,
-CellComparator comparator) 
-
-
-protected void
 StoreScanner.checkScanOrder(Cell prevKV,
 Cell kv,
 CellComparator comparator)
 Check whether scan as expected order
 
 
+
+protected void
+ReversedStoreScanner.checkScanOrder(Cell prevKV,
+Cell kv,
+CellComparator comparator) 
+
 
 static StoreEngine
 StoreEngine.create(Store store,
@@ -629,6 +629,12 @@
 
 
 
+protected void
+DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
+Store store,
+CellComparator kvComparator) 
+
+
 protected abstract void
 StoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
 Store store,
@@ -636,42 +642,42 @@
 Create the StoreEngine's components.
 
 
-
+
 protected void
-DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
+DateTieredStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
 Store store,
 CellComparator kvComparator) 
 
-
+
 protected void
 StripeStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,

[36/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
index 5b3b866..99b8782 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
@@ -463,38 +463,38 @@ service.
 
 
 void
-HBaseAdmin.addColumn(TableName tableName,
+Admin.addColumn(TableName tableName,
   HColumnDescriptor columnFamily)
 Deprecated. 
-Since 2.0. Will be removed in 3.0. Use
- HBaseAdmin.addColumnFamily(TableName,
 HColumnDescriptor) instead.
+As of release 2.0.0.
+ (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
+ This will be removed in HBase 3.0.0.
+ Use Admin.addColumnFamily(TableName,
 HColumnDescriptor).
 
 
 
 
 void
-Admin.addColumn(TableName tableName,
+HBaseAdmin.addColumn(TableName tableName,
   HColumnDescriptor columnFamily)
 Deprecated. 
-As of release 2.0.0.
- (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
- This will be removed in HBase 3.0.0.
- Use Admin.addColumnFamily(TableName,
 HColumnDescriptor).
+Since 2.0. Will be removed in 3.0. Use
+ HBaseAdmin.addColumnFamily(TableName,
 HColumnDescriptor) instead.
 
 
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
-HBaseAdmin.addColumnFamily(TableName tableName,
-  HColumnDescriptor columnFamily) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
 Admin.addColumnFamily(TableName tableName,
   HColumnDescriptor columnFamily)
 Add a column family to an existing table.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
+HBaseAdmin.addColumnFamily(TableName tableName,
+  HColumnDescriptor columnFamily) 
+
 
 UnmodifyableHTableDescriptor
 UnmodifyableHTableDescriptor.addFamily(HColumnDescriptor family)
@@ -503,38 +503,38 @@ service.
 
 
 void
-HBaseAdmin.modifyColumn(TableName tableName,
+Admin.modifyColumn(TableName tableName,
 HColumnDescriptor columnFamily)
 Deprecated. 
-As of 2.0. Will be removed in 3.0. Use
- HBaseAdmin.modifyColumnFamily(TableName,
 HColumnDescriptor) instead.
+As of release 2.0.0.
+ (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
+ This will be removed in HBase 3.0.0.
+ Use Admin.modifyColumnFamily(TableName,
 HColumnDescriptor).
 
 
 
 
 void
-Admin.modifyColumn(TableName tableName,
+HBaseAdmin.modifyColumn(TableName tableName,
 HColumnDescriptor columnFamily)
 Deprecated. 
-As of release 2.0.0.
- (https://issues.apache.org/jira/browse/HBASE-1989";>HBASE-1989).
- This will be removed in HBase 3.0.0.
- Use Admin.modifyColumnFamily(TableName,
 HColumnDescriptor).
+As of 2.0. Will be removed in 3.0. Use
+ HBaseAdmin.modifyColumnFamily(TableName,
 HColumnDescriptor) instead.
 
 
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
-HBaseAdmin.modifyColumnFamily(TableName tableName,
-HColumnDescriptor columnFamily) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
 Admin.modifyColumnFamily(TableName tableName,
 HColumnDescriptor columnFamily)
 Modify an existing column family on a table.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">Future

[28/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/SplitLogTask.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/SplitLogTask.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/SplitLogTask.html
index 73ded02..0855b61 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/SplitLogTask.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/SplitLogTask.html
@@ -147,19 +147,19 @@
 
 
 void
-SplitLogWorkerCoordination.endTask(SplitLogTask slt,
+ZkSplitLogWorkerCoordination.endTask(SplitLogTask slt,
   http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong ctr,
-  SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails)
-Notify coordination engine that splitting task has 
completed.
+  SplitLogWorkerCoordination.SplitTaskDetails details)
+endTask() can fail and the only way to recover out of it is 
for the
+ SplitLogManager to timeout the 
task node.
 
 
 
 void
-ZkSplitLogWorkerCoordination.endTask(SplitLogTask slt,
+SplitLogWorkerCoordination.endTask(SplitLogTask slt,
   http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong ctr,
-  SplitLogWorkerCoordination.SplitTaskDetails details)
-endTask() can fail and the only way to recover out of it is 
for the
- SplitLogManager to timeout the 
task node.
+  SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails)
+Notify coordination engine that splitting task has 
completed.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/TableExistsException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableExistsException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableExistsException.html
index 4f49372..967b9af 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableExistsException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableExistsException.html
@@ -98,34 +98,29 @@
 
 
 void
-HBaseAdmin.cloneSnapshot(byte[] snapshotName,
-  TableName tableName) 
-
-
-void
 Admin.cloneSnapshot(byte[] snapshotName,
   TableName tableName)
 Create a new table by cloning the snapshot content.
 
 
-
+
 void
-HBaseAdmin.cloneSnapshot(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
+HBaseAdmin.cloneSnapshot(byte[] snapshotName,
   TableName tableName) 
 
-
+
 void
 Admin.cloneSnapshot(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
   TableName tableName)
 Create a new table by cloning the snapshot content.
 
 
-
-http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
-HBaseAdmin.cloneSnapshotAsync(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
-TableName tableName) 
-
 
+void
+HBaseAdmin.cloneSnapshot(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
+  TableName tableName) 
+
+
 http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
 Admin.cloneSnapshotAsync(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
 TableName tableName)
@@ -133,6 +128,11 @@
  and wait for it be completely cloned.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">FutureVoid>
+HBaseAdmin.cloneSnapshotAsync(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String snapshotName,
+

[19/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index 76e..9b9ec87 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -114,7 +114,9 @@
 
 
 void
-BaseMasterObserver.postAbortProcedure(ObserverContext ctx) 
+MasterObserver.postAbortProcedure(ObserverContext ctx)
+Called after a abortProcedure request has been 
processed.
+
 
 
 void
@@ -122,16 +124,18 @@
 
 
 void
-MasterObserver.postAbortProcedure(ObserverContext ctx)
-Called after a abortProcedure request has been 
processed.
-
+BaseMasterObserver.postAbortProcedure(ObserverContext ctx) 
 
 
 void
-BaseMasterObserver.postAddColumn(ObserverContext ctx,
+MasterObserver.postAddColumn(ObserverContext ctx,
   TableName tableName,
   HColumnDescriptor columnFamily)
-Deprecated. 
+Deprecated. 
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645).
+ Use MasterObserver.postAddColumnFamily(ObserverContext,
 TableName, HColumnDescriptor).
+
 
 
 
@@ -144,21 +148,19 @@
 
 
 void
-MasterObserver.postAddColumn(ObserverContext ctx,
+BaseMasterObserver.postAddColumn(ObserverContext ctx,
   TableName tableName,
   HColumnDescriptor columnFamily)
-Deprecated. 
-As of release 2.0.0, this will be removed in HBase 3.0.0
- (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645).
- Use MasterObserver.postAddColumnFamily(ObserverContext,
 TableName, HColumnDescriptor).
-
+Deprecated. 
 
 
 
 void
-BaseMasterObserver.postAddColumnFamily(ObserverContext ctx,
+MasterObserver.postAddColumnFamily(ObserverContext ctx,
   TableName tableName,
-  HColumnDescriptor columnFamily) 
+  HColumnDescriptor columnFamily)
+Called after the new column family has been created.
+
 
 
 void
@@ -168,17 +170,17 @@
 
 
 void
-MasterObserver.postAddColumnFamily(ObserverContext ctx,
+BaseMasterObserver.postAddColumnFamily(ObserverContext ctx,
   TableName tableName,
-  HColumnDescriptor columnFamily)
-Called after the new column family has been created.
-
+  HColumnDescriptor columnFamily) 
 
 
 void
-BaseMasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
+MasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
 TableName tableName,
-HColumnDescriptor columnFamily) 
+HColumnDescriptor columnFamily)
+Called after the new column family has been created.
+
 
 
 void
@@ -188,18 +190,20 @@
 
 
 void
-MasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
+BaseMasterObserver.postAddColumnFamilyHandler(ObserverContext ctx,
 TableName tableName,
-HColumnDescriptor columnFamily)
-Called after the new column family has been created.
-
+HColumnDescriptor columnFamily) 
 
 
 void
-BaseMasterObserver.postAddColumnHandler(ObserverContext ctx,
+MasterObserver.postAddColumnHandler(ObserverContext ctx,
 TableName tableName,
 HColumnDescriptor columnFamily)
-Deprecated. 
+Deprecated. 
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ (https://issues.apache.org/jira/browse/HBASE-13645";>HBASE-13645). Use
+ MasterObserver.postAddColumnFamilyHandler(ObserverContext,
 TableName, HColumnDescriptor).
+
 
 
 
@@ -212,28 +216,14 @@
 
 
 void
-MasterObserver.postAddColumnHandler(ObserverContext

[44/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index babbe9a..5202063 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -280,10 +280,10 @@
  Warnings
  Errors
 
-1724
+1728
 0
 0
-12504
+12478
 
 Files
 
@@ -3428,595 +3428,585 @@
 0
 3
 
-org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
-0
-0
-1
-
 org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
 0
 0
 15
-
+
 org/apache/hadoop/hbase/regionserver/BaseRowProcessor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/ColumnTracker.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 0
 0
 14
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischargeHandler.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/regionserver/CompactionTool.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/regionserver/DefaultHeapMemoryTuner.java
 0
 0
 38
-
+
 org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/DeleteTracker.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/FlushPolicyFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushRequestListener.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/FlushRequester.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/HMobStore.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/regionserver/HRegion.java
 0
 0
 209
-
+
 org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 0
 0
 44
-
+
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
 0
 93
-
+
 org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/HStore.java
 0
 0
-42
-
+45
+
 org/apache/hadoop/hbase/regionserver/HeapMemStoreLAB.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/regionserver/HeapMemoryTuner.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/InternalScan.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/InternalScanner.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/KeyValueHeap.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/regionserver/KeyValueScanner.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/LeaseException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/Leases.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/regionserver/LogRoller.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/regionserver/LruHashMap.java
 0
 0
 69
-
+
 org/apache/hadoop/hbase/regionserver/MemStore.java
 0
 0
-12
-
+11
+
 org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
 0
 0
 23
-
+
 org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegion.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/regionserver/MultiVersionConcurrencyControl.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/regionserver/NoOpHeapMemoryTuner.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/OnlineRegions.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/regionserver/OperationStatus.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/regionserver/

[40/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

2016-04-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index fddebb7..ca3fc2d 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1122,6 +1122,8 @@
 
 Add a child procedure to execute
 
+addCipherOptions(DataTransferProtos.DataTransferEncryptorMessageProto.Builder,
 List) - Method in interface 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputSaslHelper.CipherHelper
+ 
 addClientPort(int)
 - Method in class org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster
 
 Add a client port to the list.
@@ -2215,6 +2217,8 @@
 
 AES(CipherProvider)
 - Constructor for class org.apache.hadoop.hbase.io.crypto.aes.AES
  
+AES_CTR_NOPADDING
 - Static variable in class org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputSaslHelper
+ 
 AESDecryptor - Class in org.apache.hadoop.hbase.io.crypto.aes
  
 AESDecryptor(Cipher)
 - Constructor for class org.apache.hadoop.hbase.io.crypto.aes.AESDecryptor
@@ -2351,6 +2355,8 @@
 
 Always allocates a new buffer of the correct size.
 
+allocateBuffer(ChannelHandlerContext,
 ByteBuf, boolean) - Method in class org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler
+ 
 allocateBytes(int)
 - Method in class org.apache.hadoop.hbase.regionserver.HeapMemStoreLAB
 
 Allocate a slice of the given length.
@@ -2712,6 +2718,8 @@
  
 apply(StoreFile)
 - Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators.GetFileSize
  
+apply(StoreFile)
 - Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators.GetMaxTimestamp
+ 
 apply(StoreFile)
 - Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators.GetPathName
  
 apply(StoreFile)
 - Method in class org.apache.hadoop.hbase.regionserver.StoreFile.Comparators.GetSeqId
@@ -2743,20 +2751,16 @@
 Apply the settings in the given key to the given 
configuration, this is
  used to communicate with distant clusters
 
-applyCompactionPolicy(ArrayList,
 boolean, boolean) - Method in class 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy
-
-Could return null if no candidates are found
-
-applyCompactionPolicy(ArrayList,
 boolean, boolean, long) - Method in class 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy
-
-Input candidates are sorted from oldest to newest by 
seqId.
-
 applyCompactionPolicy(ArrayList,
 boolean, boolean) - Method in class 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy
  
 applyCompactionPolicy(List,
 boolean, boolean, int, int) - Method in class 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy
  
 applyCompactionPolicy(ArrayList,
 boolean, boolean) - Method in class 
org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy
- 
+
+-- Default minor compaction selection algorithm:
+ choose CompactSelection from candidates --
+ First exclude bulk-load files if indicated in configuration.
+
 applyFamilyMapToMemstore(Map>, boolean, long) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
 
 Atomically apply the given map of family->edits to the 
memstore.
@@ -5143,6 +5147,8 @@
  
 bootstrap(Path,
 Configuration) - Static method in class 
org.apache.hadoop.hbase.master.MasterFileSystem
  
+boundaries
 - Variable in class org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest
+ 
 boundaries
 - Variable in class org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter
  
 BoundedArrayQueue - Class in org.apache.hadoop.hbase.util
@@ -5521,6 +5527,8 @@
 Puts CellScanner Cells into a cell block using passed in 
codec and/or
  compressor.
 
+buildClientPassword(Token)
 - Static method in class org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputSaslHelper
+ 
 buildDependencyClasspath(Configuration)
 - Static method in class org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil
 
 Returns a classpath string built from the content of the 
"tmpjars" value in conf.
@@ -5597,6 +5605,8 @@
 
 Build the user information
 
+buildUsername(Token)
 - Static method in class org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputSaslHelper
+ 
 buildWALHeader(Configuration,
 WALProtos.WALHeader.Builder) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter
  
 buildWALHeader(Configuration,
 WALProtos.WALHeader.Builder) - Method in class 
org.apache.hadoop.hbase.regionserver.wal.SecureAsyncProtobufLogWriter
@@ -6309,7 +6319,7 @@
  
 cached
 - Variable in class org.apache.hadoop.hbase.wal.RegionGroupingProvider
 
-A group-wal mapping, recommended to make sure one-one 
rather than many-one mapping
+A group-provider mapping, make sure one-one rather than 
ma

[46/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/util/class-use/PositionedByteRange.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/util/class-use/PositionedByteRange.html 
b/apidocs/org/apache/hadoop/hbase/util/class-use/PositionedByteRange.html
index a81cdaa..2f3ad05 100644
--- a/apidocs/org/apache/hadoop/hbase/util/class-use/PositionedByteRange.html
+++ b/apidocs/org/apache/hadoop/hbase/util/class-use/PositionedByteRange.html
@@ -116,116 +116,116 @@
 
 
 
+T
+DataType.decode(PositionedByteRange src)
+Read an instance of T from the buffer 
src.
+
+
+
+http://docs.oracle.com/javase/7/docs/api/java/lang/Number.html?is-external=true";
 title="class or interface in java.lang">Number
+OrderedNumeric.decode(PositionedByteRange src) 
+
+
 http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long
-OrderedInt64.decode(PositionedByteRange src) 
+RawLong.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer
-OrderedInt32.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Short.html?is-external=true";
 title="class or interface in java.lang">Short
+RawShort.decode(PositionedByteRange src) 
 
 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[]
+Struct.decode(PositionedByteRange src) 
+
+
 T
 FixedLengthWrapper.decode(PositionedByteRange src) 
 
-
+
 http://docs.oracle.com/javase/7/docs/api/java/lang/Byte.html?is-external=true";
 title="class or interface in java.lang">Byte
 RawByte.decode(PositionedByteRange src) 
 
-
-http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer
-RawInteger.decode(PositionedByteRange src) 
-
 
 http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 RawString.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Double.html?is-external=true";
 title="class or interface in java.lang">Double
-RawDouble.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Byte.html?is-external=true";
 title="class or interface in java.lang">Byte
+OrderedInt8.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Number.html?is-external=true";
 title="class or interface in java.lang">Number
-OrderedNumeric.decode(PositionedByteRange src) 
+byte[]
+RawBytes.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Short.html?is-external=true";
 title="class or interface in java.lang">Short
-RawShort.decode(PositionedByteRange src) 
+T
+TerminatedWrapper.decode(PositionedByteRange src) 
 
 
-byte[]
-OrderedBlobVar.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+OrderedString.decode(PositionedByteRange src) 
 
 
 http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long
-RawLong.decode(PositionedByteRange src) 
+OrderedInt64.decode(PositionedByteRange src) 
 
 
 http://docs.oracle.com/javase/7/docs/api/java/lang/Short.html?is-external=true";
 title="class or interface in java.lang">Short
 OrderedInt16.decode(PositionedByteRange src) 
 
 
-T
-TerminatedWrapper.decode(PositionedByteRange src) 
+byte[]
+OrderedBlobVar.decode(PositionedByteRange src) 
 
 
-byte[]
-RawBytes.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer
+RawInteger.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Float.html?is-external=true";
 title="class or interface in java.lang">Float
-OrderedFloat32.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer
+OrderedInt32.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Byte.html?is-external=true";
 title="class or interface in java.lang">Byte
-OrderedInt8.decode(PositionedByteRange src) 
+http://docs.oracle.com/javase/7/docs/api/java/lang/Double.html?is-external=true";
 title="class or interface in java.lang">Double
+OrderedFloat64.decode(PositionedByteRange src) 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[]
-Struct.decode(PositionedByteRange src) 
-
-
 byte[]
 OrderedBlob.decode(PositionedByteRange src) 
 
-
+
 http://docs.oracle.c

[15/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index 7a9d2b8..7d65a2f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -179,18 +179,18 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
-  org.apache.hadoop.mapred.JobConf job,
-  
org.apache.hadoop.mapred.Reporter reporter) 
-
-
-org.apache.hadoop.mapred.RecordReader
 TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
   org.apache.hadoop.mapred.JobConf job,
   
org.apache.hadoop.mapred.Reporter reporter)
 Builds a TableRecordReader.
 
 
+
+org.apache.hadoop.mapred.RecordReader
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+  org.apache.hadoop.mapred.JobConf job,
+  
org.apache.hadoop.mapred.Reporter reporter) 
+
 
 org.apache.hadoop.mapred.RecordReader
 MultiTableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
@@ -214,11 +214,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
+GroupingTableMap.map(ImmutableBytesWritable key,
   Result value,
   org.apache.hadoop.mapred.OutputCollector output,
   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
+Extract the grouping columns from value to construct a new 
key.
 
 
 
@@ -230,11 +230,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-GroupingTableMap.map(ImmutableBytesWritable key,
+IdentityTableMap.map(ImmutableBytesWritable key,
   Result value,
   org.apache.hadoop.mapred.OutputCollector output,
   org.apache.hadoop.mapred.Reporter reporter)
-Extract the grouping columns from value to construct a new 
key.
+Pass the key, value to reduce
 
 
 
@@ -277,11 +277,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
+GroupingTableMap.map(ImmutableBytesWritable key,
   Result value,
   org.apache.hadoop.mapred.OutputCollector output,
   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
+Extract the grouping columns from value to construct a new 
key.
 
 
 
@@ -293,11 +293,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-GroupingTableMap.map(ImmutableBytesWritable key,
+IdentityTableMap.map(ImmutableBytesWritable key,
   Result value,
   org.apache.hadoop.mapred.OutputCollector output,
   org.apache.hadoop.mapred.Reporter reporter)
-Extract the grouping columns from value to construct a new 
key.
+Pass the key, value to reduce
 
 
 
@@ -345,7 +345,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ImmutableBytesWritable
-HashTable.TableHash.Reader.key 
+TableRecordReaderImpl.key 
 
 
 private ImmutableBytesWritable
@@ -353,7 +353,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ImmutableBytesWritable
-TableRecordReaderImpl.key 
+HashTable.TableHash.Reader.key 
 
 
 (package private) ImmutableBytesWritable
@@ -423,32 +423,32 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ImmutableBytesWritable
-TableRecordReader.getCurrentKey()
-Returns the current key.
-
+TableSnapshotInputFormatImpl.RecordReader.getCurrentKey() 
 
 
 ImmutableBytesWritable
-HashTable.TableHash.Reader.getCurrentKey()
-Get the current key
+TableRecordReader.getCurrentKey()
+Returns the current key.
 
 
 
 ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.getCurrentKey() 
+TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey() 
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey() 
+TableRecordReaderImpl.getCurrentKey()
+Returns the current key.
+
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormatImpl.RecordReader.getCurrentKey() 
+MultithreadedTableMapper.SubMapRecordReader.getCurr

[26/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
index e4d462e..3a9942d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
@@ -194,13 +194,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-EnableTableHandler
-EnableTableHandler.prepare() 
-
-
 DisableTableHandler
 DisableTableHandler.prepare() 
 
+
+EnableTableHandler
+EnableTableHandler.prepare() 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
index 8e92558..c9d1384 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
@@ -146,15 +146,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 KeyValue.EMPTY_ARRAY_LIST 
 
 
-private static http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
-CellUtil.EMPTY_TAGS_ITR 
-
-
 (package private) static http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 TagUtil.EMPTY_TAGS_ITR
 Iterator returned when no Tags.
 
 
+
+private static http://docs.oracle.com/javase/7/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+CellUtil.EMPTY_TAGS_ITR 
+
 
 
 
@@ -767,18 +767,18 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-DefaultVisibilityLabelServiceImpl.createVisibilityExpTags(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String visExpression,
-  
boolean withSerializationFormat,
-  
boolean checkAuths) 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 VisibilityLabelService.createVisibilityExpTags(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String visExpression,
   
boolean withSerializationFormat,
   boolean checkAuths)
 Creates tags corresponding to given visibility 
expression.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+DefaultVisibilityLabelServiceImpl.createVisibilityExpTags(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String visExpression,
+  
boolean withSerializationFormat,
+  
boolean checkAuths) 
+
 
 static http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 VisibilityUtils.createVisibilityExpTags(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String visExpression,
@@ -823,11 +823,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 byte[]
-DefaultVisibilityLabelServiceImpl.encodeVisibilityForReplication(http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags,
-http://docs.oracle.com/javase/7/docs/api/java/lang/Byte.html?is-external=true";
 title="class or interface in 
java.lang">Byte serializationFormat) 
-
-
-byte[]
 VisibilityLabelService.encodeVisibilityForReplication(http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List visTags,
 http://docs.oracle.com/javase/7/docs/api/java/lang/Byte.html?is-external=true";
 title="class or interface in 
java.lang">Byte serializationFormat)
 Provides a way to modify the visibility tags of type TagType
@@ -837,6 +832,11 @@ Input

[30/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Server.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
index f8195eb..f69011b 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
@@ -166,13 +166,13 @@
 
 
 
-protected Server
-ZkCoordinatedStateManager.server 
-
-
 private Server
 ZKSplitLogManagerCoordination.server 
 
+
+protected Server
+ZkCoordinatedStateManager.server 
+
 
 
 
@@ -325,28 +325,28 @@
 
 
 private Server
-ServerManager.master 
+ActiveMasterManager.master 
 
 
 private Server
-ActiveMasterManager.master 
+ServerManager.master 
 
 
 (package private) Server
 MasterFileSystem.master 
 
 
-private Server
-RegionStateStore.server 
-
-
 protected Server
 BulkAssigner.server 
 
-
+
 private Server
 CatalogJanitor.server 
 
+
+private Server
+RegionStateStore.server 
+
 
 private Server
 SplitLogManager.server 
@@ -514,19 +514,19 @@
 
 
 private Server
-LogRoller.server 
+RegionMergeTransactionImpl.server 
 
 
 private Server
-RegionMergeTransactionImpl.server 
+SplitTransactionImpl.server 
 
 
 private Server
-SplitTransactionImpl.server 
+SplitTransactionImpl.DaughterOpener.server 
 
 
 private Server
-SplitTransactionImpl.DaughterOpener.server 
+LogRoller.server 
 
 
 private Server
@@ -543,23 +543,23 @@
 
 
 Server
-SplitTransaction.getServer()
+RegionMergeTransaction.getServer()
 Get the Server running the transaction or rollback
 
 
 
 Server
-RegionMergeTransaction.getServer()
-Get the Server running the transaction or rollback
-
+RegionMergeTransactionImpl.getServer() 
 
 
 Server
-RegionMergeTransactionImpl.getServer() 
+SplitTransactionImpl.getServer() 
 
 
 Server
-SplitTransactionImpl.getServer() 
+SplitTransaction.getServer()
+Get the Server running the transaction or rollback
+
 
 
 
@@ -594,15 +594,6 @@
 
 
 
-PairOfSameType
-SplitTransaction.execute(Server server,
-  RegionServerServices services)
-Deprecated. 
-use #execute(Server, RegionServerServices, User);  as of 
1.0.2, remove in 3.0
-
-
-
-
 Region
 RegionMergeTransaction.execute(Server server,
   RegionServerServices services)
@@ -611,25 +602,26 @@
 
 
 
-
+
 Region
 RegionMergeTransactionImpl.execute(Server server,
   RegionServerServices services) 
 
-
+
 PairOfSameType
 SplitTransactionImpl.execute(Server server,
   RegionServerServices services) 
 
-
+
 PairOfSameType
-SplitTransaction.execute(Server server,
-  RegionServerServices services,
-  User user)
-Run the transaction.
+SplitTransaction.execute(Server server,
+  RegionServerServices services)
+Deprecated. 
+use #execute(Server, RegionServerServices, User);  as of 
1.0.2, remove in 3.0
+
 
 
-
+
 Region
 RegionMergeTransaction.execute(Server server,
   RegionServerServices services,
@@ -637,18 +629,26 @@
 Run the transaction.
 
 
-
+
 Region
 RegionMergeTransactionImpl.execute(Server server,
   RegionServerServices services,
   User user) 
 
-
+
 PairOfSameType
 SplitTransactionImpl.execute(Server server,
   RegionServerServices services,
   User user) 
 
+
+PairOfSameType
+SplitTransaction.execute(Server server,
+  RegionServerServices services,
+  User user)
+Run the transaction.
+
+
 
 void
 ReplicationService.initialize(Server rs,
@@ -684,15 +684,6 @@
 
 
 boolean
-SplitTransaction.rollback(Server server,
-RegionServerServices services)
-Deprecated. 
-use #rollback(Server, RegionServerServices, User); as of 
1.0.2, remove in 3.0
-
-
-
-
-boolean
 RegionMergeTransaction.rollback(Server server,
 RegionServerServices services)
 Deprecated. 
@@ -700,25 +691,26 @@
 
 
 
-
+
 boolean
 RegionMergeTransactionImpl.rollback(Server server,
 RegionServerServices services) 
 
-
+
 boolean
 SplitTransactionImpl.rollback(Server server,
 RegionServerServices services) 
 
-
+
 boolean
-SplitTransaction.rollback(Server server,
-RegionServerServices services,
-User user)
-Roll back a failed transaction
+SplitTransaction.rollback(Server server,
+RegionServerServices services)
+Deprecated. 
+use #rollback(Server, RegionServerServices, User); as of 
1.0.2, remove in 3.0
+
 
 
-
+
 boolean
 RegionMergeTransaction.rollback(Server server,
 RegionServerServices services,
@@ -726,18 +718,26 @@
 Roll back a failed transaction
 
 
-
+
 boolean
 RegionMergeTransactionImpl.rollback(Server server,
 RegionServerServices services,
 User user) 
 
-
+
 boolean
 SplitTransactionImpl.rollback(Server server,
 RegionServerServi

[45/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/overview-tree.html
--
diff --git a/apidocs/overview-tree.html b/apidocs/overview-tree.html
index 62051a0..a5abf10 100644
--- a/apidocs/overview-tree.html
+++ b/apidocs/overview-tree.html
@@ -826,22 +826,22 @@
 org.apache.hadoop.hbase.util.Order
 org.apache.hadoop.hbase.KeepDeletedCells
 org.apache.hadoop.hbase.io.encoding.DataBlockEncoding
-org.apache.hadoop.hbase.filter.FilterList.Operator
-org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
 org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
+org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
-org.apache.hadoop.hbase.quotas.QuotaScope
-org.apache.hadoop.hbase.quotas.QuotaType
-org.apache.hadoop.hbase.quotas.ThrottlingException.Type
-org.apache.hadoop.hbase.quotas.ThrottleType
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.Admin.CompactType
 org.apache.hadoop.hbase.client.Admin.MasterSwitchType
+org.apache.hadoop.hbase.client.Durability
+org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.security.SecurityCapability
+org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.ThrottleType
+org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.QuotaScope
+org.apache.hadoop.hbase.regionserver.BloomType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/book.html
--
diff --git a/book.html b/book.html
index 116aa23..6e780e7 100644
--- a/book.html
+++ b/book.html
@@ -33282,7 +33282,7 @@ The server will return cellblocks compressed using this 
same compressor as long
 
 
 Version 2.0.0-SNAPSHOT
-Last updated 2016-04-07 20:13:40 UTC
+Last updated 2016-04-08 14:30:12 UTC
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 3eb37c5..06a28d5 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
   Bulk Loads in Apache HBase (TM)
@@ -305,7 +305,7 @@ under the License. -->
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 



[48/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/apidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index b7f390d..02c305c 100644
--- a/apidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/apidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -382,63 +382,63 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-MultipleColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+DependentColumnFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-KeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-PrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+InclusiveStopFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+SingleColumnValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-TimestampsFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+QualifierFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+KeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+PrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnPaginationFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+MultipleColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+PageFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-SingleColumnValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-DependentColumnFilter.cr

[38/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 7ebbecd..7f05906 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -1260,32 +1260,32 @@ service.
 
 
 
-static boolean
-CellUtil.matchingRowColumn(Cell left,
+boolean
+KeyValue.KVComparator.matchingRowColumn(Cell left,
   Cell right)
+Deprecated. 
 Compares the row and column of two keyvalues for 
equality
 
 
 
-boolean
-KeyValue.KVComparator.matchingRowColumn(Cell left,
+static boolean
+CellUtil.matchingRowColumn(Cell left,
   Cell right)
-Deprecated. 
 Compares the row and column of two keyvalues for 
equality
 
 
 
-static boolean
-CellUtil.matchingRows(Cell left,
+boolean
+KeyValue.KVComparator.matchingRows(Cell left,
 Cell right)
+Deprecated. 
 Compares the row of two keyvalues for equality
 
 
 
-boolean
-KeyValue.KVComparator.matchingRows(Cell left,
+static boolean
+CellUtil.matchingRows(Cell left,
 Cell right)
-Deprecated. 
 Compares the row of two keyvalues for equality
 
 
@@ -1645,17 +1645,17 @@ service.
 
 
 
-Append
-Append.add(Cell cell)
-Add column and value to this Append operation.
-
-
-
 Put
 Put.add(Cell kv)
 Add the specified KeyValue to this Put operation.
 
 
+
+Append
+Append.add(Cell cell)
+Add column and value to this Append operation.
+
+
 
 Increment
 Increment.add(Cell cell)
@@ -1750,26 +1750,26 @@ service.
 boolean partial) 
 
 
-Append
-Append.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+Delete
+Delete.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
 
 
+Put
+Put.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+
+
 Mutation
 Mutation.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map)
 Method for setting the put's familyMap
 
 
-
-Put
-Put.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
-
 
-Increment
-Increment.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+Append
+Append.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
 
 
-Delete
-Delete.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
+Increment
+Increment.setFamilyCellMap(http://docs.oracle.com/javase/7/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> map) 
 
 
 
@@ -1802,10 +1802,10 @@ service.
 
 
 
-http://docs.oracle.com/javase/7/docs/api/java/math/BigDecimal.html?is-external=true";
 title="class or interface in java.math">BigDecimal
-BigDecimalColumnInterpreter.getValue(byte[] colFamily

[21/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallerInterceptorContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallerInterceptorContext.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallerInterceptorContext.html
index 15c7d81..bd61af9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallerInterceptorContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallerInterceptorContext.html
@@ -131,24 +131,20 @@
 
 
 
+RetryingCallerInterceptorContext
+PreemptiveFastFailInterceptor.createEmptyContext() 
+
+
 abstract RetryingCallerInterceptorContext
 RetryingCallerInterceptor.createEmptyContext()
 This returns the context object for the current call.
 
 
-
-RetryingCallerInterceptorContext
-NoOpRetryableCallerInterceptor.createEmptyContext() 
-
 
 RetryingCallerInterceptorContext
-PreemptiveFastFailInterceptor.createEmptyContext() 
+NoOpRetryableCallerInterceptor.createEmptyContext() 
 
 
-RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable callable) 
-
-
 abstract RetryingCallerInterceptorContext
 RetryingCallerInterceptorContext.prepare(RetryingCallable callable)
 This prepares the context object by populating it with 
information specific
@@ -156,12 +152,11 @@
  which this will be used.
 
 
-
+
 RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable callable,
-  int tries) 
+NoOpRetryingInterceptorContext.prepare(RetryingCallable callable) 
 
-
+
 abstract RetryingCallerInterceptorContext
 RetryingCallerInterceptorContext.prepare(RetryingCallable callable,
   int tries)
@@ -169,6 +164,11 @@
  in.
 
 
+
+RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable callable,
+  int tries) 
+
 
 
 
@@ -179,49 +179,49 @@
 
 
 
+void
+PreemptiveFastFailInterceptor.handleFailure(RetryingCallerInterceptorContext context,
+  http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable t) 
+
+
 abstract void
 RetryingCallerInterceptor.handleFailure(RetryingCallerInterceptorContext context,
   http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable t)
 Call this function in case we caught a failure during 
retries.
 
 
-
+
 void
 NoOpRetryableCallerInterceptor.handleFailure(RetryingCallerInterceptorContext context,
   http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable t) 
 
-
+
 void
-PreemptiveFastFailInterceptor.handleFailure(RetryingCallerInterceptorContext context,
-  http://docs.oracle.com/javase/7/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable t) 
+PreemptiveFastFailInterceptor.intercept(RetryingCallerInterceptorContext context) 
 
-
+
 abstract void
 RetryingCallerInterceptor.intercept(RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext)
 Call this function alongside the actual call done on the 
callable.
 
 
-
+
 void
 NoOpRetryableCallerInterceptor.intercept(RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) 
 
-
+
 void
-PreemptiveFastFailInterceptor.intercept(RetryingCallerInterceptorContext context) 
+PreemptiveFastFailInterceptor.updateFailureInfo(RetryingCallerInterceptorContext context) 
 
-
+
 abstract void
 RetryingCallerInterceptor.updateFailureInfo(RetryingCallerInterceptorContext context)
 Call this function to update at the end of the retry.
 
 
-
-void
-NoOpRetryableCallerInterceptor.updateFailureInfo(RetryingCallerInterceptorContext context) 
-
 
 void
-PreemptiveFastFailInterceptor.updateFailureInfo(RetryingCallerInterceptorContext context) 
+NoOpRetryableCallerInterceptor.updateFailureInfo(RetryingCallerInterceptorContext context) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
index 52836b4..8b6492f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
@@ -251,11 +251,11 @@
 
 
 int
-RowMutations.compareTo(Row i) 
+RegionCoprocessorServiceExec.compareTo(Row o) 
 
 
 int
-RegionCoprocessorServiceExec.compareTo(Row o) 
+Get.compareTo(Row other) 
 
 
 int
@@ -267,7 +267,7 @@
 
 
 int
-

[24/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index d40f98e..52ed716 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -95,7 +95,7 @@
 
 
 
-protected static interface HBaseAdmin.ProcedureFuture.WaitForStateCallable
+protected static interface HBaseAdmin.ProcedureFuture.WaitForStateCallable
 
 
 
@@ -147,7 +147,7 @@
 
 
 checkState
-boolean checkState(int tries)
+boolean checkState(int tries)
throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Throws:
 http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -159,7 +159,7 @@
 
 
 throwInterruptedException
-void throwInterruptedException()
+void throwInterruptedException()
throws http://docs.oracle.com/javase/7/docs/api/java/io/InterruptedIOException.html?is-external=true";
 title="class or interface in java.io">InterruptedIOException
 Throws:
 http://docs.oracle.com/javase/7/docs/api/java/io/InterruptedIOException.html?is-external=true";
 title="class or interface in 
java.io">InterruptedIOException
@@ -171,7 +171,7 @@
 
 
 throwTimeoutException
-void throwTimeoutException(long elapsed)
+void throwTimeoutException(long elapsed)
throws http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/TimeoutException.html?is-external=true";
 title="class or interface in java.util.concurrent">TimeoutException
 Throws:
 http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/TimeoutException.html?is-external=true";
 title="class or interface in 
java.util.concurrent">TimeoutException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
index a59ec3b..36ae3da 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -109,7 +109,7 @@
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-protected static class HBaseAdmin.ProcedureFuture
+protected static class HBaseAdmin.ProcedureFuture
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">Future
 Future that waits on a procedure result.
@@ -314,7 +314,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 exception
-private http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutionException.html?is-external=true";
 title="class or interface in java.util.concurrent">ExecutionException exception
+private http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutionException.html?is-external=true";
 title="class or interface in java.util.concurrent">ExecutionException exception
 
 
 
@@ -323,7 +323,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 procResultFound
-private boolean procResultFound
+private boolean procResultFound
 
 
 
@@ -332,7 +332,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 done
-private boolean done
+private boolean done
 
 
 
@@ -341,7 +341,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 cancelled
-private boolean cancelled
+private boolean cancelled
 
 
 
@@ -350,7 +350,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 result
-private V result
+private V result
 
 
 
@@ -359,7 +359,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 admin
-private final HBaseAdmin admin
+private final HBaseAdmin admin
 
 
 
@@ -368,7 +368,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 procId
-private final http://docs.oracle.com/javase/7/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long procId
+private final http://docs.oracle.com/javase/7/docs/a

[12/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
index abb3921..e0fbb1e 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
@@ -127,12 +127,12 @@
 ScannerCallable.controllerFactory 
 
 
-protected RpcControllerFactory
-ClientScanner.rpcControllerFactory 
+private RpcControllerFactory
+RpcRetryingCallerWithReadReplicas.rpcControllerFactory 
 
 
 private RpcControllerFactory
-HBaseAdmin.rpcControllerFactory 
+ConnectionImplementation.rpcControllerFactory 
 
 
 protected RpcControllerFactory
@@ -140,15 +140,15 @@
 
 
 private RpcControllerFactory
-RpcRetryingCallerWithReadReplicas.rpcControllerFactory 
+HTable.rpcControllerFactory 
 
 
-private RpcControllerFactory
-HTable.rpcControllerFactory 
+protected RpcControllerFactory
+ClientScanner.rpcControllerFactory 
 
 
 private RpcControllerFactory
-ConnectionImplementation.rpcControllerFactory 
+HBaseAdmin.rpcControllerFactory 
 
 
 protected RpcControllerFactory
@@ -164,16 +164,16 @@
 
 
 
-private RpcControllerFactory
-HBaseAdmin.getRpcControllerFactory() 
+RpcControllerFactory
+ConnectionImplementation.getRpcControllerFactory() 
 
 
 RpcControllerFactory
 ClusterConnection.getRpcControllerFactory() 
 
 
-RpcControllerFactory
-ConnectionImplementation.getRpcControllerFactory() 
+private RpcControllerFactory
+HBaseAdmin.getRpcControllerFactory() 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
index 85ea186..bb78445 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
@@ -110,18 +110,18 @@
 
 
 
-abstract void
-RpcScheduler.init(RpcScheduler.Context context)
-Does some quick initialization.
-
+void
+SimpleRpcScheduler.init(RpcScheduler.Context context) 
 
 
 void
 FifoRpcScheduler.init(RpcScheduler.Context context) 
 
 
-void
-SimpleRpcScheduler.init(RpcScheduler.Context context) 
+abstract void
+RpcScheduler.init(RpcScheduler.Context context)
+Does some quick initialization.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
index ed48dc4..50a0775 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
@@ -141,11 +141,11 @@
 
 
 RpcScheduler
-RpcServerInterface.getScheduler() 
+RpcServer.getScheduler() 
 
 
 RpcScheduler
-RpcServer.getScheduler() 
+RpcServerInterface.getScheduler() 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
index 8fa8eb1..e04e03a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
@@ -99,16 +99,16 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ImportTsv.TsvParser
-TsvImporterTextMapper.parser 
-
-
-private ImportTsv.TsvParser
 TextSortReducer.parser 
 
-
+
 protected ImportTsv.TsvParser
 TsvImporterMapper.parser 
 
+
+private ImportTsv.TsvParser
+TsvImporterTextMapper.parser 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/TableSnapshotInputFormatImpl.InputSplit.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/TableSnapshotInputFormatImpl.InputSplit.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/TableSnapshotInputFormatImpl.InputSplit.html
index f199933..f53c343 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/TableSnapshotInputFormat

[42/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/coc.html
--
diff --git a/coc.html b/coc.html
index a08e293..2b26d01 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -331,7 +331,7 @@ For flagrant violations requiring a firm response the PMC 
may opt to skip early
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 0690022..e4636b2 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -673,7 +673,7 @@ Now your HBase server is running, start 
coding and build that next
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 8367d5c..d3406dd 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -518,7 +518,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 5f43fc8..bfc8a34 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -1702,7 +1702,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 998f331..cfe7d50 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -312,7 +312,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 8794a6f..23b7454 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -798,7 +798,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-04-07
+  Last Published: 
2016-04-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/allclasses-frame.html
--
diff --git a/devapidocs/allclasses-frame.html b/devapidocs/allclasses-frame.html
index cbe863b..d58a70b 100644
--- a/devapidocs/allclasses-frame.html
+++ b/devapidocs/allclasses-frame.html
@@ -481,8 +481,10 @@
 DataType
 DateTieredCompactionPolicy
 DateTieredCompactionPolicy.Window
+DateTieredCompactionRequest
 DateTieredCompactor
 DateTieredMultiFileWriter
+DateTieredStoreEngine
 DeadServer
 DecoderFactory
 Decryptor
@@ -614,6 +616,16 @@
 FanOutOneBlockAsyncDFSOutputHelper.NameNodeException
 FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatu

[22/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterKeepAliveConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterKeepAliveConnection.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterKeepAliveConnection.html
index 7ed9d6b..b63b90f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterKeepAliveConnection.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterKeepAliveConnection.html
@@ -111,13 +111,17 @@
 
 
 MasterKeepAliveConnection
+ConnectionImplementation.getKeepAliveMasterService() 
+
+
+MasterKeepAliveConnection
 ClusterConnection.getKeepAliveMasterService()
 Deprecated. 
 Since 0.96.0
 
 
 
-
+
 MasterKeepAliveConnection
 HConnection.getKeepAliveMasterService()
 Deprecated. 
@@ -125,10 +129,6 @@
 
 
 
-
-MasterKeepAliveConnection
-ConnectionImplementation.getKeepAliveMasterService() 
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/MetricsConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/MetricsConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/MetricsConnection.html
index c4b73a5..b5ad67e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/MetricsConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/MetricsConnection.html
@@ -104,11 +104,11 @@
 
 
 private MetricsConnection
-MetaCache.metrics 
+ConnectionImplementation.metrics 
 
 
 private MetricsConnection
-ConnectionImplementation.metrics 
+MetaCache.metrics 
 
 
 
@@ -121,11 +121,11 @@
 
 
 MetricsConnection
-ClusterConnection.getConnectionMetrics() 
+ConnectionImplementation.getConnectionMetrics() 
 
 
 MetricsConnection
-ConnectionImplementation.getConnectionMetrics() 
+ClusterConnection.getConnectionMetrics() 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/MultiAction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/MultiAction.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/MultiAction.html
index f0a8a9f..b2cfac0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/MultiAction.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/MultiAction.html
@@ -101,13 +101,13 @@
 DelayingRunner.actions 
 
 
-private MultiAction
-MultiServerCallable.multiAction 
-
-
 private MultiAction
 AsyncProcess.AsyncRequestFutureImpl.SingleServerRequestRunnable.multiAction 
 
+
+private MultiAction
+MultiServerCallable.multiAction 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
index 40dd766..5736a02 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
@@ -586,21 +586,21 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-NoopOperationQuota.addMutation(Mutation mutation) 
+DefaultOperationQuota.addMutation(Mutation mutation) 
 
 
 void
-OperationQuota.addMutation(Mutation mutation)
-Add a mutation result.
-
+NoopOperationQuota.addMutation(Mutation mutation) 
 
 
 void
-OperationQuota.AvgOperationSize.addMutation(Mutation mutation) 
+OperationQuota.addMutation(Mutation mutation)
+Add a mutation result.
+
 
 
 void
-DefaultOperationQuota.addMutation(Mutation mutation) 
+OperationQuota.AvgOperationSize.addMutation(Mutation mutation) 
 
 
 static long

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/NonceGenerator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/NonceGenerator.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/NonceGenerator.html
index dd53617..a0dc6af 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/NonceGenerator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/NonceGenerator.html
@@ -143,32 +143,32 @@
 
 
 NonceGenerator
-ClusterConnection.getNonceGenerator() 
+ConnectionImplementation.getNonceGenerator() 
 
 
 NonceGenerator
+ClusterConnection.getNonceGenerator() 
+
+
+NonceGenerator
 HConnection.getNonceGenerator()
 Deprecated. 
 internal method, do not use thru HConnection
 
 
 
-
-NonceGenerator
-ConnectionImplementation.getNonceGenerator() 
-
 
 Nonc

[04/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
index 60e16d5..db2a536 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStore.html
@@ -162,32 +162,26 @@ extends 
 
 
-void
-rollback(Cell cell)
-Remove n key from the memstore.
-
-
-
 long
 size() 
 
-
+
 MemStoreSnapshot
 snapshot()
 Creates a snapshot of the current memstore.
 
 
-
+
 MemStoreSnapshot
 snapshot(long flushOpSeqId)
 Creates a snapshot of the current memstore.
 
 
-
+
 long
 timeOfOldestEdit() 
 
-
+
 long
 updateColumnValue(byte[] row,
   byte[] family,
@@ -198,7 +192,7 @@ extends 
 
 
-
+
 long
 upsert(http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
 long readpoint)
@@ -314,25 +308,13 @@ extends Returns:Oldest timestamp of all 
the Cells in the MemStore
 
 
-
-
-
-
-
-rollback
-void rollback(Cell cell)
-Remove n key from the memstore. Only kvs that have the same 
key and the same memstoreTS are
- removed. It is ok to not update timeRangeTracker in this call.
-Parameters:cell - 

-
-
 
 
 
 
 
 delete
-long delete(Cell deleteCell)
+long delete(Cell deleteCell)
 Write a delete
 Parameters:deleteCell - 
 Returns:approximate size of the 
passed key and value.
@@ -344,7 +326,7 @@ extends 
 
 updateColumnValue
-long updateColumnValue(byte[] row,
+long updateColumnValue(byte[] row,
  byte[] family,
  byte[] qualifier,
  long newValue,
@@ -365,7 +347,7 @@ extends 
 
 upsert
-long upsert(http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
+long upsert(http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable cells,
   long readpoint)
 Update or insert the specified cells.
  
@@ -387,7 +369,7 @@ extends 
 
 getScanners
-http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getScanners(long readPt)
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getScanners(long readPt)
   throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Returns:scanner over the 
memstore. This might include scanner over the snapshot when one is
  present.
@@ -401,7 +383,7 @@ extends 
 
 size
-long size()
+long size()
 Returns:Total memory occupied by 
this MemStore.
 
 
@@ -411,7 +393,7 @@ extends 
 
 finalizeFlush
-void finalizeFlush()
+void finalizeFlush()
 This method is called when it is clear that the flush to 
disk is completed.
  The store may do any post-flush actions at this point.
  One example is to update the wal with sequence number that is known only at 
the store level.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
index cbd926b..d13fa6b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.FlushResult.Result.html
@@ -246,7 +246,7 @@ the order they are declared.
 
 
 values
-public static Region.FlushResult.Result[] values()
+public static Region.FlushResult.Result[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -263,7 +263,7 @@ for (Region.FlushResult.Result c : 
Region.FlushResult.Result.values())
 
 
 valueOf
-public static Region.FlushResult.Result valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static Region.FlushResult.Result valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespac

[35/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/HDFSBlocksDistribution.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/HDFSBlocksDistribution.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HDFSBlocksDistribution.html
index d037968..3df7b03 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HDFSBlocksDistribution.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HDFSBlocksDistribution.html
@@ -262,11 +262,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 HDFSBlocksDistribution
-StoreFileInfo.getHDFSBlockDistribution() 
+StoreFile.getHDFSBlockDistribution() 
 
 
 HDFSBlocksDistribution
-StoreFile.getHDFSBlockDistribution() 
+StoreFileInfo.getHDFSBlockDistribution() 
 
 
 HDFSBlocksDistribution



[20/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 02f0332..56ae34c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -396,13 +396,13 @@
 
 java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.AsyncProcess.Retry
-org.apache.hadoop.hbase.client.TableState.State
 org.apache.hadoop.hbase.client.Durability
-org.apache.hadoop.hbase.client.Admin.MasterSwitchType
+org.apache.hadoop.hbase.client.TableState.State
 org.apache.hadoop.hbase.client.Admin.CompactType
+org.apache.hadoop.hbase.client.Admin.MasterSwitchType
+org.apache.hadoop.hbase.client.AsyncProcess.Retry
+org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.IsolationLevel
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
index dd054a5..167d831 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
@@ -1144,47 +1144,15 @@ service.
 
 
 
-BufferedMutator
-Used to communicate with a single HBase table similar to Table but 
meant for
- batched, asynchronous puts.
-
-
-
-Connection
-A cluster connection encapsulating lower level individual 
connections to actual servers and
- a connection to zookeeper.
-
-
-
 Put
 Used to perform Put operations for a single row.
 
 
 
-RegionLocator
-Used to view region location information for a single HBase 
table.
-
-
-
 Result
 Single row result of a Get or Scan query.
 
 
-
-ResultScanner
-Interface for client-side scanning.
-
-
-
-Scan
-Used to perform Scan operations.
-
-
-
-Table
-Used to communicate with a single HBase table.
-
-
 
 
 
@@ -1254,11 +1222,16 @@ service.
 
 
 
+ResultScanner
+Interface for client-side scanning.
+
+
+
 Scan
 Used to perform Scan operations.
 
 
-
+
 Table
 Used to communicate with a single HBase table.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/security/class-use/SecurityCapability.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/security/class-use/SecurityCapability.html
 
b/devapidocs/org/apache/hadoop/hbase/client/security/class-use/SecurityCapability.html
index 0af7b8c..2e5d672 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/security/class-use/SecurityCapability.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/security/class-use/SecurityCapability.html
@@ -102,14 +102,14 @@
 
 
 http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-HBaseAdmin.getSecurityCapabilities() 
-
-
-http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 Admin.getSecurityCapabilities()
 Return the set of supported security capabilities.
 
 
+
+http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HBaseAdmin.getSecurityCapabilities() 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/codec/class-use/Codec.Decoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/codec/class-use/Codec.Decoder.html 
b/devapidocs/org/apache/hadoop/hbase/codec/class-use/Codec.Decoder.html
index 49b8afe..3ddf9ee 100644
--- a/devapidocs/org/apache/hadoop/hbase/codec/class-use/Codec.Decoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/codec/class-use/Codec.Decoder.html
@@ -143,11 +143,11 @@
 
 
 Codec.Decoder
-CellCodec.getDecoder(http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in 
java.nio">ByteBuffer buf) 
+KeyValueCodec.getDecoder(http://doc

[02/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionServer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionServer.html
index 4fdb11d..cb9549b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionServer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionServer.html
@@ -277,31 +277,31 @@
 
 
 private HRegionServer
-StorefileRefresherChore.regionServer 
+RSRpcServices.regionServer 
 
 
 private HRegionServer
-HRegionServer.MovedRegionsCleaner.regionServer 
+StorefileRefresherChore.regionServer 
 
 
 private HRegionServer
-RSRpcServices.regionServer 
+MetricsRegionServerWrapperImpl.regionServer 
 
 
 private HRegionServer
-MetricsRegionServerWrapperImpl.regionServer 
+HRegionServer.MovedRegionsCleaner.regionServer 
 
 
 private HRegionServer
-RegionMergeRequest.server 
+MemStoreFlusher.server 
 
 
 private HRegionServer
-MemStoreFlusher.server 
+CompactSplitThread.server 
 
 
 private HRegionServer
-CompactSplitThread.server 
+SplitRequest.server 
 
 
 (package private) HRegionServer
@@ -309,7 +309,7 @@
 
 
 private HRegionServer
-SplitRequest.server 
+RegionMergeRequest.server 
 
 
 
@@ -501,11 +501,11 @@
 
 
 private HRegionServer
-RSStatusTmpl.ImplData.m_regionServer 
+RegionListTmpl.ImplData.m_regionServer 
 
 
 private HRegionServer
-RegionListTmpl.ImplData.m_regionServer 
+RSStatusTmpl.ImplData.m_regionServer 
 
 
 private HRegionServer
@@ -526,11 +526,11 @@
 
 
 HRegionServer
-RSStatusTmpl.ImplData.getRegionServer() 
+RegionListTmpl.ImplData.getRegionServer() 
 
 
 HRegionServer
-RegionListTmpl.ImplData.getRegionServer() 
+RSStatusTmpl.ImplData.getRegionServer() 
 
 
 
@@ -574,11 +574,11 @@
 
 
 void
-RSStatusTmpl.ImplData.setRegionServer(HRegionServer regionServer) 
+RegionListTmpl.ImplData.setRegionServer(HRegionServer regionServer) 
 
 
 void
-RegionListTmpl.ImplData.setRegionServer(HRegionServer regionServer) 
+RSStatusTmpl.ImplData.setRegionServer(HRegionServer regionServer) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerContext.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerContext.html
index 3a79826..7e311f2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerContext.html
@@ -121,17 +121,17 @@
 
 
 HeapMemoryManager.TunerResult
-DefaultHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
+HeapMemoryTuner.tune(HeapMemoryManager.TunerContext context)
+Perform the heap memory tuning operation.
+
 
 
 HeapMemoryManager.TunerResult
-NoOpHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
+DefaultHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
 
 
 HeapMemoryManager.TunerResult
-HeapMemoryTuner.tune(HeapMemoryManager.TunerContext context)
-Perform the heap memory tuning operation.
-
+NoOpHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerResult.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerResult.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerResult.html
index 4519a30..5b662df 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerResult.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.TunerResult.html
@@ -117,17 +117,17 @@
 
 
 HeapMemoryManager.TunerResult
-DefaultHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
+HeapMemoryTuner.tune(HeapMemoryManager.TunerContext context)
+Perform the heap memory tuning operation.
+
 
 
 HeapMemoryManager.TunerResult
-NoOpHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
+DefaultHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
 
 
 HeapMemoryManager.TunerResult
-HeapMemoryTuner.tune(HeapMemoryManager.TunerContext context)
-Perform the heap memory tuning operation.
-
+NoOpHeapMemoryTuner.tune(HeapMemoryManager.TunerContext context) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
---

[51/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/db94a639
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/db94a639
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/db94a639

Branch: refs/heads/asf-site
Commit: db94a6390d157e542d0f36650c16249f6ca21d6a
Parents: b0a0486
Author: jenkins 
Authored: Fri Apr 8 15:06:21 2016 +
Committer: Misty Stanley-Jones 
Committed: Fri Apr 8 09:20:07 2016 -0700

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |   230 +-
 .../hadoop/hbase/class-use/TableName.html   |36 +-
 .../apache/hadoop/hbase/client/Durability.html  | 4 +-
 .../hadoop/hbase/client/IsolationLevel.html | 4 +-
 .../hadoop/hbase/client/class-use/Admin.html| 6 +-
 .../hbase/client/class-use/Durability.html  |16 +-
 .../hadoop/hbase/client/class-use/Mutation.html | 8 +-
 .../hbase/client/class-use/RegionLocator.html   | 6 +-
 .../hadoop/hbase/client/class-use/Result.html   |50 +-
 .../hadoop/hbase/client/class-use/Row.html  | 6 +-
 .../hadoop/hbase/client/class-use/Scan.html | 4 +-
 .../hadoop/hbase/client/class-use/Table.html| 4 +-
 .../hadoop/hbase/client/package-tree.html   | 4 +-
 .../hbase/filter/CompareFilter.CompareOp.html   | 4 +-
 .../filter/class-use/Filter.ReturnCode.html |66 +-
 .../hadoop/hbase/filter/class-use/Filter.html   |40 +-
 .../hadoop/hbase/filter/package-tree.html   | 4 +-
 .../io/class-use/ImmutableBytesWritable.html|70 +-
 .../hbase/io/crypto/class-use/Cipher.html   | 8 +-
 .../hbase/io/encoding/DataBlockEncoding.html| 4 +-
 .../mapreduce/class-use/TableRecordReader.html  | 4 +-
 .../apache/hadoop/hbase/quotas/QuotaType.html   | 4 +-
 .../hbase/quotas/ThrottlingException.Type.html  | 4 +-
 .../hadoop/hbase/quotas/package-tree.html   | 4 +-
 apidocs/org/apache/hadoop/hbase/util/Order.html | 4 +-
 .../hadoop/hbase/util/class-use/ByteRange.html  |40 +-
 .../hadoop/hbase/util/class-use/Order.html  |42 +-
 .../util/class-use/PositionedByteRange.html |   270 +-
 apidocs/overview-tree.html  |18 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 20652 -
 checkstyle.rss  |   664 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html|14 +
 devapidocs/allclasses-noframe.html  |14 +
 devapidocs/constant-values.html |79 +-
 devapidocs/deprecated-list.html |   290 +-
 devapidocs/index-all.html   |   430 +-
 .../HealthChecker.HealthCheckerExitStatus.html  | 4 +-
 .../org/apache/hadoop/hbase/KeyValue.Type.html  | 4 +-
 .../hbase/MetaTableAccessor.QueryType.html  | 4 +-
 .../hadoop/hbase/class-use/Abortable.html   |20 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |   806 +-
 .../hadoop/hbase/class-use/CellComparator.html  |   128 +-
 .../hadoop/hbase/class-use/CellScanner.html |32 +-
 .../hadoop/hbase/class-use/ClusterStatus.html   |24 +-
 .../hbase/class-use/CoprocessorEnvironment.html |60 +-
 .../hbase/class-use/HBaseIOException.html   |12 +-
 .../hbase/class-use/HColumnDescriptor.html  |   280 +-
 .../hbase/class-use/HDFSBlocksDistribution.html | 4 +-
 .../hadoop/hbase/class-use/HRegionInfo.html |   408 +-
 .../hadoop/hbase/class-use/HRegionLocation.html |   158 +-
 .../hbase/class-use/HTableDescriptor.html   |   440 +-
 .../apache/hadoop/hbase/class-use/KeyValue.html |22 +-
 .../class-use/MasterNotRunningException.html|20 +-
 .../hbase/class-use/NamespaceDescriptor.html|   124 +-
 .../hadoop/hbase/class-use/ProcedureInfo.html   |20 +-
 .../hadoop/hbase/class-use/RegionLocations.html |40 +-
 .../apache/hadoop/hbase/class-use/Server.html   |   122 +-
 .../hadoop/hbase/class-use/ServerName.html  |   304 +-
 .../hadoop/hbase/class-use/SplitLogTask.html|14 +-
 .../hbase/class-use/TableExistsException.html   |26 +-
 .../hadoop/hbase/class-use/TableName.html   |  1572 +-
 .../hbase/class-use/TableNot

[16/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index 1a8f56a..5e1dc46 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
@@ -124,11 +124,11 @@
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterColumn(Cell cell) 
+MultipleColumnPrefixFilter.filterColumn(Cell cell) 
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterColumn(Cell cell) 
+ColumnPrefixFilter.filterColumn(Cell cell) 
 
 
 Filter.ReturnCode
@@ -136,107 +136,107 @@
 
 
 Filter.ReturnCode
-FilterWrapper.filterKeyValue(Cell v) 
+PrefixFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-QualifierFilter.filterKeyValue(Cell v) 
+FamilyFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-ColumnRangeFilter.filterKeyValue(Cell kv) 
+WhileMatchFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-FirstKeyValueMatchingQualifiersFilter.filterKeyValue(Cell v)
-Deprecated. 
- 
+InclusiveStopFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-TimestampsFilter.filterKeyValue(Cell v) 
+FirstKeyOnlyFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-WhileMatchFilter.filterKeyValue(Cell v) 
+TimestampsFilter.filterKeyValue(Cell v) 
 
 
-Filter.ReturnCode
-ColumnCountGetFilter.filterKeyValue(Cell v) 
+abstract Filter.ReturnCode
+Filter.filterKeyValue(Cell v)
+A way to filter based on the column family, column 
qualifier and/or the column value.
+
 
 
 Filter.ReturnCode
-SingleColumnValueFilter.filterKeyValue(Cell c) 
+KeyOnlyFilter.filterKeyValue(Cell ignored) 
 
 
 Filter.ReturnCode
-FamilyFilter.filterKeyValue(Cell v) 
+MultipleColumnPrefixFilter.filterKeyValue(Cell kv) 
 
 
 Filter.ReturnCode
-FirstKeyOnlyFilter.filterKeyValue(Cell v) 
+QualifierFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-RowFilter.filterKeyValue(Cell v) 
+SkipFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterKeyValue(Cell cell) 
+ColumnCountGetFilter.filterKeyValue(Cell v) 
 
 
-abstract Filter.ReturnCode
-Filter.filterKeyValue(Cell v)
-A way to filter based on the column family, column 
qualifier and/or the column value.
-
+Filter.ReturnCode
+RandomRowFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-ValueFilter.filterKeyValue(Cell v) 
+FuzzyRowFilter.filterKeyValue(Cell c) 
 
 
 Filter.ReturnCode
-PageFilter.filterKeyValue(Cell ignored) 
+SingleColumnValueFilter.filterKeyValue(Cell c) 
 
 
 Filter.ReturnCode
-SkipFilter.filterKeyValue(Cell v) 
+FilterList.filterKeyValue(Cell c) 
 
 
 Filter.ReturnCode
-FuzzyRowFilter.filterKeyValue(Cell c) 
+ColumnRangeFilter.filterKeyValue(Cell kv) 
 
 
 Filter.ReturnCode
-FilterList.filterKeyValue(Cell c) 
+MultiRowRangeFilter.filterKeyValue(Cell ignored) 
 
 
 Filter.ReturnCode
-MultiRowRangeFilter.filterKeyValue(Cell ignored) 
+FirstKeyValueMatchingQualifiersFilter.filterKeyValue(Cell v)
+Deprecated. 
+ 
 
 
 Filter.ReturnCode
-RandomRowFilter.filterKeyValue(Cell v) 
+ColumnPrefixFilter.filterKeyValue(Cell cell) 
 
 
 Filter.ReturnCode
-InclusiveStopFilter.filterKeyValue(Cell v) 
+PageFilter.filterKeyValue(Cell ignored) 
 
 
 Filter.ReturnCode
-KeyOnlyFilter.filterKeyValue(Cell ignored) 
+RowFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterKeyValue(Cell kv) 
+ValueFilter.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-ColumnPaginationFilter.filterKeyValue(Cell v) 
+FilterWrapper.filterKeyValue(Cell v) 
 
 
 Filter.ReturnCode
-PrefixFilter.filterKeyValue(Cell v) 
+ColumnPaginationFilter.filterKeyValue(Cell v) 
 
 
 static Filter.ReturnCode

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index a0cbdd5..435b828 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -413,17 +413,17 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-(package private) Filter
-FilterWrapper.filter 
-
-
 private Filter
 WhileMatchFilter.filter 
 
-
+
 private Filter
 SkipFilter.filter 
 
+
+(package private) Filter
+FilterWrapper.filter 
+
 
 private Filter
 FilterList.seekHintFilter 
@@ -456,77 +456,77 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-QualifierFilter.createFilterFromArguments(http://docs.oracle.com/javase/7/docs/api/java/util/ArrayList.html?is-external=true";
 

[27/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 62a09d1..8422fa3 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -313,11 +313,11 @@ service.
 
 
 private TableName
-HRegionInfo.tableName 
+MetaTableAccessor.TableVisitorBase.tableName 
 
 
 private TableName
-MetaTableAccessor.TableVisitorBase.tableName 
+HRegionInfo.tableName 
 
 
 
@@ -759,19 +759,19 @@ service.
 
 
 private TableName
-ClientScanner.tableName 
+HRegionLocator.tableName 
 
 
 protected TableName
-RegionServerCallable.tableName 
+RpcRetryingCallerWithReadReplicas.tableName 
 
 
 private TableName
-HBaseAdmin.TableFuture.tableName 
+BufferedMutatorImpl.tableName 
 
 
 private TableName
-HRegionLocator.tableName 
+TableState.tableName 
 
 
 private TableName
@@ -783,27 +783,27 @@ service.
 
 
 private TableName
-BufferedMutatorImpl.tableName 
+AsyncProcess.AsyncRequestFutureImpl.tableName 
 
 
-private TableName
-TableState.tableName 
+protected TableName
+RegionServerCallable.tableName 
 
 
 private TableName
-ScannerCallableWithReplicas.tableName 
+HTable.tableName 
 
 
-protected TableName
-RpcRetryingCallerWithReadReplicas.tableName 
+private TableName
+ClientScanner.tableName 
 
 
 private TableName
-AsyncProcess.AsyncRequestFutureImpl.tableName 
+ScannerCallableWithReplicas.tableName 
 
 
 private TableName
-HTable.tableName 
+HBaseAdmin.TableFuture.tableName 
 
 
 
@@ -837,18 +837,6 @@ service.
 
 
 TableName
-RegionLocator.getName()
-Gets the fully qualified table name instance of this 
table.
-
-
-
-TableName
-Table.getName()
-Gets the fully qualified table name instance of this 
table.
-
-
-
-TableName
 HRegionLocator.getName() 
 
 
@@ -863,8 +851,20 @@ service.
 
 
 TableName
+Table.getName()
+Gets the fully qualified table name instance of this 
table.
+
+
+
+TableName
 HTable.getName() 
 
+
+TableName
+RegionLocator.getName()
+Gets the fully qualified table name instance of this 
table.
+
+
 
 TableName
 HTableWrapper.getName() 
@@ -875,21 +875,21 @@ service.
 
 
 TableName
-RegionServerCallable.getTableName() 
+TableState.getTableName()
+Table name for state
+
 
 
-protected TableName
-HBaseAdmin.TableFuture.getTableName() 
+TableName
+BufferedMutatorParams.getTableName() 
 
 
 TableName
-BufferedMutatorParams.getTableName() 
+RegionServerCallable.getTableName() 
 
 
-TableName
-TableState.getTableName()
-Table name for state
-
+protected TableName
+HBaseAdmin.TableFuture.getTableName() 
 
 
 private TableName
@@ -897,84 +897,84 @@ service.
 
 
 TableName[]
-HBaseAdmin.listTableNames() 
-
-
-TableName[]
-HConnection.listTableNames()
+ConnectionImplementation.listTableNames()
 Deprecated. 
-Use Admin.listTables()
 instead.
+Use Admin.listTableNames()
 instead
 
 
 
-
+
 TableName[]
 Admin.listTableNames()
 List all of the names of userspace tables.
 
 
+
+TableName[]
+HBaseAdmin.listTableNames() 
+
 
 TableName[]
-ConnectionImplementation.listTableNames()
+HConnection.listTableNames()
 Deprecated. 
-Use Admin.listTableNames()
 instead
+Use Admin.listTables()
 instead.
 
 
 
 
 TableName[]
-HBaseAdmin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
-
-
-TableName[]
 Admin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern)
 List all of the names of userspace tables.
 
 
-
+
 TableName[]
-HBaseAdmin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern,
-boolean includeSysTables) 
+HBaseAdmin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in 
java.util.regex">Pattern pattern) 
 
-
+
 TableName[]
 Admin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern,
 boolean includeSysTables)
 List all of the names of userspace tables.
 
 
-
+
 TableName[]
-HBaseAdmin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String regex) 
+HBaseAdmin.listTableNames(http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html?is-external=true";
 title="class or interface in java.util.regex">Pattern pattern,
+boolean includeSysTables) 
 
-
+
 TableName[]
 Adm

[49/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/client/Durability.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Durability.html 
b/apidocs/org/apache/hadoop/hbase/client/Durability.html
index c083248..5c3c3ea 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Durability.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Durability.html
@@ -280,7 +280,7 @@ the order they are declared.
 
 
 values
-public static Durability[] values()
+public static Durability[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -297,7 +297,7 @@ for (Durability c : Durability.values())
 
 
 valueOf
-public static Durability valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static Durability valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/client/IsolationLevel.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/IsolationLevel.html 
b/apidocs/org/apache/hadoop/hbase/client/IsolationLevel.html
index ac83789..927480e 100644
--- a/apidocs/org/apache/hadoop/hbase/client/IsolationLevel.html
+++ b/apidocs/org/apache/hadoop/hbase/client/IsolationLevel.html
@@ -243,7 +243,7 @@ the order they are declared.
 
 
 values
-public static IsolationLevel[] values()
+public static IsolationLevel[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -260,7 +260,7 @@ for (IsolationLevel c : IsolationLevel.values())
 
 
 valueOf
-public static IsolationLevel valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static IsolationLevel valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Admin.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
index 63a3845..72528d4 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
@@ -105,14 +105,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 Admin
-Connection.getAdmin()
+HConnection.getAdmin()
+Deprecated. 
 Retrieve an Admin implementation to administer an HBase 
cluster.
 
 
 
 Admin
-HConnection.getAdmin()
-Deprecated. 
+Connection.getAdmin()
 Retrieve an Admin implementation to administer an HBase 
cluster.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Durability.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
index 7e2cc89..a225a9c 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
@@ -191,6 +191,14 @@ the order they are declared.
 
 
 
+Put
+Put.setDurability(Durability d) 
+
+
+Append
+Append.setDurability(Durability d) 
+
+
 Increment
 Increment.setDurability(Durability d) 
 
@@ -204,14 +212,6 @@ the order they are declared.
 Delete
 Delete.setDurability(Durability d) 
 
-
-Append
-Append.setDurability(Durability d) 
-
-
-Put
-Put.setDurability(Durability d) 
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/apidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
index c0ca016..f176d84 100644
--- a/apidocs/org/apache/hadoop/h

[43/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 95e3493..8280441 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2016 The Apache Software Foundation
 
-  File: 1724,
- Errors: 12504,
+  File: 1728,
+ Errors: 12478,
  Warnings: 0,
  Infos: 0
   
@@ -130,7 +130,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.LogRoller.java";>org/apache/hadoop/hbase/regionserver/LogRoller.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.RowTooBigException.java";>org/apache/hadoop/hbase/regionserver/RowTooBigException.java
 
 
   0
@@ -139,12 +139,12 @@ under the License.
   0
 
 
-  5
+  0
 
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.RowTooBigException.java";>org/apache/hadoop/hbase/regionserver/RowTooBigException.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.LogRoller.java";>org/apache/hadoop/hbase/regionserver/LogRoller.java
 
 
   0
@@ -153,7 +153,7 @@ under the License.
   0
 
 
-  0
+  5
 
   
   
@@ -326,7 +326,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.MultiAction.java";>org/apache/hadoop/hbase/client/MultiAction.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ResultStatsUtil.java";>org/apache/hadoop/hbase/client/ResultStatsUtil.java
 
 
   0
@@ -335,12 +335,12 @@ under the License.
   0
 
 
-  3
+  0
 
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ResultStatsUtil.java";>org/apache/hadoop/hbase/client/ResultStatsUtil.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.MultiAction.java";>org/apache/hadoop/hbase/client/MultiAction.java
 
 
   0
@@ -349,7 +349,7 @@ under the License.
   0
 
 
-  0
+  3
 
   
   
@@ -466,7 +466,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.ByteArrayOutputStream.java";>org/apache/hadoop/hbase/io/ByteArrayOutputStream.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.visibility.VisibilityConstants.java";>org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java
 
 
   0
@@ -475,12 +475,12 @@ under the License.
   0
 
 
-  0
+  1
 
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.visibility.VisibilityConstants.java";>org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.ByteArrayOutputStream.java";>org/apache/hadoop/hbase/io/ByteArrayOutputStream.java
 
 
   0
@@ -489,7 +489,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -648,7 +648,7 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.zookeeper.ZKConfig.java";>org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp.java";>org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
 
 

[33/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index 4548c8e..524c162 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -266,11 +266,11 @@ service.
 
 
 protected HRegionLocation
-RegionServerCallable.location 
+RegionAdminServiceCallable.location 
 
 
 protected HRegionLocation
-RegionAdminServiceCallable.location 
+RegionServerCallable.location 
 
 
 
@@ -310,32 +310,38 @@ service.
 
 
 HRegionLocation
-RegionLocator.getRegionLocation(byte[] row)
+HRegionLocator.getRegionLocation(byte[] row)
 Finds the region on which the given row is being 
served.
 
 
 
 HRegionLocation
-HRegionLocator.getRegionLocation(byte[] row)
+RegionLocator.getRegionLocation(byte[] row)
 Finds the region on which the given row is being 
served.
 
 
 
 HRegionLocation
-RegionLocator.getRegionLocation(byte[] row,
+HRegionLocator.getRegionLocation(byte[] row,
   boolean reload)
 Finds the region on which the given row is being 
served.
 
 
 
 HRegionLocation
-HRegionLocator.getRegionLocation(byte[] row,
+RegionLocator.getRegionLocation(byte[] row,
   boolean reload)
 Finds the region on which the given row is being 
served.
 
 
 
 HRegionLocation
+ConnectionImplementation.getRegionLocation(byte[] tableName,
+  byte[] row,
+  boolean reload) 
+
+
+HRegionLocation
 HConnection.getRegionLocation(byte[] tableName,
   byte[] row,
   boolean reload)
@@ -344,13 +350,13 @@ service.
 
 
 
-
+
 HRegionLocation
-ConnectionImplementation.getRegionLocation(byte[] tableName,
+ConnectionImplementation.getRegionLocation(TableName tableName,
   byte[] row,
   boolean reload) 
 
-
+
 HRegionLocation
 ClusterConnection.getRegionLocation(TableName tableName,
   byte[] row,
@@ -358,7 +364,7 @@ service.
 Find region location hosting passed row
 
 
-
+
 HRegionLocation
 HConnection.getRegionLocation(TableName tableName,
   byte[] row,
@@ -368,23 +374,21 @@ service.
 
 
 
-
-HRegionLocation
-ConnectionImplementation.getRegionLocation(TableName tableName,
-  byte[] row,
-  boolean reload) 
-
 
 private HRegionLocation
 AsyncProcess.AsyncRequestFutureImpl.getReplicaLocationOrFail(Action action) 
 
 
 HRegionLocation
+ConnectionImplementation.locateRegion(byte[] regionName) 
+
+
+HRegionLocation
 ClusterConnection.locateRegion(byte[] regionName)
 Gets the location of the region of regionName.
 
 
-
+
 HRegionLocation
 HConnection.locateRegion(byte[] regionName)
 Deprecated. 
@@ -392,11 +396,12 @@ service.
 
 
 
-
+
 HRegionLocation
-ConnectionImplementation.locateRegion(byte[] regionName) 
+ConnectionImplementation.locateRegion(byte[] tableName,
+byte[] row) 
 
-
+
 HRegionLocation
 HConnection.locateRegion(byte[] tableName,
 byte[] row)
@@ -405,12 +410,12 @@ service.
 
 
 
-
+
 HRegionLocation
-ConnectionImplementation.locateRegion(byte[] tableName,
+ConnectionImplementation.locateRegion(TableName tableName,
 byte[] row) 
 
-
+
 HRegionLocation
 ClusterConnection.locateRegion(TableName tableName,
 byte[] row)
@@ -418,7 +423,7 @@ service.
  lives in.
 
 
-
+
 HRegionLocation
 HConnection.locateRegion(TableName tableName,
 byte[] row)
@@ -427,12 +432,12 @@ service.
 
 
 
-
+
 HRegionLocation
-ConnectionImplementation.locateRegion(TableName tableName,
-byte[] row) 
+ConnectionImplementation.relocateRegion(byte[] tableName,
+byte[] row) 
 
-
+
 HRegionLocation
 HConnection.relocateRegion(byte[] tableName,
 byte[] row)
@@ -441,12 +446,12 @@ service.
 
 
 
-
+
 HRegionLocation
-ConnectionImplementation.relocateRegion(byte[] tableName,
+ConnectionImplementation.relocateRegion(TableName tableName,
 byte[] row) 
 
-
+
 HRegionLocation
 ClusterConnection.relocateRegion(TableName tableName,
 byte[] row)
@@ -454,7 +459,7 @@ service.
  lives in, ignoring any value that might be in the cache.
 
 
-
+
 HRegionLocation
 HConnection.relocateRegion(TableName tableName,
 byte[] row)
@@ -463,11 +468,6 @@ service.
 
 
 
-
-HRegionLocation
-ConnectionImplementation.relocateRegion(TableName tableName,
-

[23/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.MasterSwitchType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.MasterSwitchType.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.MasterSwitchType.html
index 32e3b52..4ed5de4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.MasterSwitchType.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.MasterSwitchType.html
@@ -146,21 +146,15 @@ the order they are declared.
 
 
 boolean
-HBaseAdmin.isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) 
-
-
-boolean
 Admin.isSplitOrMergeEnabled(Admin.MasterSwitchType switchType)
 Query the current state of the switch
 
 
-
-boolean[]
-HBaseAdmin.setSplitOrMergeEnabled(boolean enabled,
-boolean synchronous,
-Admin.MasterSwitchType... switchTypes) 
-
 
+boolean
+HBaseAdmin.isSplitOrMergeEnabled(Admin.MasterSwitchType switchType) 
+
+
 boolean[]
 Admin.setSplitOrMergeEnabled(boolean enabled,
 boolean synchronous,
@@ -168,6 +162,12 @@ the order they are declared.
 Turn the Split or Merge switches on or off.
 
 
+
+boolean[]
+HBaseAdmin.setSplitOrMergeEnabled(boolean enabled,
+boolean synchronous,
+Admin.MasterSwitchType... switchTypes) 
+
 
 
 
@@ -184,9 +184,11 @@ the order they are declared.
 
 
 void
-BaseMasterObserver.postSetSplitOrMergeEnabled(ObserverContext ctx,
+MasterObserver.postSetSplitOrMergeEnabled(ObserverContext ctx,
 boolean newValue,
-Admin.MasterSwitchType switchType) 
+Admin.MasterSwitchType switchType)
+Called after setting split / merge switch
+
 
 
 void
@@ -196,17 +198,17 @@ the order they are declared.
 
 
 void
-MasterObserver.postSetSplitOrMergeEnabled(ObserverContext ctx,
+BaseMasterObserver.postSetSplitOrMergeEnabled(ObserverContext ctx,
 boolean newValue,
-Admin.MasterSwitchType switchType)
-Called after setting split / merge switch
-
+Admin.MasterSwitchType switchType) 
 
 
 boolean
-BaseMasterObserver.preSetSplitOrMergeEnabled(ObserverContext ctx,
+MasterObserver.preSetSplitOrMergeEnabled(ObserverContext ctx,
   boolean newValue,
-  Admin.MasterSwitchType switchType) 
+  Admin.MasterSwitchType switchType)
+Called prior to setting split / merge switch
+
 
 
 boolean
@@ -216,11 +218,9 @@ the order they are declared.
 
 
 boolean
-MasterObserver.preSetSplitOrMergeEnabled(ObserverContext ctx,
+BaseMasterObserver.preSetSplitOrMergeEnabled(ObserverContext ctx,
   boolean newValue,
-  Admin.MasterSwitchType switchType)
-Called prior to setting split / merge switch
-
+  Admin.MasterSwitchType switchType) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
index 9dd2efa..1682a6d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
@@ -165,21 +165,21 @@ service.
 
 
 Admin
+ConnectionImplementation.getAdmin() 
+
+
+Admin
 Connection.getAdmin()
 Retrieve an Admin implementation to administer an HBase 
cluster.
 
 
-
+
 Admin
 HConnection.getAdmin()
 Deprecated. 
 Retrieve an Admin implementation to administer an HBase 
cluster.
 
 
-
-Admin
-ConnectionImplementation.getAdmin() 
-
 
 
 
@@ -262,8 +262,8 @@ service.
 
 
 void
-EmptyNormalizationPlan.execute(Admin admin)
-No-op for empty plan.
+MergeNormalizationPlan.execute(Admin admin)
+Executes normalization plan on cluster (does actual 
splitting/merging work).
 
 
 
@@ -274,13 +274,13 @@ service.
 
 
 void
-NormalizationPlan.execute(Admin admin)
-Executes normalization plan on cluster (does actual 
s

[06/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
index fa0acb2..1cca152 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
@@ -468,7 +468,7 @@ extends HStore
-add,
 addChangedReaderObserver,
 areWritesEnabled,
 assertBulkLoadHFileOk,
 bulkLoadHFile,
 bulkLoadHFile,
 cancelRequestedCompaction, canSplit,
 close,
 closeAndArchiveCompactedFiles,
 compact,
 compactRecentForTestingAssumingDefaultPolicy,
 completeCompaction,
 createFlushContext,
 createWriterInTmp,
 createWriterInTmp,
 delete,
 deleteChangedReaderObserver,
 deregisterChildren, determineTTLFromFamily,
 flushCache,
 getAvgStoreFileAge,
 getBlockingFileCount,
 getBytesPerChecksum,
 
 getCacheConfig, getChecksumType,
 getCloseCheckInterval,
 getColumnFamilyName,
 getCompactedCellsCount,
 getCompactedCellsSize,
 getCompactionCheckMultiplier,
 getCompactionPressure,
 getCompactionProgress, getCompactPriority,
 getComparator,
 getCoprocessorHost,
 getDataBlockEncoder,
 getFamily,
 getFileSystem,
 getFlushableSize,
 getFlushedCellsCount,
 getFlushedCellsSize,
 getFlushedOutputFileSize,
 getHRegion,
 getLastCompactSize,
 getMajorCompactedCellsCount,
 getMajorCompactedCellsSize,
 getMaxMemstoreTS,
 getMaxSequenceId,
 getMaxStoreFileAge, getMemstoreFlushSize,
 getMemStoreSize,
 getMinStoreFileAge,
 getNumHFiles,
 getNumReferenceFiles,
 getOffPeakHours,
 getRegionFileSystem,
 getRegionInf
 o, getScanInfo,
 getScanner,
 getScanners,
 getScanners,
 getSize,
 getSmallestReadPoint,
 getSnapshotSize,
 getSplitPoint,
 getStoreEngine,
 getStorefiles,
 getStorefilesCount,
 getStorefilesIndexSize,
 getStorefilesSize,
 getStoreFileTtl,
 getStoreHomedir,
 getStoreHomedir,
 getStoreSizeUncompressed,
 getTableName,
 getTotalStaticBloomSize,
 getTotalStaticIndexSize,
 hasReferences,
 hasTooManyStoreFiles, href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#heapSize()">heapSize,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#isCellTTLExpired(org.apache.hadoop.hbase.Cell,%20long,%20long)">isCellTTLExpired,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#isMajorCompaction()">isMajorCompaction,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#isPrimaryReplicaStore()">isPrimaryReplicaStore,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#moveFileIntoPlace(org.apache.hadoop.fs.Path)">moveFileIntoPlace,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#needsCompaction()">needsCompaction,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#onConfigurationChange(org.apache.hadoop.conf.Configuration)">onConfigurationChange,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HStore.html#refre
 shStoreFiles()">refreshStoreFiles, refreshStoreFiles,
 registerChildren,
 replaceStoreFiles,
 replayCompactionMarker,
 requestCompaction,
 requestCompaction, requestCompaction,
 rollback,
 setDataBlockEncoderInTest,
 setScanInfo,
 snapshot,
 throttleCompaction,
 timeOfOldestEdit, toString,
 triggerMajorCompaction,
 updateColumnValue,
 upsert,
 versionsToReturn
+add,
 addChangedReaderObserver,
 areWritesEnabled,
 assertBulkLoadHFileOk,
 bulkLoadHFile,
 bulkLoadHFile,
 cancelRequestedCompaction, canSplit,
 close,
 closeAndArchiveCompactedFiles,
 compact,
 compactRecentForTestingAssumingDefaultPolicy,
 completeCompaction,
 createFlushContext,
 createWriterInTmp,
 createWriterInTmp,
 delete,
 deleteChangedReaderObserver,
 deregisterChildren, determineTTLFromFamily,
 flushCache,
 getAvgStoreFileAge,
 getBlockingFileCount,
 getBytesPerChecksum,
 
 getCacheConfig, getChecksumType,
 getCloseCheckInterval,
 getColumnFamilyName,
 getCompactedCellsCount,
 getCompactedCellsSize,
 getCompactionCheckMultiplier,
 getCompactionPressure,
 getCompactionProgress, getCompactPriority,
 getComparator,
 getCoprocessorHost,
 getDataBlockEncoder,
 getFamily,
 getFileSystem,
 getFlushableSize,
 getFlushedCellsCount,
 getFlushedCellsSize,
 getFlushedOutputFileSize,
 getHRegion,
 getLastCompactSize,
 getMajorCompactedCellsCount,
 getMajorCompactedCellsSize,
 getMaxMemstoreTS,
 getMaxSequenceId,
 getMaxStoreFileAge, getMemstoreFlushSize,
 getMemStoreSize,
 getMinStoreFileAge,
 getNumHFiles,
 getNumReferenceFiles,
 getOffPeakHours,
 getRegionFileSystem,
 getRegionInf
 o, getScanInfo,
 getScanner,
 getScanners,
 getScanners,
 getSize,
 getSmallestReadPoint,
 getSnapshotSize,
 getSplitPoint,
 getStoreEngine,
 getStorefiles,
 getStorefilesCount,
 getStorefilesIndexSize,
 getStorefilesSize,
 getStoreFileTtl,
 getStoreHomedir,
 getStoreHomedir,
 getStoreSizeUncompressed,
 getTableN

[39/51] [partial] hbase-site git commit: Published site at 6ea4994569e05ff44e0fa571e053cef828ab57ed.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html 
b/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
index e252b6c..97174a2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/HealthChecker.HealthCheckerExitStatus.html
@@ -258,7 +258,7 @@ the order they are declared.
 
 
 values
-public static HealthChecker.HealthCheckerExitStatus[] values()
+public static HealthChecker.HealthCheckerExitStatus[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -275,7 +275,7 @@ for (HealthChecker.HealthCheckerExitStatus c : 
HealthChecker.HealthCheckerExitSt
 
 
 valueOf
-public static HealthChecker.HealthCheckerExitStatus valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static HealthChecker.HealthCheckerExitStatus valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html 
b/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
index 521f7b6..c31803c 100644
--- a/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
+++ b/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
@@ -331,7 +331,7 @@ the order they are declared.
 
 
 values
-public static KeyValue.Type[] values()
+public static KeyValue.Type[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -348,7 +348,7 @@ for (KeyValue.Type c : KeyValue.Type.values())
 
 
 valueOf
-public static KeyValue.Type valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static KeyValue.Type valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
index d49f7f7..f552c5a 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
@@ -275,7 +275,7 @@ the order they are declared.
 
 
 values
-public static MetaTableAccessor.QueryType[] values()
+public static MetaTableAccessor.QueryType[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -292,7 +292,7 @@ for (MetaTableAccessor.QueryType c : 
MetaTableAccessor.QueryType.values())
 
 
 valueOf
-public static MetaTableAccessor.QueryType valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static MetaTableAccessor.QueryType valueOf(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/db94a639/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Abortable.html
index f407161..bbb672e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class

[08/52] [abbrv] hbase git commit: Add ashishsinghi to pom.xml

Add ashishsinghi to pom.xml

Change-Id: Ib0709d92622350c50bee7e8a0bae0554d40df882


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1fc5208
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1fc5208
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1fc5208

Branch: refs/heads/HBASE-14850
Commit: f1fc5208aa724de7e31cdd4e2e4a696cf823929c
Parents: afdfd1b
Author: Ashish Singhi 
Authored: Wed Mar 30 11:08:21 2016 +0530
Committer: Ashish Singhi 
Committed: Wed Mar 30 11:09:30 2016 +0530

--
 pom.xml | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1fc5208/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 450275c..0324c1c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -169,6 +169,14 @@
   http://www.facebook.com/
 
 
+  ashishsinghi
+  Ashish Singhi
+  ashishsin...@apache.org
+  +5
+  Huawei
+  http://www.huawei.com/en/
+
+
   busbey
   Sean Busbey
   bus...@apache.org



[10/52] [abbrv] hbase git commit: HBASE-15559 Fix BaseMasterAndRegionObserver doesn't implement all the methods

HBASE-15559 Fix  BaseMasterAndRegionObserver doesn't implement all the methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b18de5ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b18de5ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b18de5ef

Branch: refs/heads/HBASE-14850
Commit: b18de5ef4545bda4558b950c96cd4be79f9567af
Parents: 31aee19
Author: Elliott Clark 
Authored: Tue Mar 29 10:10:35 2016 -0700
Committer: Elliott Clark 
Committed: Wed Mar 30 11:17:08 2016 -0700

--
 .../coprocessor/BaseMasterAndRegionObserver.java | 19 ++-
 .../hbase/coprocessor/BaseRegionObserver.java|  2 +-
 2 files changed, 19 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b18de5ef/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 4748056..65398c2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -44,7 +45,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
-public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver
+public class BaseMasterAndRegionObserver extends BaseRegionObserver
 implements MasterObserver {
   @Override
   public void preCreateTable(ObserverContext ctx,
@@ -447,6 +448,22 @@ public abstract class BaseMasterAndRegionObserver extends 
BaseRegionObserver
   }
 
   @Override
+  public boolean 
preSetSplitOrMergeEnabled(ObserverContext ctx,
+   boolean newValue,
+   Admin.MasterSwitchType switchType)
+  throws IOException {
+return false;
+  }
+
+  @Override
+  public void 
postSetSplitOrMergeEnabled(ObserverContext ctx,
+ boolean newValue,
+ Admin.MasterSwitchType switchType)
+  throws IOException {
+
+  }
+
+  @Override
   public boolean 
preBalanceSwitch(ObserverContext ctx,
   boolean b) throws IOException {
 return b;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b18de5ef/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index 3286f53..da7252b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -71,7 +71,7 @@ import com.google.common.collect.ImmutableList;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
-public abstract class BaseRegionObserver implements RegionObserver {
+public class BaseRegionObserver implements RegionObserver {
   @Override
   public void start(CoprocessorEnvironment e) throws IOException { }
 



[15/52] [abbrv] hbase git commit: HBASE-15567 TestReplicationShell broken by recent replication changes (Geoffrey Jacoby)

HBASE-15567 TestReplicationShell broken by recent replication changes (Geoffrey 
Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bcc4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bcc4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bcc4

Branch: refs/heads/HBASE-14850
Commit: bcc420ab1ef9a397d5a299a46a3f22b09d84
Parents: d6fd859
Author: Enis Soztutar 
Authored: Thu Mar 31 11:37:09 2016 -0700
Committer: Enis Soztutar 
Committed: Thu Mar 31 11:37:09 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  2 +-
 .../test/ruby/hbase/replication_admin_test.rb   | 39 ++--
 2 files changed, 13 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc4/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index a026d09..f441a99 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -95,7 +95,7 @@ module Hbase
 end
 @replication_admin.add_peer(id, replication_peer_config, map)
   else
-raise(ArgumentError, "args must be either a String or Hash")
+raise(ArgumentError, "args must be a Hash")
   end
 end
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc4/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 4923560..8f08dc0 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -30,12 +30,9 @@ module Hbase
 include TestHelpers
 
 def setup
-  @test_name = "hbase_shell_tests_table"
   @peer_id = '1'
 
   setup_hbase
-  drop_test_table(@test_name)
-  create_test_table(@test_name)
 
   assert_equal(0, replication_admin.list_peers.length)
 end
@@ -67,23 +64,26 @@ module Hbase
   end
 end
 
-define_test "add_peer: args must be a string or number" do
+define_test "add_peer: args must be a hash" do
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, 1)
   end
   assert_raise(ArgumentError) do
 replication_admin.add_peer(@peer_id, ['test'])
   end
+  assert_raise(ArgumentError) do
+replication_admin.add_peer(@peer_id, 'test')
+  end
 end
 
 define_test "add_peer: single zk cluster key" do
   cluster_key = "server1.cie.com:2181:/hbase"
 
-  replication_admin.add_peer(@peer_id, cluster_key)
+  replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
 
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id))
+  assert_equal(cluster_key, 
replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
 
   # cleanup for future tests
   replication_admin.remove_peer(@peer_id)
@@ -92,26 +92,11 @@ module Hbase
 define_test "add_peer: multiple zk cluster key" do
   cluster_key = "zk1,zk2,zk3:2182:/hbase-prod"
 
-  replication_admin.add_peer(@peer_id, cluster_key)
-
-  assert_equal(1, replication_admin.list_peers.length)
-  assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(replication_admin.list_peers.fetch(@peer_id), cluster_key)
-
-  # cleanup for future tests
-  replication_admin.remove_peer(@peer_id)
-end
-
-define_test "add_peer: multiple zk cluster key and table_cfs" do
-  cluster_key = "zk4,zk5,zk6:11000:/hbase-test"
-  table_cfs_str = "table1;table2:cf1;table3:cf2,cf3"
-
-  replication_admin.add_peer(@peer_id, cluster_key, table_cfs_str)
+  replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
 
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id))
-  assert_equal(table_cfs_str, 
replication_admin.show_peer_tableCFs(@peer_id))
+  assert_equal(cluster_key, 
replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
 
   # cleanup for future tests
   replication_admin.remove_peer(@peer_id)
@@ -125,7 +110,7 @@ module Hbase
 
   assert_equal(1, replication_admin.list_peers.length)
   assert(replication_admin.list_peers.key?(@peer_id))
-  assert_equal(cluster_key, rep

[04/52] [abbrv] hbase git commit: HBASE-11393 Replication TableCfs should be a PB object rather than a string

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f39baf0/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index 29a052b..8b7c0a5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -19,13 +19,9 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -47,7 +43,9 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.*;
+
 @Category({FlakeyTests.class, LargeTests.class})
 public class TestPerTableCFReplication {
 
@@ -184,13 +184,13 @@ public class TestPerTableCFReplication {
 Map> tabCFsMap = null;
 
 // 1. null or empty string, result should be null
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(null);
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig(null);
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("");
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("   ");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("   ");
 assertEquals(null, tabCFsMap);
 
 TableName tab1 = TableName.valueOf("tab1");
@@ -198,20 +198,20 @@ public class TestPerTableCFReplication {
 TableName tab3 = TableName.valueOf("tab3");
 
 // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab1));   // its table name is "tab1"
 assertFalse(tabCFsMap.containsKey(tab2));  // not other table
 assertEquals(null, tabCFsMap.get(tab1));   // null cf-list,
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab2:cf1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab2:cf1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab2));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
 assertEquals(1, tabCFsMap.get(tab2).size());   // cf-list contains only 1 
cf
 assertEquals("cf1", tabCFsMap.get(tab2).get(0));// the only cf is "cf1"
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab3 : cf1 , cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab3 : cf1 , 
cf3");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab3));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
@@ -220,7 +220,7 @@ public class TestPerTableCFReplication {
 assertTrue(tabCFsMap.get(tab3).contains("cf3"));// contains "cf3"
 
 // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1 ; tab2:cf1 ; 
tab3:cf1,cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1 ; 
tab2:cf1 ; tab3:cf1,cf3");
 // 3.1 contains 3 tables : "tab1", "tab2" and "tab3"
 assertEquals(3, tabCFsMap.size());
 assertTrue(tabCFsMap.containsKey(tab1));
@@ -238,7 +238,7 @@ public class TestPerTableCFReplication {
 
 // 4. contiguous or additional ";"(table delimiter) or ","(cf delimiter) 
can be tolerated
 // still use the example of multiple tables: "tab1 ; tab2:cf1 ; 
tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(
+tabCFsMap = ReplicationSerDeHelp

[18/52] [abbrv] hbase git commit: Revert "HBASE-15572 Adding optional timestamp semantics to HBase-Spark (Weiqing Yang)"

Revert "HBASE-15572 Adding optional timestamp semantics to HBase-Spark (Weiqing 
Yang)"

This reverts commit eec27ad7ef7b5078f705301bd3042991d4d4b4d9.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6905d272
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6905d272
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6905d272

Branch: refs/heads/HBASE-14850
Commit: 6905d272d34f0d674cfd73c6a8d579e3231b5a78
Parents: eec27ad
Author: Sean Busbey 
Authored: Thu Mar 31 21:40:50 2016 -0500
Committer: Sean Busbey 
Committed: Thu Mar 31 21:40:50 2016 -0500

--
 .../hadoop/hbase/spark/DefaultSource.scala  |   8 +-
 .../spark/datasources/HBaseSparkConf.scala  |   5 -
 .../spark/datasources/HBaseTableScanRDD.scala   |  26 -
 .../hadoop/hbase/spark/DefaultSourceSuite.scala | 105 +++
 4 files changed, 14 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6905d272/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
index c71ee4e..7970816 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
@@ -88,12 +88,6 @@ case class HBaseRelation (
 userSpecifiedSchema: Option[StructType]
   )(@transient val sqlContext: SQLContext)
   extends BaseRelation with PrunedFilteredScan  with InsertableRelation  with 
Logging {
-
-  val timestamp = parameters.get(HBaseSparkConf.TIMESTAMP).map(_.toLong)
-  val minTimeStamp = parameters.get(HBaseSparkConf.MIN_TIMESTAMP).map(_.toLong)
-  val maxTimeStamp = parameters.get(HBaseSparkConf.MAX_TIMESTAMP).map(_.toLong)
-  val maxVersions = parameters.get(HBaseSparkConf.MAX_VERSIONS).map(_.toInt)
-
   val catalog = HBaseTableCatalog(parameters)
   def tableName = catalog.name
   val configResources = 
parameters.getOrElse(HBaseSparkConf.HBASE_CONFIG_RESOURCES_LOCATIONS, "")
@@ -210,7 +204,7 @@ case class HBaseRelation (
 System.arraycopy(x, 0, rBytes, offset, x.length)
 offset += x.length
   }
-  val put = timestamp.fold(new Put(rBytes))(new Put(rBytes, _))
+  val put = new Put(rBytes)
 
   colsIdxedFields.foreach { case (x, y) =>
 val b = Utils.toBytes(row(x), y)

http://git-wip-us.apache.org/repos/asf/hbase/blob/6905d272/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
index 2e4c0b3..ca44d42 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
@@ -36,9 +36,4 @@ object HBaseSparkConf{
   val USE_HBASE_CONTEXT = "hbase.use.hbase.context"
   val PUSH_DOWN_COLUMN_FILTER = "hbase.pushdown.column.filter"
   val defaultPushDownColumnFilter = true
-
-  val TIMESTAMP = "timestamp"
-  val MIN_TIMESTAMP = "minTimestamp"
-  val MAX_TIMESTAMP = "maxTimestamp"
-  val MAX_VERSIONS = "maxVersions"
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6905d272/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
index 886114a..2e05651 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
@@ -105,7 +105,6 @@ class HBaseTableScanRDD(relation: HBaseRelation,
   val gets = new ArrayList[Get]()
   x.foreach{ y =>
 val g = new Get(y)
-handleTimeSemantics(g)
 columns.foreach { d =>
   if (!d.isRowKey) {
 g.addColumn(d.cfBytes, d.colBytes)
@@ -158,7 +157,6 @@ class HBaseTableScanRDD(relation: HBaseRelation,
   case (Some(Bound(a, b)), None) => new Scan(a)
   case (None, None) => new Scan()
 }
-handleTimeSemantics(scan)
 
 columns.foreach { d =>
   if (!d.isRowKey) {
@@ -228,30 +226,6 @@ class HBaseTableScanRDD(relation: HBaseRelation,

[09/52] [abbrv] hbase git commit: HBASE-15327 Canary will always invoke admin.balancer() in each sniffing period when writeSniffing is enabled (Jianwei Cui)

HBASE-15327 Canary will always invoke admin.balancer() in each sniffing period 
when writeSniffing is enabled (Jianwei Cui)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31aee19f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31aee19f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31aee19f

Branch: refs/heads/HBASE-14850
Commit: 31aee19f28e56070e128c1bada2d87b53e1fd656
Parents: f1fc520
Author: tedyu 
Authored: Wed Mar 30 08:58:42 2016 -0700
Committer: tedyu 
Committed: Wed Mar 30 08:58:42 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/tool/Canary.java   | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/31aee19f/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 9a71a14..9ad7242 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -46,6 +46,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -927,7 +928,12 @@ public final class Canary implements Tool {
 admin.enableTable(writeTableName);
   }
 
-  int numberOfServers = admin.getClusterStatus().getServers().size();
+  ClusterStatus status = admin.getClusterStatus();
+  int numberOfServers = status.getServersSize();
+  if (status.getServers().contains(status.getMaster())) {
+numberOfServers -= 1;
+  }
+
   List> pairs =
   MetaTableAccessor.getTableRegionsAndLocations(connection, 
writeTableName);
   int numberOfRegions = pairs.size();



[21/52] [abbrv] hbase git commit: HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan Jiang)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index db718ee..50a1832 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -56,16 +56,19 @@ import org.apache.hadoop.hbase.master.MetricsMaster;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
+import org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
-import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
@@ -145,12 +148,14 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
   private Map snapshotHandlers =
   new HashMap();
 
-  // Restore Sentinels map, with table name as key.
+  // Restore map, with table name as key, procedure ID as value.
   // The map is always accessed and modified under the object lock using 
synchronized.
-  // restoreSnapshot()/cloneSnapshot() will insert an Handler in the table.
-  // isRestoreDone() will remove the handler requested if the operation is 
finished.
-  private Map restoreHandlers =
-  new HashMap();
+  // restoreSnapshot()/cloneSnapshot() will insert a procedure ID in the map.
+  //
+  // TODO: just as the Apache HBase 1.x implementation, this map would not 
survive master
+  // restart/failover. This is just a stopgap implementation until 
implementation of taking
+  // snapshot using Procedure-V2.
+  private Map restoreTableToProcIdMap = new 
HashMap();
 
   private Path rootDir;
   private ExecutorService executorService;
@@ -426,11 +431,9 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
 
 // make sure we aren't running a restore on the same table
 if (isRestoringTable(snapshotTable)) {
-  SnapshotSentinel handler = restoreHandlers.get(snapshotTable);
   throw new SnapshotCreationException("Rejected taking "
   + ClientSnapshotDescriptionUtils.toString(snapshot)
-  + " because we are already have a restore in progress on the same 
snapshot "
-  + ClientSnapshotDescriptionUtils.toString(handler.getSnapshot()), 
snapshot);
+  + " because we are already have a restore in progress on the same 
snapshot.");
 }
 
 try {
@@ -647,14 +650,61 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
   }
 
   /**
+   * Clone the specified snapshot.
+   * The clone will fail if the destination table has a snapshot or restore in 
progress.
+   *
+   * @param reqSnapshot Snapshot Descriptor from request
+   * @param tableName table to clone
+   * @param snapshot Snapshot Descriptor
+   * @param snapshotTableDesc Table Descriptor
+   * @param nonceGroup unique value to prevent duplicated RPC
+   * @param nonce unique value to prevent duplicated RPC
+   * @return procId the ID of the clone snapshot procedure
+   * @throws IOException
+   */
+  private long cloneSnapshot(
+  final SnapshotDescription reqSnapshot,
+  final TableName tableName,
+  final SnapshotDescription snapshot,
+  final HTableDescriptor snapshotTableDesc,
+  final long nonceGroup,
+  final long nonce) throws IOException {
+MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
+HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
+if (cpHost != null) {
+  cpHost.preCloneSnapshot(reqSnapshot, htd);
+}
+long procId;
+try {
+  pro

[02/52] [abbrv] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
deleted file mode 100644
index 1113cfd..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
- * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
- * for the specific language governing permissions and limitations under the 
License.
- */
-
-package org.apache.hadoop.hbase.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- *
- * Configuration is a heavy weight registry that does a lot of string 
operations and regex matching.
- * Method calls into Configuration account for high CPU usage and have huge 
performance impact.
- * This class caches the value in the TableConfiguration object to improve 
performance.
- * see HBASE-12128
- *
- */
-@InterfaceAudience.Private
-public class TableConfiguration {
-
-  public static final String WRITE_BUFFER_SIZE_KEY = 
"hbase.client.write.buffer";
-  public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152;
-  public static final String MAX_KEYVALUE_SIZE_KEY = 
"hbase.client.keyvalue.maxsize";
-  public static final int MAX_KEYVALUE_SIZE_DEFAULT = -1;
-
-  private final long writeBufferSize;
-  private final int metaOperationTimeout;
-  private final int operationTimeout;
-  private final int scannerCaching;
-  private final long scannerMaxResultSize;
-  private final int primaryCallTimeoutMicroSecond;
-  private final int replicaCallTimeoutMicroSecondScan;
-  private final int retries;
-  private final int maxKeyValueSize;
-
-// toggle for async/sync prefetch
-  private final boolean clientScannerAsyncPrefetch;
-
-/**
-   * Constructor
-   * @param conf Configuration object
-   */
-  TableConfiguration(Configuration conf) {
-this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, 
WRITE_BUFFER_SIZE_DEFAULT);
-
-this.metaOperationTimeout = conf.getInt(
-  HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
-  HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-
-this.operationTimeout = conf.getInt(
-  HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-
-this.scannerCaching = conf.getInt(
-  HConstants.HBASE_CLIENT_SCANNER_CACHING, 
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
-
-this.scannerMaxResultSize =
-conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
-  HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
-
-this.primaryCallTimeoutMicroSecond =
-conf.getInt("hbase.client.primaryCallTimeout.get", 1); // 10ms
-
-this.replicaCallTimeoutMicroSecondScan =
-conf.getInt("hbase.client.replicaCallTimeout.scan", 100); // 1000 
ms
-
-this.retries = conf.getInt(
-   HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-
-this.clientScannerAsyncPrefetch = conf.getBoolean(
-   Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, 
Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH);
-
-this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, 
MAX_KEYVALUE_SIZE_DEFAULT);
-  }
-
-  /**
-   * Constructor
-   * This is for internal testing purpose (using the default value).
-   * In real usage, we should read the configuration from the Configuration 
object.
-   */
-  @VisibleForTesting
-  protected TableConfiguration() {
-this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT;
-this.metaOperationTimeout = 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
-this.operationTimeout = HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
-this.scannerCaching = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
-this.scannerMaxResultSize = 
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE;
-this.primaryCallTimeoutMicroSecond = 1;
-this.replicaCallTimeoutMicroSecondScan =

[13/52] [abbrv] hbase git commit: HBASE-15324 Jitter may cause desiredMaxFileSize overflow in ConstantSizeRegionSplitPolicy and trigger unexpected split (Yu Li)

HBASE-15324 Jitter may cause desiredMaxFileSize overflow in 
ConstantSizeRegionSplitPolicy and trigger unexpected split (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d56105e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d56105e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d56105e

Branch: refs/heads/HBASE-14850
Commit: 9d56105eece2d34922ae1c230308193cd0e9b29f
Parents: 21301a8
Author: stack 
Authored: Wed Mar 30 13:31:09 2016 -0700
Committer: stack 
Committed: Wed Mar 30 13:31:09 2016 -0700

--
 .../ConstantSizeRegionSplitPolicy.java  | 24 
 .../regionserver/TestRegionSplitPolicy.java | 20 
 2 files changed, 40 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9d56105e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
index 66ef712..836cec5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
@@ -17,13 +17,15 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import com.google.common.annotations.VisibleForTesting;
+
+import java.util.Random;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-
-import java.util.Random;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * A {@link RegionSplitPolicy} implementation which splits a region
@@ -37,8 +39,10 @@ import java.util.Random;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
   private static final Random RANDOM = new Random();
+  private static final double EPSILON = 1E-6;
 
   private long desiredMaxFileSize;
+  private double jitterRate;
 
   @Override
   protected void configureForRegion(HRegion region) {
@@ -53,7 +57,14 @@ public class ConstantSizeRegionSplitPolicy extends 
RegionSplitPolicy {
 HConstants.DEFAULT_MAX_FILE_SIZE);
 }
 double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D);
-this.desiredMaxFileSize += (long)(desiredMaxFileSize * (RANDOM.nextFloat() 
- 0.5D) * jitter);
+this.jitterRate = (RANDOM.nextFloat() - 0.5D) * jitter;
+long jitterValue = (long) (this.desiredMaxFileSize * this.jitterRate);
+// make sure the long value won't overflow with jitter
+if (this.jitterRate > EPSILON && jitterValue > (Long.MAX_VALUE - 
this.desiredMaxFileSize)) {
+  this.desiredMaxFileSize = Long.MAX_VALUE;
+} else {
+  this.desiredMaxFileSize += jitterValue;
+}
   }
 
   @Override
@@ -80,4 +91,9 @@ public class ConstantSizeRegionSplitPolicy extends 
RegionSplitPolicy {
   long getDesiredMaxFileSize() {
 return desiredMaxFileSize;
   }
+
+  @VisibleForTesting
+  public boolean positiveJitterRate() {
+return this.jitterRate > EPSILON;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9d56105e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index 1c38197..341a4bf 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -321,4 +321,24 @@ public class TestRegionSplitPolicy {
 assertEquals("ijk", Bytes.toString(policy.getSplitPoint()));
   }
 
+  @Test
+  public void testConstantSizePolicyWithJitter() throws IOException {
+conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+  ConstantSizeRegionSplitPolicy.class.getName());
+htd.setMaxFileSize(Long.MAX_VALUE);
+boolean positiveJitter = false;
+ConstantSizeRegionSplitPolicy policy = null;
+while (!positiveJitter) {
+  policy = (ConstantSizeRegionSplitPolicy) 
RegionSplitPolicy.create(mockRegion, conf);
+  positiveJitter = policy.positiveJitterRate();
+}
+// add a store
+HStore mockStore =

[19/52] [abbrv] hbase git commit: HBASE-15396 Enhance mapreduce.TableSplit to add encoded region name

HBASE-15396 Enhance mapreduce.TableSplit to add encoded region name

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d3a89ce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d3a89ce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d3a89ce

Branch: refs/heads/HBASE-14850
Commit: 7d3a89ce8e07d9fd1c31b4bd2324b71dd10ecef5
Parents: 6905d27
Author: Harsh J 
Authored: Fri Mar 4 15:59:48 2016 +0530
Committer: Sean Busbey 
Committed: Thu Mar 31 22:59:43 2016 -0500

--
 .../mapreduce/MultiTableInputFormatBase.java|  4 +-
 .../hbase/mapreduce/TableInputFormatBase.java   | 10 ++--
 .../hadoop/hbase/mapreduce/TableSplit.java  | 59 
 .../hadoop/hbase/mapreduce/TestTableSplit.java  | 22 +++-
 4 files changed, 77 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d3a89ce/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index 6f0075a..4931c3f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -223,11 +223,13 @@ public abstract class MultiTableInputFormatBase extends
   keys.getFirst()[i], false);
   String regionHostname = hregionLocation.getHostname();
   HRegionInfo regionInfo = hregionLocation.getRegionInfo();
+  String encodedRegionName = regionInfo.getEncodedName();
   long regionSize = sizeCalculator.getRegionSize(
   regionInfo.getRegionName());
 
   TableSplit split = new TableSplit(table.getName(),
-  scan, splitStart, splitStop, regionHostname, regionSize);
+  scan, splitStart, splitStop, regionHostname,
+  encodedRegionName, regionSize);
 
   splits.add(split);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d3a89ce/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 65b4efc..2cde4b9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -300,9 +300,10 @@ extends InputFormat {
   keys.getSecond()[i] : stopRow;
   
   byte[] regionName = location.getRegionInfo().getRegionName();
+  String encodedRegionName = location.getRegionInfo().getEncodedName();
   long regionSize = sizeCalculator.getRegionSize(regionName);
   TableSplit split = new TableSplit(tableName, scan,
-splitStart, splitStop, regionLocation, regionSize);
+splitStart, splitStop, regionLocation, encodedRegionName, 
regionSize);
   splits.add(split);
   if (LOG.isDebugEnabled()) {
 LOG.debug("getSplits: split -> " + i + " -> " + split);
@@ -382,6 +383,7 @@ extends InputFormat {
   TableSplit ts = (TableSplit)list.get(count);
   TableName tableName = ts.getTable();
   String regionLocation = ts.getRegionLocation();
+  String encodedRegionName = ts.getEncodedRegionName();
   long regionSize = ts.getLength();
   if (regionSize >= dataSkewThreshold) {
 // if the current region size is large than the data skew threshold,
@@ -390,9 +392,9 @@ extends InputFormat {
  //Set the size of child TableSplit as 1/2 of the region size. The 
exact size of the
  // MapReduce input splits is not far off.
 TableSplit t1 = new TableSplit(tableName, scan, ts.getStartRow(), 
splitKey, regionLocation,
-regionSize / 2);
+encodedRegionName, regionSize / 2);
 TableSplit t2 = new TableSplit(tableName, scan, splitKey, 
ts.getEndRow(), regionLocation,
-regionSize - regionSize / 2);
+encodedRegionName, regionSize - regionSize / 2);
 resultList.add(t1);
 resultList.add(t2);
 count++;
@@ -419,7 +421,7 @@ extends InputFormat {
   }
 }
 TableSplit t = new TableSplit(tableName, scan, splitStartKey, 
splitEndKey,
-

[05/52] [abbrv] hbase git commit: HBASE-11393 Replication TableCfs should be a PB object rather than a string

http://git-wip-us.apache.org/repos/asf/hbase/blob/7f39baf0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 0240a67..f64d0c1 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -3947,6 +3947,719 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.DeprecatedTableState)
   }
 
+  public interface TableCFOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional .hbase.pb.TableName table_name = 1;
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+boolean hasTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+
+// repeated bytes families = 2;
+/**
+ * repeated bytes families = 2;
+ */
+java.util.List getFamiliesList();
+/**
+ * repeated bytes families = 2;
+ */
+int getFamiliesCount();
+/**
+ * repeated bytes families = 2;
+ */
+com.google.protobuf.ByteString getFamilies(int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableCF}
+   */
+  public static final class TableCF extends
+  com.google.protobuf.GeneratedMessage
+  implements TableCFOrBuilder {
+// Use TableCF.newBuilder() to construct.
+private TableCF(com.google.protobuf.GeneratedMessage.Builder builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private TableCF(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final TableCF defaultInstance;
+public static TableCF getDefaultInstance() {
+  return defaultInstance;
+}
+
+public TableCF getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private TableCF(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
subBuilder = null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = tableName_.toBuilder();
+  }
+  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER,
 extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(tableName_);
+tableName_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 18: {
+  if (!((mutable_bitField0_ & 0x0002) == 0x0002)) {
+families_ = new 
java.util.ArrayList();
+mutable_bitField0_ |= 0x0002;
+  }
+  families_.add(input.readBytes());
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+if (((mutable_bitField0_ & 0x0002) == 0x0002)) {
+  families_ = java.util.Collections.unmodifiableList(families_);
+}
+this.unknownFields = unknow

[30/52] [abbrv] hbase git commit: HBASE-15582 SnapshotManifestV1 too verbose when there are no regions

HBASE-15582 SnapshotManifestV1 too verbose when there are no regions


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79868bd3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79868bd3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79868bd3

Branch: refs/heads/HBASE-14850
Commit: 79868bd394c0fd6743d6582aa4713f91e63a8baf
Parents: 25419d8
Author: Matteo Bertozzi 
Authored: Fri Apr 1 20:55:21 2016 -0700
Committer: Matteo Bertozzi 
Committed: Fri Apr 1 20:55:21 2016 -0700

--
 .../java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79868bd3/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
index 328c998..a5afb91 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java
@@ -116,7 +116,7 @@ public final class SnapshotManifestV1 {
   final SnapshotDescription desc) throws IOException {
 FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new 
FSUtils.RegionDirFilter(fs));
 if (regions == null) {
-  LOG.info("No regions under directory:" + snapshotDir);
+  LOG.debug("No regions under directory:" + snapshotDir);
   return null;
 }
 



[26/52] [abbrv] hbase git commit: HBASE-15234 Don't abort ReplicationLogCleaner on ZooKeeper errors

HBASE-15234 Don't abort ReplicationLogCleaner on ZooKeeper errors


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d8e0a04
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d8e0a04
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d8e0a04

Branch: refs/heads/HBASE-14850
Commit: 2d8e0a0477cdfc1d13373cde54e0cd080db514f5
Parents: e1d5c3d
Author: Gary Helmling 
Authored: Tue Feb 16 14:19:19 2016 -0800
Committer: Gary Helmling 
Committed: Thu Mar 31 22:28:18 2016 -0700

--
 .../master/ReplicationLogCleaner.java   | 42 +++
 .../hbase/master/cleaner/TestLogsCleaner.java   | 75 
 2 files changed, 102 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8e0a04/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index a6b6dd8..9ecba11 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -47,12 +48,11 @@ import org.apache.zookeeper.KeeperException;
  * replication before deleting it when its TTL is over.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class ReplicationLogCleaner extends BaseLogCleanerDelegate implements 
Abortable {
+public class ReplicationLogCleaner extends BaseLogCleanerDelegate {
   private static final Log LOG = 
LogFactory.getLog(ReplicationLogCleaner.class);
   private ZooKeeperWatcher zkw;
   private ReplicationQueuesClient replicationQueues;
   private boolean stopped = false;
-  private boolean aborted;
 
 
   @Override
@@ -136,15 +136,23 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate implements Abo
 // Make my own Configuration.  Then I'll have my own connection to zk that
 // I can close myself when comes time.
 Configuration conf = new Configuration(config);
+try {
+  setConf(conf, new ZooKeeperWatcher(conf, "replicationLogCleaner", null));
+} catch (IOException e) {
+  LOG.error("Error while configuring " + this.getClass().getName(), e);
+}
+  }
+
+  @VisibleForTesting
+  public void setConf(Configuration conf, ZooKeeperWatcher zk) {
 super.setConf(conf);
 try {
-  this.zkw = new ZooKeeperWatcher(conf, "replicationLogCleaner", null);
-  this.replicationQueues = 
ReplicationFactory.getReplicationQueuesClient(zkw, conf, this);
+  this.zkw = zk;
+  this.replicationQueues = 
ReplicationFactory.getReplicationQueuesClient(zkw, conf,
+  new WarnOnlyAbortable());
   this.replicationQueues.init();
 } catch (ReplicationException e) {
   LOG.error("Error while configuring " + this.getClass().getName(), e);
-} catch (IOException e) {
-  LOG.error("Error while configuring " + this.getClass().getName(), e);
 }
   }
 
@@ -163,15 +171,19 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate implements Abo
 return this.stopped;
   }
 
-  @Override
-  public void abort(String why, Throwable e) {
-LOG.warn("Aborting ReplicationLogCleaner because " + why, e);
-this.aborted = true;
-stop(why);
-  }
+  private static class WarnOnlyAbortable implements Abortable {
 
-  @Override
-  public boolean isAborted() {
-return this.aborted;
+@Override
+public void abort(String why, Throwable e) {
+  LOG.warn("ReplicationLogCleaner received abort, ignoring.  Reason: " + 
why);
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+}
+
+@Override
+public boolean isAborted() {
+  return false;
+}
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2d8e0a04/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index ebf3699..47db32b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ 
b/hbase-server/

[45/52] [abbrv] hbase git commit: HBASE-15401 Add Zookeeper to third party

HBASE-15401 Add Zookeeper to third party


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eaf86de4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eaf86de4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eaf86de4

Branch: refs/heads/HBASE-14850
Commit: eaf86de45eb762bfdcaafb7dcec59f78db756413
Parents: b288776
Author: Elliott Clark 
Authored: Mon Mar 7 14:58:21 2016 -0800
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 hbase-native-client/Dockerfile   | 26 -
 hbase-native-client/third-party/BUCK | 96 ++-
 2 files changed, 68 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eaf86de4/hbase-native-client/Dockerfile
--
diff --git a/hbase-native-client/Dockerfile b/hbase-native-client/Dockerfile
index 5f17f04..1364d22 100644
--- a/hbase-native-client/Dockerfile
+++ b/hbase-native-client/Dockerfile
@@ -15,8 +15,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM pjameson/buck-folly-watchman
+FROM cpp_update 
 
-RUN apt-get install -y libprotobuf-dev protobuf-compiler clang-format-3.7 vim 
maven inetutils-ping
+ARG CC=/usr/bin/gcc-5
+ARG CXX=/usr/bin/g++-5
+ARG CFLAGS="-D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -g -fno-omit-frame-pointer -O3 
-pthread"
+ARG CXXFLAGS="-D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -g -fno-omit-frame-pointer -O3 
-pthread"
+
+RUN apt-get install -y clang-format-3.7 vim maven inetutils-ping
+RUN git clone --depth 1 --branch v2.6.1 https://github.com/google/protobuf.git 
/usr/src/protobuf && \
+  cd /usr/src/protobuf/ && \
+  ./autogen.sh && \
+  ./configure --disable-shared && \
+  make && \
+  make check && \
+  make install
+RUN cd /usr/src && \
+  wget 
http://www-us.apache.org/dist/zookeeper/zookeeper-3.4.8/zookeeper-3.4.8.tar.gz 
&& \ 
+  tar zxf zookeeper-3.4.8.tar.gz && \ 
+  rm -rf zookeeper-3.4.8.tar.gz && \
+  cd zookeeper-3.4.8 && \
+  cd src/c && \
+  ./configure --disable-shared && \
+  make && \
+  make install && \
+  make clean 
 
 WORKDIR /usr/local/src/hbase/hbase-native-client

http://git-wip-us.apache.org/repos/asf/hbase/blob/eaf86de4/hbase-native-client/third-party/BUCK
--
diff --git a/hbase-native-client/third-party/BUCK 
b/hbase-native-client/third-party/BUCK
index b7baa86..e577a5f 100644
--- a/hbase-native-client/third-party/BUCK
+++ b/hbase-native-client/third-party/BUCK
@@ -15,68 +15,60 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-def add_system_libs(names = []):
+def add_system_libs(names = [], lib_dir = "/usr/lib/x86_64-linux-gnu", deps = 
[], exported_linker_flags = []):
 rules = []
 for name in names:
-prebuilt_cxx_library(
-name = name,
-lib_name = name,
-lib_dir = "/usr/lib/x86_64-linux-gnu",
-visibility = [ 'PUBLIC', ],
-)
-rules.append(":" + name)
-
+gen_rule_name = "gen_lib{}".format(name)
+genrule(
+  name = gen_rule_name,
+  out = gen_rule_name,
+  bash = "mkdir -p $OUT && cp {}/lib{}.* 
$OUT".format(lib_dir,name),
+)
+prebuilt_cxx_library(
+  name = name,
+  lib_name = name,
+  lib_dir = '$(location :{})'.format(gen_rule_name),
+  force_static = True,
+  deps = deps,
+  visibility = [ 'PUBLIC' ],
+  exported_linker_flags = exported_linker_flags,
+)
+rules.append(":" + name)
 return rules
 
 system_libs = [
+"unwind",
+"lzma",
+]
+local_libs = [
 "double-conversion",
 "glog",
-"protobuf",
 "gflags",
-"unwind",
-"lzma",
+"protobuf",
+"zookeeper_mt",
 "boost_regex",
 ]
-tp_dep_rules = add_system_libs(system_libs)
-prebuilt_cxx_library(
-name = "folly",
-lib_name = "folly",
-lib_dir = "/usr/local/lib",
-deps = tp_dep_rules,
-exported_linker_flags = [
-"-pthread",
-"-lstdc++",
-],
-visibility = [
-'PUBLIC',
-]
-)
-prebuilt_cxx_library(
-name = "follybenchmark",
-lib_name = "follybenchmark",
-lib_dir = "/usr/local/lib",
-deps = tp_dep_rules + [":folly"],
-exported_linker_flags = [
-"-pthread",
-"-lstdc++",
-],
-visibility = [
-'PUBLIC',
-

[33/52] [abbrv] hbase git commit: HBASE-15505 ReplicationPeerConfig should be builder-style (Gabor Liptak)

HBASE-15505 ReplicationPeerConfig should be builder-style (Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e399883
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e399883
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e399883

Branch: refs/heads/HBASE-14850
Commit: 7e399883f62fd37e5215ce3a456a917e690c921c
Parents: a93a887
Author: Enis Soztutar 
Authored: Tue Apr 5 11:44:05 2016 -0700
Committer: Enis Soztutar 
Committed: Tue Apr 5 11:44:05 2016 -0700

--
 .../client/UnmodifyableHTableDescriptor.java| 14 +++---
 .../replication/ReplicationPeerConfig.java  |  4 +-
 .../TestUnmodifyableHTableDescriptor.java   | 47 
 .../hadoop/hbase/quotas/TestQuotaFilter.java| 47 
 .../replication/TestReplicationPeerConfig.java  | 47 
 5 files changed, 151 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e399883/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
index 7331983..59a1bd5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java
@@ -68,12 +68,12 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @param family HColumnDescriptor of familyto add.
*/
   @Override
-  public HTableDescriptor addFamily(final HColumnDescriptor family) {
+  public UnmodifyableHTableDescriptor addFamily(final HColumnDescriptor 
family) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
   @Override
-  public HTableDescriptor modifyFamily(HColumnDescriptor family) {
+  public UnmodifyableHTableDescriptor modifyFamily(HColumnDescriptor family) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -91,7 +91,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setReadOnly(boolean)
*/
   @Override
-  public HTableDescriptor setReadOnly(boolean readOnly) {
+  public UnmodifyableHTableDescriptor setReadOnly(boolean readOnly) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -99,7 +99,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(byte[], byte[])
*/
   @Override
-  public HTableDescriptor setValue(byte[] key, byte[] value) {
+  public UnmodifyableHTableDescriptor setValue(byte[] key, byte[] value) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -107,7 +107,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setValue(java.lang.String, 
java.lang.String)
*/
   @Override
-  public HTableDescriptor setValue(String key, String value) {
+  public UnmodifyableHTableDescriptor setValue(String key, String value) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -115,7 +115,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(long)
*/
   @Override
-  public HTableDescriptor setMaxFileSize(long maxFileSize) {
+  public UnmodifyableHTableDescriptor setMaxFileSize(long maxFileSize) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 
@@ -123,7 +123,7 @@ public class UnmodifyableHTableDescriptor extends 
HTableDescriptor {
* @see org.apache.hadoop.hbase.HTableDescriptor#setMemStoreFlushSize(long)
*/
   @Override
-  public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
+  public UnmodifyableHTableDescriptor setMemStoreFlushSize(long 
memstoreFlushSize) {
 throw new UnsupportedOperationException("HTableDescriptor is read-only");
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7e399883/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 8d05fa0..7799de6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/re

[51/52] [abbrv] hbase git commit: HBASE-14853 Add on protobuf to c++ chain

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2887761/hbase-native-client/if/Master.proto
--
diff --git a/hbase-native-client/if/Master.proto 
b/hbase-native-client/if/Master.proto
new file mode 100644
index 000..4d3a2e1
--- /dev/null
+++ b/hbase-native-client/if/Master.proto
@@ -0,0 +1,778 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// All to do with the Master.  Includes schema management since these
+// changes are run by the Master process.
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "MasterProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+import "Client.proto";
+import "ClusterStatus.proto";
+import "ErrorHandling.proto";
+import "Procedure.proto";
+import "Quota.proto";
+
+/* Column-level protobufs */
+
+message AddColumnRequest {
+  required TableName table_name = 1;
+  required ColumnFamilySchema column_families = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message AddColumnResponse {
+  optional uint64 proc_id = 1;
+}
+
+message DeleteColumnRequest {
+  required TableName table_name = 1;
+  required bytes column_name = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message DeleteColumnResponse {
+  optional uint64 proc_id = 1;
+}
+
+message ModifyColumnRequest {
+  required TableName table_name = 1;
+  required ColumnFamilySchema column_families = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message ModifyColumnResponse {
+  optional uint64 proc_id = 1;
+}
+
+/* Region-level Protos */
+
+message MoveRegionRequest {
+  required RegionSpecifier region = 1;
+  optional ServerName dest_server_name = 2;
+}
+
+message MoveRegionResponse {
+}
+
+/**
+ * Dispatch merging the specified regions.
+ */
+message DispatchMergingRegionsRequest {
+  required RegionSpecifier region_a = 1;
+  required RegionSpecifier region_b = 2;
+  optional bool forcible = 3 [default = false];
+}
+
+message DispatchMergingRegionsResponse {
+}
+
+message AssignRegionRequest {
+  required RegionSpecifier region = 1;
+}
+
+message AssignRegionResponse {
+}
+
+message UnassignRegionRequest {
+  required RegionSpecifier region = 1;
+  optional bool force = 2 [default = false];
+}
+
+message UnassignRegionResponse {
+}
+
+message OfflineRegionRequest {
+  required RegionSpecifier region = 1;
+}
+
+message OfflineRegionResponse {
+}
+
+/* Table-level protobufs */
+
+message CreateTableRequest {
+  required TableSchema table_schema = 1;
+  repeated bytes split_keys = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message CreateTableResponse {
+  optional uint64 proc_id = 1;
+}
+
+message DeleteTableRequest {
+  required TableName table_name = 1;
+  optional uint64 nonce_group = 2 [default = 0];
+  optional uint64 nonce = 3 [default = 0];
+}
+
+message DeleteTableResponse {
+  optional uint64 proc_id = 1;
+}
+
+message TruncateTableRequest {
+  required TableName tableName = 1;
+  optional bool preserveSplits = 2 [default = false];
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message TruncateTableResponse {
+  optional uint64 proc_id = 1;
+}
+
+message EnableTableRequest {
+  required TableName table_name = 1;
+  optional uint64 nonce_group = 2 [default = 0];
+  optional uint64 nonce = 3 [default = 0];
+}
+
+message EnableTableResponse {
+  optional uint64 proc_id = 1;
+}
+
+message DisableTableRequest {
+  required TableName table_name = 1;
+  optional uint64 nonce_group = 2 [default = 0];
+  optional uint64 nonce = 3 [default = 0];
+}
+
+message DisableTableResponse {
+  optional uint64 proc_id = 1;
+}
+
+message ModifyTableRequest {
+  required TableName table_name = 1;
+  required TableSchema table_schema = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message ModifyTableResponse {

[06/52] [abbrv] hbase git commit: HBASE-11393 Replication TableCfs should be a PB object rather than a string

HBASE-11393 Replication TableCfs should be a PB object rather than a string


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f39baf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f39baf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f39baf0

Branch: refs/heads/HBASE-14850
Commit: 7f39baf0f4572ff209837d7de5d37554851ecbb7
Parents: 0520097
Author: chenheng 
Authored: Fri Mar 25 14:16:47 2016 +0800
Committer: chenheng 
Committed: Tue Mar 29 10:25:29 2016 +0800

--
 .../client/replication/ReplicationAdmin.java|  170 +--
 .../replication/ReplicationSerDeHelper.java |  315 +
 .../hbase/replication/ReplicationPeer.java  |1 +
 .../replication/ReplicationPeerConfig.java  |   16 +-
 .../replication/ReplicationPeerZKImpl.java  |   76 +-
 .../hbase/replication/ReplicationPeers.java |   19 +-
 .../replication/ReplicationPeersZKImpl.java |  163 +--
 .../replication/ReplicationStateZKBase.java |   19 +
 .../protobuf/generated/ZooKeeperProtos.java | 1155 +-
 .../src/main/protobuf/ZooKeeper.proto   |6 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   10 +-
 .../replication/master/TableCFsUpdater.java |  122 ++
 .../hbase/client/TestReplicaWithCluster.java|6 +-
 .../replication/TestReplicationAdmin.java   |  195 +--
 .../cleaner/TestReplicationHFileCleaner.java|2 +-
 .../replication/TestMasterReplication.java  |   11 +-
 .../replication/TestMultiSlaveReplication.java  |   10 +-
 .../replication/TestPerTableCFReplication.java  |  158 ++-
 .../hbase/replication/TestReplicationBase.java  |4 +-
 .../replication/TestReplicationSmallTests.java  |5 +-
 .../replication/TestReplicationStateBasic.java  |   14 +-
 .../replication/TestReplicationSyncUpTool.java  |4 +-
 .../TestReplicationTrackerZKImpl.java   |   10 +-
 .../replication/TestReplicationWithTags.java|6 +-
 .../replication/master/TestTableCFsUpdater.java |  210 
 ...sibilityLabelReplicationWithExpAsString.java |9 +-
 .../TestVisibilityLabelsReplication.java|5 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |5 +-
 .../src/main/ruby/hbase/replication_admin.rb|   44 +-
 .../src/main/ruby/shell/commands/add_peer.rb|4 +-
 .../ruby/shell/commands/append_peer_tableCFs.rb |2 +-
 .../ruby/shell/commands/remove_peer_tableCFs.rb |4 +-
 .../ruby/shell/commands/set_peer_tableCFs.rb|5 +-
 33 files changed, 2309 insertions(+), 476 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f39baf0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index dcf1957..8ee3a22 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -32,7 +32,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -166,27 +166,6 @@ public class ReplicationAdmin implements Closeable {
   }
 
   /**
-   * Add a new peer cluster to replicate to.
-   * @param id a short name that identifies the cluster
-   * @param clusterKey the concatenation of the slave cluster's
-   * 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
-   * @throws IllegalStateException if there's already one slave since
-   * multi-slave isn't supported yet.
-   * @deprecated Use addPeer(String, ReplicationPeerConfig, Map) instead.
-   */
-  @Deprecated
-  public void addPeer(String id, String clusterKey) throws 
ReplicationException {
-this.addPeer(id, new ReplicationPeerConfig().setClusterKey(clusterKey), 
null);
-  }
-
-  @Deprecated
-  public void addPeer(String id, String clusterKey, String tableCFs)
-throws ReplicationException {
-this.replic

[01/52] [abbrv] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 cda4e6a51 -> a2291fb3a (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
index 8b2b733..45093bb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
@@ -310,6 +312,14 @@ public class TestHBaseAdminNoCluster {
   }
 });
 
Mockito.when(connection.getKeepAliveMasterService()).thenReturn(masterAdmin);
+RpcControllerFactory rpcControllerFactory = 
Mockito.mock(RpcControllerFactory.class);
+
Mockito.when(connection.getRpcControllerFactory()).thenReturn(rpcControllerFactory);
+Mockito.when(rpcControllerFactory.newController()).thenReturn(
+  Mockito.mock(PayloadCarryingRpcController.class));
+
+// we need a real retrying caller
+RpcRetryingCallerFactory callerFactory = new 
RpcRetryingCallerFactory(configuration);
+
Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory);
 
 Admin admin = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 1ba..d4d319a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -285,7 +285,7 @@ public class TestScannersFromClientSide {
   private void verifyExpectedCounts(Table table, Scan scan, int 
expectedRowCount,
   int expectedCellCount) throws Exception {
 ResultScanner scanner = table.getScanner(scan);
-
+
 int rowCount = 0;
 int cellCount = 0;
 Result r = null;
@@ -609,7 +609,7 @@ public class TestScannersFromClientSide {
 byte[] regionName = hri.getRegionName();
 int i = cluster.getServerWith(regionName);
 HRegionServer rs = cluster.getRegionServer(i);
-ProtobufUtil.closeRegion(
+ProtobufUtil.closeRegion(null,
   rs.getRSRpcServices(), rs.getServerName(), regionName);
 long startTime = EnvironmentEdgeManager.currentTime();
 long timeOut = 30;
@@ -627,7 +627,7 @@ public class TestScannersFromClientSide {
 RegionStates states = master.getAssignmentManager().getRegionStates();
 states.regionOffline(hri);
 states.updateRegionState(hri, State.OPENING);
-ProtobufUtil.openRegion(rs.getRSRpcServices(), rs.getServerName(), hri);
+ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), 
hri);
 startTime = EnvironmentEdgeManager.currentTime();
 while (true) {
   if (rs.getOnlineRegion(regionName) != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
new file mode 100644
index 000..b1b3b23
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/DelegatingRpcScheduler.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   

[41/52] [abbrv] hbase git commit: HBASE-15586 Unify human readable numbers in the web UI

HBASE-15586 Unify human readable numbers in the web UI


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2dcd08bc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2dcd08bc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2dcd08bc

Branch: refs/heads/HBASE-14850
Commit: 2dcd08bc3d8ab2f26da85128102926c68a95186f
Parents: a146a71
Author: Enis Soztutar 
Authored: Thu Apr 7 17:07:33 2016 -0700
Committer: Enis Soztutar 
Committed: Thu Apr 7 17:07:33 2016 -0700

--
 .../tmpl/master/RegionServerListTmpl.jamon  | 22 ++-
 .../tmpl/regionserver/BlockCacheTmpl.jamon  | 12 
 .../tmpl/regionserver/RegionListTmpl.jamon  | 16 +++
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   | 29 ++--
 4 files changed, 48 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2dcd08bc/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index c051743..a62d5eb 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -29,6 +29,7 @@ HMaster master;
 org.apache.hadoop.hbase.ServerLoad;
 org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.util.VersionInfo;
+org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
 
 
@@ -148,9 +149,12 @@ for (ServerName serverName: serverNames) {
 
 
 <& serverNameLink; serverName=serverName; serverLoad = sl; &>
-<% sl.getUsedHeapMB() %>m
-<% sl.getMaxHeapMB() %>m
-<% sl.getMemstoreSizeInMB() %>m
+<% TraditionalBinaryPrefix.long2String(sl.getUsedHeapMB()
+  * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
+<% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
+  * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
+<% TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+  * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 
 
 <%java>
@@ -226,10 +230,14 @@ if (sl != null) {
 <& serverNameLink; serverName=serverName; serverLoad = sl; &>
 <% sl.getStores() %>
 <% sl.getStorefiles() %>
-<% sl.getStoreUncompressedSizeMB() %>m
-<% sl.getStorefileSizeInMB() %>mb
-<% sl.getTotalStaticIndexSizeKB() %>k
-<% sl.getTotalStaticBloomSizeKB() %>k
+<% TraditionalBinaryPrefix.long2String(
+  sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 
1) %>
+<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+  * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
+<% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
+  * TraditionalBinaryPrefix.KILO.value, "B", 1) %>
+<% TraditionalBinaryPrefix.long2String(sl.getTotalStaticBloomSizeKB()
+  * TraditionalBinaryPrefix.KILO.value, "B", 1) %>
 
 <%java>
 }  else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2dcd08bc/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 3dcd5e2..1277acc 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -45,7 +45,7 @@ org.apache.hadoop.hbase.io.hfile.bucket.BucketCacheStats;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator;
 org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.Bucket;
-org.apache.hadoop.util.StringUtils;
+org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
 
 
@@ -245,12 +245,14 @@ org.apache.hadoop.util.StringUtils;
 
 
 Size
-<% 
StringUtils.humanReadableInt(cacheConfig.getBlockCache().getCurrentSize()) 
%>
+<% 
TraditionalBinaryPrefix.long2String(cacheConfig.getBlockCache().getCurrentSize(),
+"B", 1) %>
 Current size of block cache in use (bytes)
 
 
 Free
-<% 
StringUtils.humanReadableInt(cacheConfig.getBlockCache().getFreeSize()) %>
+<% 
TraditionalBinaryPrefix.long2String(cacheConfig.getBlockCache().getFreeSize(),
+"B", 1) %>
 The total free memory currently available to store more cache

[14/52] [abbrv] hbase git commit: HBASE-15538 Implement secure async protobuf wal writer

HBASE-15538 Implement secure async protobuf wal writer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d6fd8594
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d6fd8594
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d6fd8594

Branch: refs/heads/HBASE-14850
Commit: d6fd85945130516ba10fe4854ce080e5a2329983
Parents: 9d56105
Author: zhangduo 
Authored: Tue Mar 29 23:02:41 2016 +0800
Committer: zhangduo 
Committed: Thu Mar 31 10:33:35 2016 +0800

--
 .../wal/AbstractProtobufLogWriter.java  | 89 +++-
 .../regionserver/wal/ProtobufLogReader.java |  1 +
 .../wal/SecureAsyncProtobufLogWriter.java   | 54 
 .../wal/SecureProtobufLogReader.java|  2 +
 .../wal/SecureProtobufLogWriter.java| 64 ++
 .../hadoop/hbase/wal/AsyncFSWALProvider.java| 27 --
 .../regionserver/wal/InstrumentedLogWriter.java |  5 ++
 .../wal/TestSecureAsyncWALReplay.java   | 45 ++
 .../apache/hadoop/hbase/wal/IOTestProvider.java |  5 ++
 .../apache/hadoop/hbase/wal/TestSecureWAL.java  | 50 +--
 10 files changed, 266 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d6fd8594/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
index 66f1f54..66fb672 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
@@ -22,8 +22,12 @@ import static 
org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WAL_TRA
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.security.Key;
+import java.security.SecureRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
+import javax.crypto.spec.SecretKeySpec;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +37,16 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.codec.Codec;
+import org.apache.hadoop.hbase.io.crypto.Cipher;
+import org.apache.hadoop.hbase.io.crypto.Encryption;
+import org.apache.hadoop.hbase.io.crypto.Encryptor;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.EncryptionTest;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -63,10 +74,9 @@ public abstract class AbstractProtobufLogWriter {
 return WALCellCodec.create(conf, null, compressionContext);
   }
 
-  protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder 
builder)
-  throws IOException {
+  private WALHeader buildWALHeader0(Configuration conf, WALHeader.Builder 
builder) {
 if (!builder.hasWriterClsName()) {
-  builder.setWriterClsName(ProtobufLogWriter.class.getSimpleName());
+  builder.setWriterClsName(getWriterClassName());
 }
 if (!builder.hasCellCodecClsName()) {
   builder.setCellCodecClsName(WALCellCodec.getWALCellCodecClass(conf));
@@ -74,6 +84,60 @@ public abstract class AbstractProtobufLogWriter {
 return builder.build();
   }
 
+  protected WALHeader buildWALHeader(Configuration conf, WALHeader.Builder 
builder)
+  throws IOException {
+return buildWALHeader0(conf, builder);
+  }
+
+  // should be called in sub classes's buildWALHeader method to build 
WALHeader for secure
+  // environment. Do not forget to override the setEncryptor method as it will 
be called in this
+  // method to init your encryptor.
+  protected final WALHeader buildSecureWALHeader(Configuration conf, 
WALHeader.Builder builder)
+  throws IOException {
+builder.setWriterClsName(getWriterClassName());
+if (conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false)) {
+  EncryptionTest.testKeyProvider(conf);
+  EncryptionTest.testCipherProvider(conf);
+
+  // Get an instance of our cipher
+  final String cipherName =
+  conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, 
HConstants.CIPHER_AES);
+  Ciph

[36/52] [abbrv] hbase git commit: HBASE-15592 Print Procedure WAL content

HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac8cd373
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac8cd373
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac8cd373

Branch: refs/heads/HBASE-14850
Commit: ac8cd373ebe81ed24cab6737154c6902c05ff059
Parents: 3826894
Author: Jerry He 
Authored: Wed Apr 6 21:42:38 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 21:49:07 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac8cd373/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index aff2b15..781bad9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -205,6 +205,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -225,6 +235,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 toStringState(sb);
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac8cd373/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..9c33ac2
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import 
org

[38/52] [abbrv] hbase git commit: HBASE-15400 Use DateTieredCompactor for Date Tiered Compaction (Clara Xiong)

http://git-wip-us.apache.org/repos/asf/hbase/blob/f60fc9d1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
new file mode 100644
index 000..ecccbdd
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
@@ -0,0 +1,325 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HConstants;
+import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
+import 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestDateTieredCompactionPolicy extends TestCompactionPolicy {
+  ArrayList sfCreate(long[] minTimestamps, long[] maxTimestamps, 
long[] sizes)
+  throws IOException {
+ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
+EnvironmentEdgeManager.injectEdge(timeMachine);
+// Has to be  > 0 and < now.
+timeMachine.setValue(1);
+ArrayList ageInDisk = new ArrayList();
+for (int i = 0; i < sizes.length; i++) {
+  ageInDisk.add(0L);
+}
+
+ArrayList ret = Lists.newArrayList();
+for (int i = 0; i < sizes.length; i++) {
+  MockStoreFile msf =
+  new MockStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), 
false, i);
+  msf.setTimeRangeTracker(new TimeRangeTracker(minTimestamps[i], 
maxTimestamps[i]));
+  ret.add(msf);
+}
+return ret;
+  }
+
+  @Override
+  protected void config() {
+super.config();
+
+// Set up policy
+conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY,
+  "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
+conf.setLong(CompactionConfiguration.MAX_AGE_MILLIS_KEY, 100);
+conf.setLong(CompactionConfiguration.INCOMING_WINDOW_MIN_KEY, 3);
+conf.setLong(CompactionConfiguration.BASE_WINDOW_MILLIS_KEY, 6);
+conf.setInt(CompactionConfiguration.WINDOWS_PER_TIER_KEY, 4);
+
conf.setBoolean(CompactionConfiguration.SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, 
false);
+
+// Special settings for compaction policy per window
+this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 
2);
+this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 
12);
+
this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 
1.2F);
+
+conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 20);
+conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 10);
+  }
+
+  void compactEquals(long now, ArrayList candidates, long[] 
expectedFileSizes,
+  long[] expectedBoundaries, boolean isMajor, boolean toCompact) throws 
IOException {
+ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
+EnvironmentEdgeManager.injectEdge(timeMachine);
+timeMachine.setValue(now);
+DateTieredCompactionRequest request;
+if (isMajor) {
+  for (StoreFile file : candidates) {
+((MockStoreFile)file).setIsMajor(true);
+  }
+  Assert.assertEquals(toCompact, ((DateTieredCompactionPolicy) 
store.storeEngine.getCompactionPolicy())
+.shouldPerformMajorCompaction(candidates));
+  request = (DateTieredCompactionRequest) ((DateTieredCompactionPolicy) 
store.storeEngine
+  .getCompactionPolicy()).selectMajorCompaction(c

[16/52] [abbrv] hbase git commit: HBASE-15571 Make MasterProcedureManagerHost accessible through MasterServices

HBASE-15571 Make MasterProcedureManagerHost accessible through MasterServices


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c7f044e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c7f044e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c7f044e

Branch: refs/heads/HBASE-14850
Commit: 8c7f044efbb8a84561d28a221343c7b65263934e
Parents: bcc
Author: tedyu 
Authored: Thu Mar 31 15:45:46 2016 -0700
Committer: tedyu 
Committed: Thu Mar 31 15:45:46 2016 -0700

--
 .../main/java/org/apache/hadoop/hbase/master/HMaster.java   | 9 -
 .../org/apache/hadoop/hbase/master/MasterRpcServices.java   | 6 +++---
 .../java/org/apache/hadoop/hbase/master/MasterServices.java | 6 ++
 .../org/apache/hadoop/hbase/master/TestCatalogJanitor.java  | 6 ++
 4 files changed, 23 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8c7f044e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9829a85..3b5af42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -334,7 +334,7 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   // monitor for snapshot of hbase tables
   SnapshotManager snapshotManager;
   // monitor for distributed procedures
-  MasterProcedureManagerHost mpmHost;
+  private MasterProcedureManagerHost mpmHost;
 
   // it is assigned after 'initialized' guard set to true, so should be 
volatile
   private volatile MasterQuotaManager quotaManager;
@@ -2466,6 +2466,13 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 return this.snapshotManager;
   }
 
+  /**
+   * @return the underlying MasterProcedureManagerHost
+   */
+  public MasterProcedureManagerHost getMasterProcedureManagerHost() {
+return mpmHost;
+  }
+
   @Override
   public ClusterSchema getClusterSchema() {
 return this.clusterSchemaService;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8c7f044e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6a60c2c..cdadff4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -641,7 +641,7 @@ public class MasterRpcServices extends RSRpcServices
 try {
   master.checkInitialized();
   ProcedureDescription desc = request.getProcedure();
-  MasterProcedureManager mpm = master.mpmHost.getProcedureManager(
+  MasterProcedureManager mpm = 
master.getMasterProcedureManagerHost().getProcedureManager(
 desc.getSignature());
   if (mpm == null) {
 throw new ServiceException("The procedure is not registered: "
@@ -676,7 +676,7 @@ public class MasterRpcServices extends RSRpcServices
 try {
   master.checkInitialized();
   ProcedureDescription desc = request.getProcedure();
-  MasterProcedureManager mpm = master.mpmHost.getProcedureManager(
+  MasterProcedureManager mpm = 
master.getMasterProcedureManagerHost().getProcedureManager(
 desc.getSignature());
   if (mpm == null) {
 throw new ServiceException("The procedure is not registered: "
@@ -894,7 +894,7 @@ public class MasterRpcServices extends RSRpcServices
 try {
   master.checkInitialized();
   ProcedureDescription desc = request.getProcedure();
-  MasterProcedureManager mpm = master.mpmHost.getProcedureManager(
+  MasterProcedureManager mpm = 
master.getMasterProcedureManagerHost().getProcedureManager(
 desc.getSignature());
   if (mpm == null) {
 throw new ServiceException("The procedure is not registered: "

http://git-wip-us.apache.org/repos/asf/hbase/blob/8c7f044e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 4d71117..d6802fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/

[07/52] [abbrv] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock - ADDENDUM for failing test

HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock - ADDENDUM for failing test


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/afdfd1bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/afdfd1bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/afdfd1bd

Branch: refs/heads/HBASE-14850
Commit: afdfd1bd9c938fa4b5c2aa9346e559167d550785
Parents: 7f39baf
Author: Enis Soztutar 
Authored: Tue Mar 29 15:02:18 2016 -0700
Committer: Enis Soztutar 
Committed: Tue Mar 29 15:02:18 2016 -0700

--
 .../org/apache/hadoop/hbase/client/HConnectionTestingUtility.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/afdfd1bd/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index dc1ecf1..24ef5b2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -125,6 +125,7 @@ public class HConnectionTestingUtility {
 Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
 RpcRetryingCallerFactory.instantiate(conf,
 RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));
+
Mockito.when(c.getRpcControllerFactory()).thenReturn(Mockito.mock(RpcControllerFactory.class));
 HTableInterface t = Mockito.mock(HTableInterface.class);
 Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
 ResultScanner rs = Mockito.mock(ResultScanner.class);



[20/52] [abbrv] hbase git commit: HBASE-15569 Make Bytes.toStringBinary faster

HBASE-15569 Make Bytes.toStringBinary faster

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff6a3395
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff6a3395
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff6a3395

Branch: refs/heads/HBASE-14850
Commit: ff6a3395821fd1a7857b35b11d45b81743a75e61
Parents: 7d3a89c
Author: Junegunn Choi 
Authored: Thu Mar 31 13:20:26 2016 +0900
Committer: stack 
Committed: Thu Mar 31 21:23:44 2016 -0700

--
 .../main/java/org/apache/hadoop/hbase/util/Bytes.java  | 13 -
 1 file changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff6a3395/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index aae6c4c..7b9eb0b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -626,6 +626,10 @@ public class Bytes implements Comparable {
 return toStringBinary(toBytes(buf));
   }
 
+  private static final char[] HEX_CHARS_UPPER = {
+'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 
'F'
+  };
+
   /**
* Write a printable representation of a byte array. Non-printable
* characters are hex escaped in the format \\x%02X, eg:
@@ -643,13 +647,12 @@ public class Bytes implements Comparable {
 if (off + len > b.length) len = b.length - off;
 for (int i = off; i < off + len ; ++i) {
   int ch = b[i] & 0xFF;
-  if ((ch >= '0' && ch <= '9')
-  || (ch >= 'A' && ch <= 'Z')
-  || (ch >= 'a' && ch <= 'z')
-  || " `~!@#$%^&*()-_=+[]{}|;:'\",.<>/?".indexOf(ch) >= 0) {
+  if (ch >= ' ' && ch <= '~' && ch != '\\') {
 result.append((char)ch);
   } else {
-result.append(String.format("\\x%02X", ch));
+result.append("\\x");
+result.append(HEX_CHARS_UPPER[ch / 0x10]);
+result.append(HEX_CHARS_UPPER[ch % 0x10]);
   }
 }
 return result.toString();



[40/52] [abbrv] hbase git commit: HBASE-15400 Use DateTieredCompactor for Date Tiered Compaction - drop TestDateTieredCompaction.java

HBASE-15400 Use DateTieredCompactor for Date Tiered Compaction - drop 
TestDateTieredCompaction.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a146a71a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a146a71a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a146a71a

Branch: refs/heads/HBASE-14850
Commit: a146a71a332fdd58f9bf4e748861f5b050a5f22f
Parents: f60fc9d
Author: tedyu 
Authored: Thu Apr 7 15:01:00 2016 -0700
Committer: tedyu 
Committed: Thu Apr 7 15:01:00 2016 -0700

--
 .../regionserver/TestDateTieredCompaction.java  | 211 ---
 1 file changed, 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a146a71a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
deleted file mode 100644
index cfb54b7..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-import 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestDateTieredCompaction extends TestCompactionPolicy {
-  ArrayList sfCreate(long[] minTimestamps, long[] maxTimestamps, 
long[] sizes)
-  throws IOException {
-ArrayList ageInDisk = new ArrayList();
-for (int i = 0; i < sizes.length; i++) {
-  ageInDisk.add(0L);
-}
-
-ArrayList ret = Lists.newArrayList();
-for (int i = 0; i < sizes.length; i++) {
-  MockStoreFile msf =
-  new MockStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), 
false, i);
-  msf.setTimeRangeTracker(new TimeRangeTracker(minTimestamps[i], 
maxTimestamps[i]));
-  ret.add(msf);
-}
-return ret;
-  }
-
-  @Override
-  protected void config() {
-super.config();
-
-// Set up policy
-conf.setLong(CompactionConfiguration.MAX_AGE_MILLIS_KEY, 100);
-conf.setLong(CompactionConfiguration.INCOMING_WINDOW_MIN_KEY, 3);
-conf.setLong(CompactionConfiguration.BASE_WINDOW_MILLIS_KEY, 6);
-conf.setInt(CompactionConfiguration.WINDOWS_PER_TIER_KEY, 4);
-conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-  DateTieredCompactionPolicy.class.getName());
-
-// Special settings for compaction policy per window
-this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 
2);
-this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 
12);
-
this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 
1.2F);
-  }
-
-  void compactEquals(long now, ArrayList candidates, long... 
expected)
-  throws IOException {
-Assert.assertTrue(((DateTieredCompactionPolicy) 
store.storeEngine.getCompactionPolicy())
-.needsCompaction(candidates, ImmutableList. of(), now));
-
-List actual =
-((DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy())
-.applyCompactionPolicy(candidates, false, false, now);
-
-Assert.assertEquals(Arrays.toString(expected), 
Arrays.toString(getSizes(actual)));
-  }
-
-  /**
-   * Test for incoming window
-   * @throws IOException with error
-   */
-  @Test
-  public void incomingWindow(

[27/52] [abbrv] hbase git commit: HBASE-15568 Procedure V2 - Remove CreateTableHandler in HBase Apache 2.0 release (Stephen Yuan Jiang)

HBASE-15568 Procedure V2 - Remove CreateTableHandler in HBase Apache 2.0 
release (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89d75016
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89d75016
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89d75016

Branch: refs/heads/HBASE-14850
Commit: 89d750166d32f1505c5278fd07d4fb575d6caa3d
Parents: 2d8e0a0
Author: Stephen Yuan Jiang 
Authored: Fri Apr 1 01:31:09 2016 -0700
Committer: Stephen Yuan Jiang 
Committed: Fri Apr 1 01:31:09 2016 -0700

--
 .../master/handler/CreateTableHandler.java  | 311 ---
 .../master/handler/TestCreateTableHandler.java  | 178 ---
 2 files changed, 489 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/89d75016/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
deleted file mode 100644
index b884544..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.handler;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableDescriptor;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-
-/**
- * Handler to create a table.
- */
-@InterfaceAudience.Private
-public class CreateTableHandler extends EventHandler {
-  private static final Log LOG = LogFactory.getLog(CreateTableHandler.class);
-  protected final MasterFileSystem fileSystemManager;
-  protected final HTableDescriptor hTableDescriptor;
-  protected final Configuration conf;
-  private final AssignmentManager assignmentManager;
-  private final TableLockManager tableLockManager;
-  private final HRegionInfo [] newRegions;
-  private final MasterServices masterServices;
-  private final TableLock tableLock;
-  private User activeUser;
-
-  public CreateTableHandler(Server server, MasterFileSystem fileSystemManager,
-  

[48/52] [abbrv] hbase git commit: HBASE-15418 Clean up un-used warning in test util

HBASE-15418 Clean up un-used warning in test util


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a2291fb3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a2291fb3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a2291fb3

Branch: refs/heads/HBASE-14850
Commit: a2291fb3a3231b83e95ed4cae71c0ca63ca7b25d
Parents: 6536802
Author: Elliott Clark 
Authored: Fri Mar 25 15:44:06 2016 -0700
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 .../bin/start_local_hbase_and_wait.sh   |  9 +-
 .../bin/stop_local_hbase_and_wait.sh|  2 +-
 hbase-native-client/core/BUCK   |  6 
 .../core/native-client-test-env.cc  |  9 --
 hbase-native-client/core/test_env.h | 32 
 5 files changed, 15 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a2291fb3/hbase-native-client/bin/start_local_hbase_and_wait.sh
--
diff --git a/hbase-native-client/bin/start_local_hbase_and_wait.sh 
b/hbase-native-client/bin/start_local_hbase_and_wait.sh
index 64d0b68..cfc71f9 100755
--- a/hbase-native-client/bin/start_local_hbase_and_wait.sh
+++ b/hbase-native-client/bin/start_local_hbase_and_wait.sh
@@ -17,10 +17,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Clean up from any other tests.
+rm -rf /tmp/hbase-*
+
+# Start the master/regionservers.
 $PWD/../bin/start-hbase.sh
 
-until [ $(curl -s -o /dev/null -I -w "%{http_code}" http://localhost:16010) == 
"200" ]
+until [ $(curl -s -o /dev/null -I -w "%{http_code}" 
http://localhost:16010/jmx) == "200" ]
 do
  printf "Waiting for local HBase cluster to start\n"
  sleep 1
 done
+
+# This sucks, but master can easily be up and meta not be assigned yet.
+sleep 30

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2291fb3/hbase-native-client/bin/stop_local_hbase_and_wait.sh
--
diff --git a/hbase-native-client/bin/stop_local_hbase_and_wait.sh 
b/hbase-native-client/bin/stop_local_hbase_and_wait.sh
index 4e89334..761412a 100755
--- a/hbase-native-client/bin/stop_local_hbase_and_wait.sh
+++ b/hbase-native-client/bin/stop_local_hbase_and_wait.sh
@@ -17,7 +17,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-$PWD/../bin/stop-hbase.sh
+ps aux | grep proc_master | awk '{print $2}' | xargs kill -9
 
 while [ $(curl -s -o /dev/null -I -w "%{http_code}" http://localhost:16010) == 
"200" ]
 do

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2291fb3/hbase-native-client/core/BUCK
--
diff --git a/hbase-native-client/core/BUCK b/hbase-native-client/core/BUCK
index 817b5a0..d1e89d1 100644
--- a/hbase-native-client/core/BUCK
+++ b/hbase-native-client/core/BUCK
@@ -51,9 +51,6 @@ cxx_library(name="core",
 ], )
 
 cxx_test(name="simple-test",
- headers=[
- "test_env.h",
- ],
  srcs=[
  "native-client-test-env.cc",
  "simple-native-client-test.cc",
@@ -63,9 +60,6 @@ cxx_test(name="simple-test",
  ],
  run_test_separately=True, )
 cxx_test(name="location-cache-test",
- headers=[
- "test_env.h",
- ],
  srcs=[
  "native-client-test-env.cc",
  "location-cache-test.cc",

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2291fb3/hbase-native-client/core/native-client-test-env.cc
--
diff --git a/hbase-native-client/core/native-client-test-env.cc 
b/hbase-native-client/core/native-client-test-env.cc
index a86961f..07f30a6 100644
--- a/hbase-native-client/core/native-client-test-env.cc
+++ b/hbase-native-client/core/native-client-test-env.cc
@@ -18,18 +18,21 @@
  */
 
 #include 
-#include 
 
 namespace {
 
 class NativeClientTestEnv : public ::testing::Environment {
  public:
   void SetUp() override {
-init_test_env();
+// start local HBase cluster to be reused by all tests
+auto result = system("bin/start_local_hbase_and_wait.sh");
+ASSERT_EQ(0, result);
   }
 
   void TearDown() override {
-clean_test_env();
+// shutdown local HBase cluster
+auto result = system("bin/stop_local_hbase_and_wait.sh");
+ASSERT_EQ(0, result);
   }
 };
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2291fb3/hbase-native-client/core/test_env.h
--
diff --git a/hbase-native-client/core/test_env.h 
b/hbase-native-client/core/test_env.h
de

[24/52] [abbrv] hbase git commit: HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan Jiang)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d40c1f7..9ed9d7a 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -1380,6 +1380,224 @@ public final class MasterProcedureProtos {
   }
 
   /**
+   * Protobuf enum {@code hbase.pb.CloneSnapshotState}
+   */
+  public enum CloneSnapshotState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * CLONE_SNAPSHOT_PRE_OPERATION = 1;
+ */
+CLONE_SNAPSHOT_PRE_OPERATION(0, 1),
+/**
+ * CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;
+ */
+CLONE_SNAPSHOT_WRITE_FS_LAYOUT(1, 2),
+/**
+ * CLONE_SNAPSHOT_ADD_TO_META = 3;
+ */
+CLONE_SNAPSHOT_ADD_TO_META(2, 3),
+/**
+ * CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;
+ */
+CLONE_SNAPSHOT_ASSIGN_REGIONS(3, 4),
+/**
+ * CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;
+ */
+CLONE_SNAPSHOT_UPDATE_DESC_CACHE(4, 5),
+/**
+ * CLONE_SNAPSHOT_POST_OPERATION = 6;
+ */
+CLONE_SNAPSHOT_POST_OPERATION(5, 6),
+;
+
+/**
+ * CLONE_SNAPSHOT_PRE_OPERATION = 1;
+ */
+public static final int CLONE_SNAPSHOT_PRE_OPERATION_VALUE = 1;
+/**
+ * CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;
+ */
+public static final int CLONE_SNAPSHOT_WRITE_FS_LAYOUT_VALUE = 2;
+/**
+ * CLONE_SNAPSHOT_ADD_TO_META = 3;
+ */
+public static final int CLONE_SNAPSHOT_ADD_TO_META_VALUE = 3;
+/**
+ * CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;
+ */
+public static final int CLONE_SNAPSHOT_ASSIGN_REGIONS_VALUE = 4;
+/**
+ * CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;
+ */
+public static final int CLONE_SNAPSHOT_UPDATE_DESC_CACHE_VALUE = 5;
+/**
+ * CLONE_SNAPSHOT_POST_OPERATION = 6;
+ */
+public static final int CLONE_SNAPSHOT_POST_OPERATION_VALUE = 6;
+
+
+public final int getNumber() { return value; }
+
+public static CloneSnapshotState valueOf(int value) {
+  switch (value) {
+case 1: return CLONE_SNAPSHOT_PRE_OPERATION;
+case 2: return CLONE_SNAPSHOT_WRITE_FS_LAYOUT;
+case 3: return CLONE_SNAPSHOT_ADD_TO_META;
+case 4: return CLONE_SNAPSHOT_ASSIGN_REGIONS;
+case 5: return CLONE_SNAPSHOT_UPDATE_DESC_CACHE;
+case 6: return CLONE_SNAPSHOT_POST_OPERATION;
+default: return null;
+  }
+}
+
+public static com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static com.google.protobuf.Internal.EnumLiteMap
+internalValueMap =
+  new com.google.protobuf.Internal.EnumLiteMap() {
+public CloneSnapshotState findValueByNumber(int number) {
+  return CloneSnapshotState.valueOf(number);
+}
+  };
+
+public final com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(index);
+}
+public final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(12);
+}
+
+private static final CloneSnapshotState[] VALUES = values();
+
+public static CloneSnapshotState valueOf(
+com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int index;
+private final int value;
+
+private CloneSnapshotState(int index, int value) {
+  this.index = index;
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.CloneSnapshotState)
+  }
+
+  /**
+   * Protobuf enum {@code hbase.pb.RestoreSnapshotState}
+   */
+  public enum RestoreSnapshotState
+  implements com.google.protobuf.ProtocolMessageEnum {
+/**
+ * RESTORE_SNAPSHOT_PRE_OPERATION = 1;
+ */
+RESTORE_SNAPSHOT_PRE_OPERATION(0, 1),
+/**
+ * RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2;
+ */
+RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR(1, 2),
+/**
+ * RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3;
+ */
+RESTORE_SNAPSHOT_WRITE_FS_LAYOUT(2, 3),
+  

[39/52] [abbrv] hbase git commit: HBASE-15400 Use DateTieredCompactor for Date Tiered Compaction (Clara Xiong)

HBASE-15400 Use DateTieredCompactor for Date Tiered Compaction (Clara Xiong)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f60fc9d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f60fc9d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f60fc9d1

Branch: refs/heads/HBASE-14850
Commit: f60fc9d1a0625970aa2fd14d29e4c1266f9571b7
Parents: d393603
Author: tedyu 
Authored: Thu Apr 7 14:58:59 2016 -0700
Committer: tedyu 
Committed: Thu Apr 7 14:58:59 2016 -0700

--
 .../regionserver/DateTieredStoreEngine.java | 102 ++
 .../hadoop/hbase/regionserver/HStore.java   |   2 +-
 .../hadoop/hbase/regionserver/StoreFile.java|  34 +-
 .../compactions/CompactionConfiguration.java|  10 +
 .../compactions/CompactionPolicy.java   |   2 +-
 .../compactions/CompactionRequest.java  |  16 +-
 .../compactions/DateTieredCompactionPolicy.java | 358 +--
 .../DateTieredCompactionRequest.java|  44 +++
 .../compactions/ExploringCompactionPolicy.java  |   4 +-
 .../compactions/FIFOCompactionPolicy.java   |   5 +-
 .../compactions/RatioBasedCompactionPolicy.java | 318 
 .../compactions/SortedCompactionPolicy.java | 239 +
 .../compactions/StripeCompactionPolicy.java |   3 +-
 .../hbase/regionserver/MockStoreFile.java   |  40 ++-
 .../TestDateTieredCompactionPolicy.java | 325 +
 .../compactions/EverythingPolicy.java   |   2 +-
 16 files changed, 1102 insertions(+), 402 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f60fc9d1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
new file mode 100644
index 000..773baab
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
+import 
org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactor;
+import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+import org.apache.hadoop.hbase.security.User;
+
+/**
+ * HBASE-15400 This store engine allows us to store data in date tiered layout 
with exponential
+ * sizing so that the more recent data has more granularity. Time-range scan 
will perform the
+ * best with most recent data. When data reach maxAge, they are compacted in 
fixed-size time
+ * windows for TTL and archiving. Please refer to design spec for more details.
+ * 
https://docs.google.com/document/d/1_AmlNb2N8Us1xICsTeGDLKIqL6T-oHoRLZ323MG_uy8/edit#heading=h.uk6y5pd3oqgx
+ */
+@InterfaceAudience.Private
+public class DateTieredStoreEngine extends StoreEngine {
+  @Override
+  public boolean needsCompaction(List filesCompacting) {
+return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(),
+  filesCompacting);
+  }
+
+  @Override
+  public CompactionContext createCompaction() throws IOException {
+return new DateTieredCompactionContext();
+  }
+
+  @Override
+  protected void createComponents(Configuration conf, Store store, 
CellComparator kvComparator)
+ 

[25/52] [abbrv] hbase git commit: HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan Jiang)

HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan 
Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1d5c3d2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1d5c3d2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1d5c3d2

Branch: refs/heads/HBASE-14850
Commit: e1d5c3d269129cf8c313ba29d9a9f694b799170b
Parents: ff6a339
Author: Stephen Yuan Jiang 
Authored: Thu Mar 31 21:49:13 2016 -0700
Committer: Stephen Yuan Jiang 
Committed: Thu Mar 31 21:49:13 2016 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   36 +-
 .../hbase/client/ConnectionImplementation.java  |7 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  199 +-
 .../hbase/procedure2/ProcedureExecutor.java |2 +-
 .../generated/MasterProcedureProtos.java| 5804 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 1040 ++--
 hbase-protocol/src/main/protobuf/Master.proto   |8 +-
 .../src/main/protobuf/MasterProcedure.proto |   40 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   32 +-
 .../procedure/CloneSnapshotProcedure.java   |  522 ++
 .../master/procedure/CreateTableProcedure.java  |   16 +-
 .../procedure/MasterDDLOperationHelper.java |   13 +
 .../procedure/RestoreSnapshotProcedure.java |  526 ++
 .../master/snapshot/CloneSnapshotHandler.java   |  195 -
 .../master/snapshot/RestoreSnapshotHandler.java |  245 -
 .../hbase/master/snapshot/SnapshotManager.java  |  330 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |   14 +-
 .../client/TestSnapshotCloneIndependence.java   |1 -
 .../MasterProcedureTestingUtility.java  |4 +-
 .../procedure/TestCloneSnapshotProcedure.java   |  239 +
 .../procedure/TestCreateTableProcedure.java |4 +-
 .../procedure/TestDeleteTableProcedure.java |8 +-
 .../TestMasterFailoverWithProcedures.java   |2 +-
 .../master/procedure/TestProcedureAdmin.java|2 +-
 .../procedure/TestRestoreSnapshotProcedure.java |  291 +
 25 files changed, 8200 insertions(+), 1380 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c3b524b..54d2cb9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1330,6 +1330,22 @@ public interface Admin extends Abortable, Closeable {
   void restoreSnapshot(final String snapshotName) throws IOException, 
RestoreSnapshotException;
 
   /**
+   * Restore the specified snapshot on the original table. (The table must be 
disabled) If the
+   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is 
set to true, a
+   * snapshot of the current table is taken before executing the restore 
operation. In case of
+   * restore failure, the failsafe snapshot will be restored. If the restore 
completes without
+   * problem the failsafe snapshot is deleted.
+   *
+   * @param snapshotName name of the snapshot to restore
+   * @throws IOException if a remote or network exception occurs
+   * @throws RestoreSnapshotException if snapshot failed to be restored
+   * @return the result of the async restore snapshot. You can use 
Future.get(long, TimeUnit)
+   *to wait on the operation to complete.
+   */
+  Future restoreSnapshotAsync(final String snapshotName)
+  throws IOException, RestoreSnapshotException;
+
+  /**
* Restore the specified snapshot on the original table. (The table must be 
disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is 
taken before
* executing the restore operation. In case of restore failure, the failsafe 
snapshot will be
@@ -1360,7 +1376,7 @@ public interface Admin extends Abortable, Closeable {
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted 
incorrectly
*/
-  void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
+  void restoreSnapshot(final String snapshotName, final boolean 
takeFailSafeSnapshot)
   throws IOException, RestoreSnapshotException;
 
   /**
@@ -1390,6 +1406,24 @@ public interface Admin extends Abortable, Closeable {
   throws IOException, TableExistsException, RestoreSnapshotException;
 
   /**
+   * Create a new table by cloning the snapshot content, but does not block
+   * and wait for it be completely cloned.
+   * You can use Future.get(long, TimeUnit) to wait on the o

[03/52] [abbrv] hbase git commit: HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing a deadlock

HBASE-15295 MutateTableAccess.multiMutate() does not get high priority causing 
a deadlock


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05200976
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05200976
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05200976

Branch: refs/heads/HBASE-14850
Commit: 05200976110135abb60f9b879b9b830671c07141
Parents: cbf9c1e
Author: Enis Soztutar 
Authored: Wed Mar 23 12:30:41 2016 -0700
Committer: Enis Soztutar 
Committed: Mon Mar 28 17:56:32 2016 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|   1 +
 .../hbase/client/BufferedMutatorImpl.java   |   2 +-
 .../hadoop/hbase/client/ClusterConnection.java  |  20 +-
 .../hbase/client/ConnectionConfiguration.java   | 144 ++
 .../hbase/client/ConnectionImplementation.java  |  69 ++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 511 +--
 .../org/apache/hadoop/hbase/client/HTable.java  |  55 +-
 .../hadoop/hbase/client/TableConfiguration.java | 144 --
 .../hadoop/hbase/ipc/AbstractRpcClient.java |   2 +-
 .../hadoop/hbase/ipc/CoprocessorRpcChannel.java |  11 +-
 .../hbase/ipc/MasterCoprocessorRpcChannel.java  |  18 +-
 .../hbase/ipc/RegionCoprocessorRpcChannel.java  |  46 +-
 .../ipc/RegionServerCoprocessorRpcChannel.java  |  10 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 168 +++---
 .../security/access/AccessControlClient.java|  46 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  18 +-
 .../hbase/client/TestSnapshotFromAdmin.java |  31 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   |   4 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   2 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |   5 +-
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  12 +-
 .../hbase/master/RegionPlacementMaintainer.java |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |  34 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|  14 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  79 +++
 .../hadoop/hbase/TestMetaTableLocator.java  |   8 +-
 .../hbase/client/HConnectionTestingUtility.java |   5 +
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   4 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   6 +-
 .../hbase/client/TestFromClientSide3.java   |  10 +-
 .../hbase/client/TestHBaseAdminNoCluster.java   |  10 +
 .../client/TestScannersFromClientSide.java  |   6 +-
 .../hbase/ipc/DelegatingRpcScheduler.java   |  76 +++
 .../TestLoadIncrementalHFilesSplitRecovery.java |   3 +-
 .../hbase/master/TestClockSkewDetection.java|  20 +-
 .../regionserver/TestRegionServerNoMaster.java  |   2 +-
 .../hbase/security/access/SecureTestUtil.java   |  12 +-
 .../security/access/TestAccessController.java   |  20 +-
 .../security/access/TestNamespaceCommands.java  |  13 +-
 41 files changed, 1095 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 13ba23d..71f87f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -202,6 +202,7 @@ public class HRegionInfo implements Comparable 
{
   public final static byte[] HIDDEN_START_KEY = 
Bytes.toBytes("hidden-start-key");
 
   /** HRegionInfo for first meta region */
+  // TODO: How come Meta regions still do not have encoded region names? Fix.
   public static final HRegionInfo FIRST_META_REGIONINFO =
   new HRegionInfo(1L, TableName.META_TABLE_NAME);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/05200976/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index ef3f7e9..01aaec5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -88,7 +88,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
 this.pool = params.getPool();
 this.listener = params.getListener();
 
-TableConfiguration tableConf = new TableConfiguration(conf);
+ConnectionConfiguration tableConf = new Connec

[17/52] [abbrv] hbase git commit: HBASE-15572 Adding optional timestamp semantics to HBase-Spark (Weiqing Yang)

HBASE-15572 Adding optional timestamp semantics to HBase-Spark (Weiqing Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eec27ad7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eec27ad7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eec27ad7

Branch: refs/heads/HBASE-14850
Commit: eec27ad7ef7b5078f705301bd3042991d4d4b4d9
Parents: 8c7f044
Author: tedyu 
Authored: Thu Mar 31 19:08:33 2016 -0700
Committer: tedyu 
Committed: Thu Mar 31 19:08:33 2016 -0700

--
 .../hadoop/hbase/spark/DefaultSource.scala  |   8 +-
 .../spark/datasources/HBaseSparkConf.scala  |   5 +
 .../spark/datasources/HBaseTableScanRDD.scala   |  26 +
 .../hadoop/hbase/spark/DefaultSourceSuite.scala | 105 ---
 4 files changed, 130 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eec27ad7/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
index 7970816..c71ee4e 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
@@ -88,6 +88,12 @@ case class HBaseRelation (
 userSpecifiedSchema: Option[StructType]
   )(@transient val sqlContext: SQLContext)
   extends BaseRelation with PrunedFilteredScan  with InsertableRelation  with 
Logging {
+
+  val timestamp = parameters.get(HBaseSparkConf.TIMESTAMP).map(_.toLong)
+  val minTimeStamp = parameters.get(HBaseSparkConf.MIN_TIMESTAMP).map(_.toLong)
+  val maxTimeStamp = parameters.get(HBaseSparkConf.MAX_TIMESTAMP).map(_.toLong)
+  val maxVersions = parameters.get(HBaseSparkConf.MAX_VERSIONS).map(_.toInt)
+
   val catalog = HBaseTableCatalog(parameters)
   def tableName = catalog.name
   val configResources = 
parameters.getOrElse(HBaseSparkConf.HBASE_CONFIG_RESOURCES_LOCATIONS, "")
@@ -204,7 +210,7 @@ case class HBaseRelation (
 System.arraycopy(x, 0, rBytes, offset, x.length)
 offset += x.length
   }
-  val put = new Put(rBytes)
+  val put = timestamp.fold(new Put(rBytes))(new Put(rBytes, _))
 
   colsIdxedFields.foreach { case (x, y) =>
 val b = Utils.toBytes(row(x), y)

http://git-wip-us.apache.org/repos/asf/hbase/blob/eec27ad7/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
index ca44d42..2e4c0b3 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseSparkConf.scala
@@ -36,4 +36,9 @@ object HBaseSparkConf{
   val USE_HBASE_CONTEXT = "hbase.use.hbase.context"
   val PUSH_DOWN_COLUMN_FILTER = "hbase.pushdown.column.filter"
   val defaultPushDownColumnFilter = true
+
+  val TIMESTAMP = "timestamp"
+  val MIN_TIMESTAMP = "minTimestamp"
+  val MAX_TIMESTAMP = "maxTimestamp"
+  val MAX_VERSIONS = "maxVersions"
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/eec27ad7/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
index 2e05651..886114a 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
@@ -105,6 +105,7 @@ class HBaseTableScanRDD(relation: HBaseRelation,
   val gets = new ArrayList[Get]()
   x.foreach{ y =>
 val g = new Get(y)
+handleTimeSemantics(g)
 columns.foreach { d =>
   if (!d.isRowKey) {
 g.addColumn(d.cfBytes, d.colBytes)
@@ -157,6 +158,7 @@ class HBaseTableScanRDD(relation: HBaseRelation,
   case (Some(Bound(a, b)), None) => new Scan(a)
   case (None, None) => new Scan()
 }
+handleTimeSemantics(scan)
 
 columns.foreach { d =>
   if (!d.isRowKey) {
@@ -226,6 +228,30 @@ class HBaseTableScanRDD(relation: HBaseRelation,
 } ++ gIt
 rIts
   }
+
+  private def handleTimeSemantics(query: Query): Unit = {

[42/52] [abbrv] hbase git commit: HBASE-15537 Make multi WAL work with WALs other than FSHLog

HBASE-15537 Make multi WAL work with WALs other than FSHLog


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/394b89d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/394b89d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/394b89d1

Branch: refs/heads/HBASE-14850
Commit: 394b89d153a9bef67a84633f4ff68aff26d53832
Parents: 2dcd08b
Author: zhangduo 
Authored: Wed Apr 6 17:04:28 2016 +0800
Committer: zhangduo 
Committed: Fri Apr 8 10:36:16 2016 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   4 +
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   4 +-
 .../hbase/wal/RegionGroupingProvider.java   | 138 ---
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  30 ++--
 ...ReplicationEndpointWithMultipleAsyncWAL.java |  36 +
 .../TestReplicationEndpointWithMultipleWAL.java |   2 +
 ...lMasterRSCompressedWithMultipleAsyncWAL.java |  37 +
 ...onKillMasterRSCompressedWithMultipleWAL.java |   2 +
 ...plicationSyncUpToolWithMultipleAsyncWAL.java |  37 +
 ...estReplicationSyncUpToolWithMultipleWAL.java |   2 +
 .../wal/TestBoundedRegionGroupingStrategy.java  | 131 ++
 11 files changed, 273 insertions(+), 150 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/394b89d1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index b89488a..e4c4eb3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -678,6 +678,10 @@ public abstract class AbstractFSWAL implements WAL {
 // NewPath could be equal to oldPath if replaceWriter fails.
 newPath = replaceWriter(oldPath, newPath, nextWriter);
 tellListenersAboutPostLogRoll(oldPath, newPath);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Create new " + getClass().getSimpleName() + " writer with 
pipeline: "
+  + Arrays.toString(getPipeline()));
+}
 // Can we delete any of the old log files?
 if (getNumRolledLogFiles() > 0) {
   cleanOldLogs();

http://git-wip-us.apache.org/repos/asf/hbase/blob/394b89d1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index 2f5c299..e495e99 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -82,7 +82,7 @@ public abstract class AbstractFSWALProvider> implemen
* @param factory factory that made us, identity used for FS layout. may not 
be null
* @param conf may not be null
* @param listeners may be null
-   * @param providerId differentiate between providers from one facotry, used 
for FS layout. may be
+   * @param providerId differentiate between providers from one factory, used 
for FS layout. may be
*  null
*/
   @Override
@@ -109,7 +109,7 @@ public abstract class AbstractFSWALProvider> implemen
   }
 
   @Override
-  public WAL getWAL(byte[] identifier, byte[] namespace) throws IOException {
+  public T getWAL(byte[] identifier, byte[] namespace) throws IOException {
 T walCopy = wal;
 if (walCopy == null) {
   // only lock when need to create wal, and need to lock since

http://git-wip-us.apache.org/repos/asf/hbase/blob/394b89d1/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
index 0aeaccf..b447e94 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/RegionGroupingProvider.java
@@ -18,34 +18,31 @@
  */
 package org.apache.hadoop.hbase.wal;
 
-import static 
org.apache.hadoop.hbase.wal.DefaultWALProvider.META_WAL_PROVIDER_ID;
-import static 
org.apache.hadoop.hbase.wal.DefaultWALProvider.WAL_FILE_NAME_DELIMITER;
+import static 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider.META_WAL_PROVIDER_ID

[52/52] [abbrv] hbase git commit: HBASE-14853 Add on protobuf to c++ chain

HBASE-14853 Add on protobuf to c++ chain


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2887761
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2887761
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2887761

Branch: refs/heads/HBASE-14850
Commit: b2887761504f272fab2339168b706232e1f96728
Parents: 33cabe4
Author: Elliott Clark 
Authored: Mon Dec 28 15:33:52 2015 -0800
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 hbase-native-client/bin/start-docker.sh |   1 +
 hbase-native-client/core/BUCK   |   1 +
 hbase-native-client/core/client.cc  |   4 +
 hbase-native-client/core/client.h   |   2 +
 hbase-native-client/if/AccessControl.proto  | 123 +++
 hbase-native-client/if/Admin.proto  | 309 
 hbase-native-client/if/Aggregate.proto  |  63 ++
 hbase-native-client/if/Authentication.proto |  82 ++
 hbase-native-client/if/BUCK |  36 +
 hbase-native-client/if/Cell.proto   |  67 ++
 hbase-native-client/if/Client.proto | 472 +++
 hbase-native-client/if/ClusterId.proto  |  34 +
 hbase-native-client/if/ClusterStatus.proto  | 224 ++
 hbase-native-client/if/Comparator.proto |  74 ++
 hbase-native-client/if/Encryption.proto |  33 +
 hbase-native-client/if/ErrorHandling.proto  |  58 ++
 hbase-native-client/if/FS.proto |  45 ++
 hbase-native-client/if/Filter.proto | 170 
 hbase-native-client/if/HBase.proto  | 258 ++
 hbase-native-client/if/HFile.proto  |  49 ++
 hbase-native-client/if/LoadBalancer.proto   |  29 +
 hbase-native-client/if/MapReduce.proto  |  37 +
 hbase-native-client/if/Master.proto | 778 +++
 hbase-native-client/if/MasterProcedure.proto| 245 ++
 hbase-native-client/if/MultiRowMutation.proto   |  45 ++
 hbase-native-client/if/Procedure.proto  | 119 +++
 hbase-native-client/if/Quota.proto  |  76 ++
 hbase-native-client/if/RPC.proto| 136 
 hbase-native-client/if/RegionNormalizer.proto   |  28 +
 hbase-native-client/if/RegionServerStatus.proto | 158 
 hbase-native-client/if/RowProcessor.proto   |  45 ++
 hbase-native-client/if/SecureBulkLoad.proto |  72 ++
 hbase-native-client/if/Snapshot.proto   |  66 ++
 hbase-native-client/if/Tracing.proto|  33 +
 hbase-native-client/if/VisibilityLabels.proto   |  83 ++
 hbase-native-client/if/WAL.proto| 172 
 hbase-native-client/if/ZooKeeper.proto  | 155 
 hbase-native-client/rpc/CMakeLists.txt  |  17 -
 hbase-native-client/third-party/BUCK|   4 +-
 39 files changed, 4385 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2887761/hbase-native-client/bin/start-docker.sh
--
diff --git a/hbase-native-client/bin/start-docker.sh 
b/hbase-native-client/bin/start-docker.sh
index bf38912..4426705 100755
--- a/hbase-native-client/bin/start-docker.sh
+++ b/hbase-native-client/bin/start-docker.sh
@@ -20,6 +20,7 @@ set -e
 set -x
 
 eval "$(docker-machine env docker-vm)"
+eval "$(docker-machine env dinghy)"
 docker build -t hbase_native .
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2887761/hbase-native-client/core/BUCK
--
diff --git a/hbase-native-client/core/BUCK b/hbase-native-client/core/BUCK
index 2e4e755..ef027a1 100644
--- a/hbase-native-client/core/BUCK
+++ b/hbase-native-client/core/BUCK
@@ -41,6 +41,7 @@ cxx_binary(
   "scanner.cc",
],
deps = [
+   "//if:if",
"//third-party:folly",
"//third-party:wangle",
],

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2887761/hbase-native-client/core/client.cc
--
diff --git a/hbase-native-client/core/client.cc 
b/hbase-native-client/core/client.cc
index 98cf38a..a04daee 100644
--- a/hbase-native-client/core/client.cc
+++ b/hbase-native-client/core/client.cc
@@ -24,9 +24,13 @@
 #include 
 #include 
 
+#include "if/ZooKeeper.pb.h"
+
 using namespace folly;
+using namespace hbase::pb;
 
 int main(int argc, char *argv[]) {
+  MetaRegionServer mrs;
   google::ParseCommandLineFlags(&argc, &argv, true);
   google::InitGoogleLogging(argv[0]);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b2887761/hbase-native-client/core/client.h
--
diff --git a/hbase-native-client/core/client.h 
b/hbase-native-client/core/

[50/52] [abbrv] hbase git commit: HBASE-15078 Added ability to start/stop hbase local cluster for tests, global test_env for gtest, small changes to dockerfile and docker run.

HBASE-15078 Added ability to start/stop hbase local cluster for tests, global 
test_env for gtest, small changes to dockerfile and docker run.

Added check ~/.m2 folder exists; moved scripts to ./bin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33cabe45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33cabe45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33cabe45

Branch: refs/heads/HBASE-14850
Commit: 33cabe45a0cfce88b19c5e45aea79ccf333af8f3
Parents: 1f0d4c9
Author: Mikhail Antonov 
Authored: Wed Jan 6 15:08:21 2016 -0800
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 hbase-native-client/.buckconfig |  2 +-
 hbase-native-client/Dockerfile  |  2 +-
 hbase-native-client/bin/start-docker.sh | 10 -
 .../bin/start_local_hbase_and_wait.sh   | 26 
 .../bin/stop_local_hbase_and_wait.sh| 26 
 hbase-native-client/core/BUCK   | 15 +++
 .../core/HBaseNativeClientTestEnv.cc| 42 
 .../core/SampleNativeClientTest.cc  | 28 +
 hbase-native-client/core/test_env.h | 30 ++
 9 files changed, 178 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/33cabe45/hbase-native-client/.buckconfig
--
diff --git a/hbase-native-client/.buckconfig b/hbase-native-client/.buckconfig
index 3227a2a..402ef27 100644
--- a/hbase-native-client/.buckconfig
+++ b/hbase-native-client/.buckconfig
@@ -1,2 +1,2 @@
 [cxx]
-  gtest_dep = //third-party/googletest/googletest:google-test
+  gtest_dep = //third-party:google-test

http://git-wip-us.apache.org/repos/asf/hbase/blob/33cabe45/hbase-native-client/Dockerfile
--
diff --git a/hbase-native-client/Dockerfile b/hbase-native-client/Dockerfile
index 70e823b..5f17f04 100644
--- a/hbase-native-client/Dockerfile
+++ b/hbase-native-client/Dockerfile
@@ -17,6 +17,6 @@
 
 FROM pjameson/buck-folly-watchman
 
-RUN apt-get install -y libprotobuf-dev protobuf-compiler clang-format-3.7 vim
+RUN apt-get install -y libprotobuf-dev protobuf-compiler clang-format-3.7 vim 
maven inetutils-ping
 
 WORKDIR /usr/local/src/hbase/hbase-native-client

http://git-wip-us.apache.org/repos/asf/hbase/blob/33cabe45/hbase-native-client/bin/start-docker.sh
--
diff --git a/hbase-native-client/bin/start-docker.sh 
b/hbase-native-client/bin/start-docker.sh
index 1c9b02e..bf38912 100755
--- a/hbase-native-client/bin/start-docker.sh
+++ b/hbase-native-client/bin/start-docker.sh
@@ -28,5 +28,13 @@ if [[ ! -d third-party/googletest ]]; then
 git clone https://github.com/google/googletest.git 
third-party/googletest
 fi
 
+if [[ ! -d ~/.m2 ]]; then
+echo "~/.m2 directory doesn't exist. Check Apache Maven is installed."
+exit 1
+fi;
 
-docker run -v ${PWD}/..:/usr/local/src/hbase -it hbase_native  /bin/bash
+docker run -p 16010:16010/tcp \
+   -e "JAVA_HOME=/usr/lib/jvm/java-8-oracle" \
+   -v ${PWD}/..:/usr/local/src/hbase \
+   -v ~/.m2:/root/.m2 \
+   -it hbase_native  /bin/bash

http://git-wip-us.apache.org/repos/asf/hbase/blob/33cabe45/hbase-native-client/bin/start_local_hbase_and_wait.sh
--
diff --git a/hbase-native-client/bin/start_local_hbase_and_wait.sh 
b/hbase-native-client/bin/start_local_hbase_and_wait.sh
new file mode 100755
index 000..64d0b68
--- /dev/null
+++ b/hbase-native-client/bin/start_local_hbase_and_wait.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+$PWD/../bin/start-hbase.sh
+
+until [ $(curl -s -o /dev/null -I -w "%{http_code}" http://localhost:16010) == 
"200" ]
+do
+ printf "Waiting for local HBase cluster to start\n"
+ sleep 1
+done

htt

[22/52] [abbrv] hbase git commit: HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan Jiang)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-protocol/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol/src/main/protobuf/Master.proto 
b/hbase-protocol/src/main/protobuf/Master.proto
index 79bb862..1f7a3b7 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -370,9 +370,12 @@ message DeleteSnapshotResponse {
 
 message RestoreSnapshotRequest {
   required SnapshotDescription snapshot = 1;
+  optional uint64 nonce_group = 2 [default = 0];
+  optional uint64 nonce = 3 [default = 0];
 }
 
 message RestoreSnapshotResponse {
+  required uint64 proc_id = 1;
 }
 
 /* if you don't send the snapshot, then you will get it back
@@ -735,11 +738,6 @@ service MasterService {
   rpc RestoreSnapshot(RestoreSnapshotRequest) returns(RestoreSnapshotResponse);
 
   /**
-   * Determine if the snapshot restore is done yet.
-   */
-  rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) 
returns(IsRestoreSnapshotDoneResponse);
-
-  /**
* Execute a distributed procedure.
*/
   rpc ExecProcedure(ExecProcedureRequest) returns(ExecProcedureResponse);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-protocol/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 2d2aff4..87aae6a 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -222,6 +222,46 @@ message DisableTableStateData {
   required bool skip_table_state_check = 3;
 }
 
+message RestoreParentToChildRegionsPair {
+  required string parent_region_name = 1;
+  required string child1_region_name = 2;
+  required string child2_region_name = 3;
+}
+
+enum CloneSnapshotState {
+  CLONE_SNAPSHOT_PRE_OPERATION = 1;
+  CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;
+  CLONE_SNAPSHOT_ADD_TO_META = 3;
+  CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;
+  CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;
+  CLONE_SNAPSHOT_POST_OPERATION = 6;
+}
+
+message CloneSnapshotStateData {
+  required UserInformation user_info = 1;
+  required SnapshotDescription snapshot = 2;
+  required TableSchema table_schema = 3;
+  repeated RegionInfo region_info = 4;
+  repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 
5;
+}
+
+enum RestoreSnapshotState {
+  RESTORE_SNAPSHOT_PRE_OPERATION = 1;
+  RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2;
+  RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3;
+  RESTORE_SNAPSHOT_UPDATE_META = 4;
+}
+
+message RestoreSnapshotStateData {
+  required UserInformation user_info = 1;
+  required SnapshotDescription snapshot = 2;
+  required TableSchema modified_table_schema = 3;
+  repeated RegionInfo region_info_for_restore = 4;
+  repeated RegionInfo region_info_for_remove = 5;
+  repeated RegionInfo region_info_for_add = 6;
+  repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 
7;
+}
+
 message ServerCrashStateData {
   required ServerName server_name = 1;
   optional bool distributed_log_replay = 2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index cdadff4..319d363 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -916,33 +916,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   /**
-   * Returns the status of the requested snapshot restore/clone operation.
-   * This method is not exposed to the user, it is just used internally by 
HBaseAdmin
-   * to verify if the restore is completed.
-   *
-   * No exceptions are thrown if the restore is not running, the result will 
be "done".
-   *
-   * @return done true if the restore/clone operation is completed.
-   * @throws ServiceException if the operation failed.
-   */
-  @Override
-  public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(RpcController 
controller,
-  IsRestoreSnapshotDoneRequest request) throws ServiceException {
-try {
-  master.checkInitialized();
-  SnapshotDescription snapshot = request.getSnapshot();
-  IsRestoreSnapshotDoneResponse.Builder builder = 
IsRestoreSnapshotDoneResponse.newBuilder();
-  boolean done = master.snapshotManager.isRestoreDone(snapshot);
-  builder.setDone(done);
-  return builder.build();
-} catch (ForeignException e) {
-  throw new ServiceException(e.getCause());
-} catch (IOException e) {
-

[47/52] [abbrv] hbase git commit: HBASE-14852 Update build env

HBASE-14852 Update build env

Also includes HBASE-14858 Clean up so core is ready for development on a recent 
version of c++


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f0d4c90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f0d4c90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f0d4c90

Branch: refs/heads/HBASE-14850
Commit: 1f0d4c90bf7b25bab14476118668ff617c725c06
Parents: 6ea4994
Author: Elliott Clark 
Authored: Thu Nov 19 16:28:11 2015 -0800
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 hbase-native-client/.buckconfig |   2 +
 hbase-native-client/.gitignore  |  23 +--
 hbase-native-client/CMakeLists.txt  | 157 ---
 hbase-native-client/Dockerfile  |  22 +++
 hbase-native-client/README.md   |  15 +-
 hbase-native-client/bin/build-all.sh|  41 -
 hbase-native-client/bin/build-thirdparty.sh |  64 
 hbase-native-client/bin/download-thirdparty.sh  |  70 -
 hbase-native-client/bin/hbase-client-env.sh |  47 --
 hbase-native-client/bin/start-docker.sh |  32 
 .../cmake_modules/FindGTest.cmake   |  53 ---
 .../cmake_modules/FindLibEv.cmake   |  47 --
 hbase-native-client/core/BUCK   |  50 ++
 hbase-native-client/core/admin.cc   |  20 +++
 hbase-native-client/core/admin.h|  22 +++
 hbase-native-client/core/client.cc  |  38 +
 hbase-native-client/core/client.h   |  24 +++
 hbase-native-client/core/connection.cc  |  20 +++
 hbase-native-client/core/connection.h   |  26 +++
 hbase-native-client/core/connection_attr.h  |  24 +++
 hbase-native-client/core/delete.cc  |  21 +++
 hbase-native-client/core/delete.h   |  27 
 hbase-native-client/core/get.cc |  20 +++
 hbase-native-client/core/get.h  |  22 +++
 hbase-native-client/core/hbase_macros.h |  56 +++
 hbase-native-client/core/mutation.cc|  41 +
 hbase-native-client/core/mutation.h |  58 +++
 hbase-native-client/core/put.cc |  21 +++
 hbase-native-client/core/put.h  |  27 
 hbase-native-client/core/scanner.cc |  20 +++
 hbase-native-client/core/scanner.h  |  22 +++
 hbase-native-client/rpc/CMakeLists.txt  |  17 ++
 hbase-native-client/src/async/CMakeLists.txt|  32 
 hbase-native-client/src/async/get-test.cc   |  59 ---
 hbase-native-client/src/async/hbase_admin.cc|  57 ---
 hbase-native-client/src/async/hbase_admin.h |  69 
 hbase-native-client/src/async/hbase_client.cc   |  47 --
 hbase-native-client/src/async/hbase_client.h|  60 ---
 .../src/async/hbase_connection.cc   |  37 -
 .../src/async/hbase_connection.h|  52 --
 hbase-native-client/src/async/hbase_errno.h |  23 ---
 hbase-native-client/src/async/hbase_get.cc  |  61 ---
 hbase-native-client/src/async/hbase_get.h   |  73 -
 .../src/async/hbase_mutations.cc| 111 -
 hbase-native-client/src/async/hbase_mutations.h | 119 --
 hbase-native-client/src/async/hbase_result.cc   |  37 -
 hbase-native-client/src/async/hbase_result.h|  44 --
 hbase-native-client/src/async/hbase_scanner.cc  |  59 ---
 hbase-native-client/src/async/hbase_scanner.h   |  80 --
 hbase-native-client/src/async/mutations-test.cc | 102 
 hbase-native-client/src/core/CMakeLists.txt |  31 
 hbase-native-client/src/core/admin.cc   |  20 ---
 hbase-native-client/src/core/admin.h|  25 ---
 hbase-native-client/src/core/client.cc  |  20 ---
 hbase-native-client/src/core/client.h   |  25 ---
 hbase-native-client/src/core/connection.cc  |  22 ---
 hbase-native-client/src/core/connection.h   |  26 ---
 hbase-native-client/src/core/connection_attr.h  |  30 
 hbase-native-client/src/core/delete.cc  |  22 ---
 hbase-native-client/src/core/delete.h   |  29 
 hbase-native-client/src/core/get.cc |  20 ---
 hbase-native-client/src/core/get.h  |  26 ---
 .../src/core/hbase_connection_attr.cc   |  41 -
 .../src/core/hbase_connection_attr.h|  51 --
 hbase-native-client/src/core/hbase_macros.h |  60 ---
 hbase-native-client/src/core/hbase_types.h  |  83 --
 hbase-native-client/src/core/mutation.cc|  42 -
 hbase-native-client/src/core/mutation.h |  48 --
 hbase-native-client/src/core/put.cc |  22 ---
 hbase-native-client/src/core/put.h  |  29 
 hbase-native-client/s

[35/52] [abbrv] hbase git commit: HBASE-15369 Handle NPE in region.jsp (Samir Ahmic)

HBASE-15369 Handle NPE in region.jsp (Samir Ahmic)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3826894f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3826894f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3826894f

Branch: refs/heads/HBASE-14850
Commit: 3826894f890a850270053a25b53f07a007555711
Parents: c93cffb
Author: tedyu 
Authored: Wed Apr 6 06:52:51 2016 -0700
Committer: tedyu 
Committed: Wed Apr 6 06:52:51 2016 -0700

--
 .../hbase-webapps/regionserver/region.jsp | 18 +++---
 1 file changed, 15 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3826894f/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
--
diff --git 
a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
index 874ac43..02f3d94 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp
@@ -21,6 +21,7 @@
   import="java.util.Collection"
   import="java.util.Date"
   import="java.util.List"
+  import="org.owasp.esapi.ESAPI"
   import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
   import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.HTableDescriptor"
@@ -35,10 +36,14 @@
   String regionName = request.getParameter("name");
   HRegionServer rs = (HRegionServer) 
getServletContext().getAttribute(HRegionServer.REGIONSERVER);
   Configuration conf = rs.getConfiguration();
-
+  String displayName = null;
   Region region = rs.getFromOnlineRegions(regionName);
-  String displayName = 
HRegionInfo.getRegionNameAsStringForDisplay(region.getRegionInfo(),
+  if(region == null) {
+displayName= ESAPI.encoder().encodeForHTML(regionName) + " does not exist";
+  } else {
+displayName = 
HRegionInfo.getRegionNameAsStringForDisplay(region.getRegionInfo(),
 rs.getConfiguration());
+  }
 %>
 

[44/52] [abbrv] hbase git commit: HBASE-15407 Add SASL support for fan out OutputStream

HBASE-15407 Add SASL support for fan out OutputStream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ea49945
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ea49945
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ea49945

Branch: refs/heads/HBASE-14850
Commit: 6ea4994569e05ff44e0fa571e053cef828ab57ed
Parents: e450d94
Author: zhangduo 
Authored: Sun Mar 27 19:01:05 2016 +0800
Committer: zhangduo 
Committed: Fri Apr 8 21:46:47 2016 +0800

--
 .../util/FanOutOneBlockAsyncDFSOutput.java  |   38 +-
 .../FanOutOneBlockAsyncDFSOutputHelper.java |  230 ++--
 .../FanOutOneBlockAsyncDFSOutputSaslHelper.java | 1032 ++
 .../util/TestFanOutOneBlockAsyncDFSOutput.java  |   13 +-
 .../TestSaslFanOutOneBlockAsyncDFSOutput.java   |  192 
 5 files changed, 1385 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6ea49945/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
index b10f180..bdbf865 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FanOutOneBlockAsyncDFSOutput.java
@@ -17,11 +17,26 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static io.netty.handler.timeout.IdleState.READER_IDLE;
+import static io.netty.handler.timeout.IdleState.WRITER_IDLE;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
 import static 
org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.EventLoop;
+import io.netty.channel.SimpleChannelInboundHandler;
+import io.netty.handler.codec.protobuf.ProtobufDecoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+import io.netty.handler.timeout.IdleStateEvent;
+import io.netty.handler.timeout.IdleStateHandler;
+import io.netty.util.concurrent.Future;
+import io.netty.util.concurrent.FutureListener;
+import io.netty.util.concurrent.Promise;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -36,6 +51,8 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Supplier;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -52,23 +69,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.DataChecksum;
 
-import com.google.common.base.Supplier;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.EventLoop;
-import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.timeout.IdleState;
-import io.netty.handler.timeout.IdleStateEvent;
-import io.netty.handler.timeout.IdleStateHandler;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.FutureListener;
-import io.netty.util.concurrent.Promise;
-
 /**
  * An asynchronous HDFS output stream implementation which fans out data to 
datanode and only
  * supports writing file with only one block.
@@ -278,7 +278,7 @@ public class FanOutOneBlockAsyncDFSOutput implements 
Closeable {
   public void userEventTriggered(ChannelHandlerContext ctx, Object evt) 
throws Exception {
 if (evt instanceof IdleStateEvent) {
   IdleStateEvent e = (IdleStateEvent) evt;
-  if (e.state() == IdleState.READER_IDLE) {
+  if (e.state() == READER_IDLE) {
 failed(ctx.channel(), new Supplier() {
 
   @Override
@@ -286,7 +286,7 @@ public class FanOutOneBlockAsyncDFSOutput implements 
Closeable {
 return new IOException("Timeout(" + timeoutMs + "ms) waiting 
for response"

[23/52] [abbrv] hbase git commit: HBASE-15521 Procedure V2 - RestoreSnapshot and CloneSnapshot (Stephen Yuan Jiang)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1d5c3d2/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 073eba9..b91a36b 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -38793,6 +38793,26 @@ public final class MasterProtos {
  * required .hbase.pb.SnapshotDescription snapshot = 1;
  */
 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder
 getSnapshotOrBuilder();
+
+// optional uint64 nonce_group = 2 [default = 0];
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+boolean hasNonceGroup();
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+long getNonceGroup();
+
+// optional uint64 nonce = 3 [default = 0];
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+boolean hasNonce();
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+long getNonce();
   }
   /**
* Protobuf type {@code hbase.pb.RestoreSnapshotRequest}
@@ -38858,6 +38878,16 @@ public final class MasterProtos {
   bitField0_ |= 0x0001;
   break;
 }
+case 16: {
+  bitField0_ |= 0x0002;
+  nonceGroup_ = input.readUInt64();
+  break;
+}
+case 24: {
+  bitField0_ |= 0x0004;
+  nonce_ = input.readUInt64();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -38920,8 +38950,42 @@ public final class MasterProtos {
   return snapshot_;
 }
 
+// optional uint64 nonce_group = 2 [default = 0];
+public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+private long nonceGroup_;
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public boolean hasNonceGroup() {
+  return ((bitField0_ & 0x0002) == 0x0002);
+}
+/**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+public long getNonceGroup() {
+  return nonceGroup_;
+}
+
+// optional uint64 nonce = 3 [default = 0];
+public static final int NONCE_FIELD_NUMBER = 3;
+private long nonce_;
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public boolean hasNonce() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+public long getNonce() {
+  return nonce_;
+}
+
 private void initFields() {
   snapshot_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance();
+  nonceGroup_ = 0L;
+  nonce_ = 0L;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -38946,6 +39010,12 @@ public final class MasterProtos {
   if (((bitField0_ & 0x0001) == 0x0001)) {
 output.writeMessage(1, snapshot_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+output.writeUInt64(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeUInt64(3, nonce_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -38959,6 +39029,14 @@ public final class MasterProtos {
 size += com.google.protobuf.CodedOutputStream
   .computeMessageSize(1, snapshot_);
   }
+  if (((bitField0_ & 0x0002) == 0x0002)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(2, nonceGroup_);
+  }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(3, nonce_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -38987,6 +39065,16 @@ public final class MasterProtos {
 result = result && getSnapshot()
 .equals(other.getSnapshot());
   }
+  result = result && (hasNonceGroup() == other.hasNonceGroup());
+  if (hasNonceGroup()) {
+result = result && (getNonceGroup()
+== other.getNonceGroup());
+  }
+  result = result && (hasNonce() == other.hasNonce());
+  if (hasNonce()) {
+result = result && (getNonce()
+== other.getNonce());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -39004,6 +39092,14 @@ public final class MasterProtos {
 hash = (

[49/52] [abbrv] hbase git commit: HBASE-14854 Read meta location from zk

HBASE-14854 Read meta location from zk


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/65368023
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/65368023
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/65368023

Branch: refs/heads/HBASE-14850
Commit: 65368023c2368386bb9eb1934bfb4b6f32f05cdf
Parents: eaf86de
Author: Elliott Clark 
Authored: Sat Mar 5 00:09:08 2016 -0800
Committer: Elliott Clark 
Committed: Fri Apr 8 12:31:26 2016 -0700

--
 hbase-native-client/Dockerfile  |  20 ++-
 hbase-native-client/bin/start-docker.sh |   5 +-
 hbase-native-client/core/BUCK   | 106 +++---
 .../core/HBaseNativeClientTestEnv.cc|  42 --
 .../core/SampleNativeClientTest.cc  |  28 
 hbase-native-client/core/location-cache-test.cc |  14 ++
 hbase-native-client/core/location-cache.cc  |  67 +
 hbase-native-client/core/location-cache.h   |  35 +
 .../core/native-client-test-env.cc  |  42 ++
 .../core/simple-native-client-test.cc   |  28 
 hbase-native-client/core/test_env.h |   2 +
 hbase-native-client/if/BUCK |   1 +
 hbase-native-client/third-party/BUCK| 138 ++-
 13 files changed, 339 insertions(+), 189 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/65368023/hbase-native-client/Dockerfile
--
diff --git a/hbase-native-client/Dockerfile b/hbase-native-client/Dockerfile
index 1364d22..36959a5 100644
--- a/hbase-native-client/Dockerfile
+++ b/hbase-native-client/Dockerfile
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM cpp_update 
+FROM pjameson/buck-folly-watchman
 
 ARG CC=/usr/bin/gcc-5
 ARG CXX=/usr/bin/g++-5
@@ -25,20 +25,26 @@ ARG CXXFLAGS="-D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -g 
-fno-omit-frame-pointer -O3 -p
 RUN apt-get install -y clang-format-3.7 vim maven inetutils-ping
 RUN git clone --depth 1 --branch v2.6.1 https://github.com/google/protobuf.git 
/usr/src/protobuf && \
   cd /usr/src/protobuf/ && \
+  ldconfig && \
   ./autogen.sh && \
-  ./configure --disable-shared && \
+  ./configure && \
   make && \
-  make check && \
-  make install
+  make install && \ 
+  make clean && \
+  rm -rf .git
+
 RUN cd /usr/src && \
   wget 
http://www-us.apache.org/dist/zookeeper/zookeeper-3.4.8/zookeeper-3.4.8.tar.gz 
&& \ 
   tar zxf zookeeper-3.4.8.tar.gz && \ 
   rm -rf zookeeper-3.4.8.tar.gz && \
   cd zookeeper-3.4.8 && \
   cd src/c && \
-  ./configure --disable-shared && \
+  ldconfig && \
+  ./configure && \
   make && \
   make install && \
-  make clean 
+  make clean
+
+RUN ldconfig
 
-WORKDIR /usr/local/src/hbase/hbase-native-client
+WORKDIR /usr/src/hbase/hbase-native-client

http://git-wip-us.apache.org/repos/asf/hbase/blob/65368023/hbase-native-client/bin/start-docker.sh
--
diff --git a/hbase-native-client/bin/start-docker.sh 
b/hbase-native-client/bin/start-docker.sh
index 4426705..725ed6a 100755
--- a/hbase-native-client/bin/start-docker.sh
+++ b/hbase-native-client/bin/start-docker.sh
@@ -19,8 +19,11 @@
 set -e
 set -x
 
+# Try out some standard docker machine names that could work
 eval "$(docker-machine env docker-vm)"
 eval "$(docker-machine env dinghy)"
+
+# Build the image
 docker build -t hbase_native .
 
 
@@ -36,6 +39,6 @@ fi;
 
 docker run -p 16010:16010/tcp \
-e "JAVA_HOME=/usr/lib/jvm/java-8-oracle" \
-   -v ${PWD}/..:/usr/local/src/hbase \
+   -v ${PWD}/..:/usr/src/hbase \
-v ~/.m2:/root/.m2 \
-it hbase_native  /bin/bash

http://git-wip-us.apache.org/repos/asf/hbase/blob/65368023/hbase-native-client/core/BUCK
--
diff --git a/hbase-native-client/core/BUCK b/hbase-native-client/core/BUCK
index ef027a1..817b5a0 100644
--- a/hbase-native-client/core/BUCK
+++ b/hbase-native-client/core/BUCK
@@ -15,52 +15,62 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+cxx_library(name="core",
+headers=[
+"admin.h",
+"client.h",
+"connection.h",
+"connection_attr.h",
+"delete.h",
+"get.h",
+"hbase_macros.h",
+"mutation.h",
+"put.h",
+"scanner.h",
+"location-cache.h",
+],
+srcs=[
+"admin.cc",
+"client.cc",
+"connection.cc",
+"get.cc"

[29/52] [abbrv] hbase git commit: HBASE-15424 Add bulk load hfile-refs for replication in ZK after the event is appended in the WAL

HBASE-15424 Add bulk load hfile-refs for replication in ZK after the event is 
appended in the WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25419d8b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25419d8b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25419d8b

Branch: refs/heads/HBASE-14850
Commit: 25419d8b18dd8f35a102614cd31b274659f747ef
Parents: 5d79790
Author: Ashish Singhi 
Authored: Fri Apr 1 15:40:36 2016 +0530
Committer: Ashish Singhi 
Committed: Fri Apr 1 15:40:36 2016 +0530

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  4 +-
 .../hbase/regionserver/wal/MetricsWAL.java  |  7 ++-
 .../regionserver/wal/WALActionsListener.java| 10 +++-
 .../replication/regionserver/Replication.java   | 50 
 .../hadoop/hbase/wal/DisabledWALProvider.java   |  7 +--
 .../hbase/regionserver/wal/TestMetricsWAL.java  | 10 ++--
 .../hbase/wal/WALPerformanceEvaluation.java |  3 +-
 7 files changed, 58 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25419d8b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index f189ff1..b89488a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -840,14 +840,14 @@ public abstract class AbstractFSWAL implements WAL {
 return true;
   }
 
-  private long postAppend(final Entry e, final long elapsedTime) {
+  private long postAppend(final Entry e, final long elapsedTime) throws 
IOException {
 long len = 0;
 if (!listeners.isEmpty()) {
   for (Cell cell : e.getEdit().getCells()) {
 len += CellUtil.estimatedSerializedSizeOf(cell);
   }
   for (WALActionsListener listener : listeners) {
-listener.postAppend(len, elapsedTime);
+listener.postAppend(len, elapsedTime, e.getKey(), e.getEdit());
   }
 }
 return len;

http://git-wip-us.apache.org/repos/asf/hbase/blob/25419d8b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
index 99792e5..69a31cd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWAL.java
@@ -20,9 +20,13 @@
 package org.apache.hadoop.hbase.regionserver.wal;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.util.StringUtils;
 
@@ -51,7 +55,8 @@ public class MetricsWAL extends WALActionsListener.Base {
   }
 
   @Override
-  public void postAppend(final long size, final long time) {
+  public void postAppend(final long size, final long time, final WALKey logkey,
+  final WALEdit logEdit) throws IOException {
 source.incrementAppendCount();
 source.incrementAppendTime(time);
 source.incrementAppendSize(size);

http://git-wip-us.apache.org/repos/asf/hbase/blob/25419d8b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
index a6452e2..adcc6eb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALActionsListener.java
@@ -98,8 +98,12 @@ public interface WALActionsListener {
* TODO: Combine this with above.
* @param entryLen approx length of cells in this append.
* @param elapsedTimeMillis elapsed time in milliseconds.
+   * @param logKey A WAL key
+   * @param logEdit A WAL edit containing list of cells.
+   * @throws IOException if any network or I/O error occurred
*/
-  void postAppend(

[11/52] [abbrv] hbase git commit: HBASE-14983 Create metrics for per block type hit/miss ratios

HBASE-14983 Create metrics for per block type hit/miss ratios

Summary: Missing a root index block is worse than missing a data block. We
should know the difference.

Test Plan: Tested on a local instance. All numbers looked reasonable.

Differential Revision: https://reviews.facebook.net/D55563


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a71ce6e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a71ce6e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a71ce6e7

Branch: refs/heads/HBASE-14850
Commit: a71ce6e7382e8af0c8a005897093a8ab1ac9a492
Parents: b18de5e
Author: Elliott Clark 
Authored: Tue Dec 15 15:35:16 2015 -0800
Committer: Elliott Clark 
Committed: Wed Mar 30 11:41:11 2016 -0700

--
 .../hbase/io/hfile/MemcachedBlockCache.java |   5 +-
 .../regionserver/MetricsRegionServerSource.java |  23 +++
 .../MetricsRegionServerWrapper.java |  40 
 .../MetricsRegionServerSourceImpl.java  |  38 +++-
 .../hadoop/hbase/io/hfile/BlockCacheKey.java|  19 +-
 .../hadoop/hbase/io/hfile/CacheStats.java   | 196 ++-
 .../hbase/io/hfile/CombinedBlockCache.java  | 106 ++
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java  |   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |   6 +-
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java  |   4 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.java|   6 +-
 .../hbase/io/hfile/bucket/BucketCache.java  |   6 +-
 .../MetricsRegionServerWrapperImpl.java | 158 +++
 .../hbase/io/hfile/TestCombinedBlockCache.java  |  16 +-
 .../hbase/io/hfile/TestLruBlockCache.java   |  44 ++---
 .../MetricsRegionServerWrapperStub.java | 100 ++
 16 files changed, 713 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a71ce6e7/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
--
diff --git 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index ae871c4..69d8521 100644
--- 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -149,14 +149,13 @@ public class MemcachedBlockCache implements BlockCache {
   // Update stats if this request doesn't have it turned off 100% of the 
time
   if (updateCacheMetrics) {
 if (result == null) {
-  cacheStats.miss(caching, cacheKey.isPrimary());
+  cacheStats.miss(caching, cacheKey.isPrimary(), 
cacheKey.getBlockType());
 } else {
-  cacheStats.hit(caching, cacheKey.isPrimary());
+  cacheStats.hit(caching, cacheKey.isPrimary(), 
cacheKey.getBlockType());
 }
   }
 }
 
-
 return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a71ce6e7/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 9693bba..b0d8c24 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -297,6 +297,29 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount";
   String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a 
block cache " +
   "insertion failed. Usually due to size restrictions.";
+  String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount";
+  String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = 
"blockCacheEncodedDataMissCount";
+  String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount";
+  String BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT = "blockCacheBloomChunkMissCount";
+  String BLOCK_CACHE_META_MISS_COUNT = "blockCacheMetaMissCount";
+  String BLOCK_CACHE_ROOT_INDEX_MISS_COUNT = "blockCacheRootIndexMissCount";
+  String BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT = 
"blockCacheIntermediateIndexMissCount";
+  String BLOCK_CACHE_FILE_INFO_MISS_COUNT = "blockCacheFileInfoMissCount";
+  String BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT = 
"blockCacheGeneralBloomMetaMissCount";
+  String BLOCK_C

[34/52] [abbrv] hbase git commit: HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erronously

HBASE-15587 FSTableDescriptors.getDescriptor() logs stack trace erronously


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c93cffb9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c93cffb9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c93cffb9

Branch: refs/heads/HBASE-14850
Commit: c93cffb95c0322fc4244fe78a584ff225bc105c9
Parents: 7e39988
Author: Enis Soztutar 
Authored: Tue Apr 5 18:13:40 2016 -0700
Committer: Enis Soztutar 
Committed: Tue Apr 5 18:13:40 2016 -0700

--
 .../java/org/apache/hadoop/hbase/util/FSTableDescriptors.java| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c93cffb9/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index cce37d7..18156cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -125,11 +125,13 @@ public class FSTableDescriptors implements 
TableDescriptors {
 this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
   }
 
+  @Override
   public void setCacheOn() throws IOException {
 this.cache.clear();
 this.usecache = true;
   }
 
+  @Override
   public void setCacheOff() throws IOException {
 this.usecache = false;
 this.cache.clear();
@@ -175,6 +177,8 @@ public class FSTableDescriptors implements TableDescriptors 
{
 } catch (NullPointerException e) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
   + tablename, e);
+} catch (TableInfoMissingException e) {
+  // ignore. This is regular operation
 } catch (IOException ioe) {
   LOG.debug("Exception during readTableDecriptor. Current table name = "
   + tablename, ioe);



[37/52] [abbrv] hbase git commit: HBASE-15606 Limit creating zk connection in HBaseAdmin#getCompactionState() only to case when 'hbase:meta' is checked.

HBASE-15606 Limit creating zk connection in HBaseAdmin#getCompactionState() 
only to case when 'hbase:meta' is checked.

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d393603d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d393603d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d393603d

Branch: refs/heads/HBASE-14850
Commit: d393603dea23306cd3f18f6dbd1cf14561d45bd0
Parents: ac8cd37
Author: Samir Ahmic 
Authored: Thu Apr 7 21:52:51 2016 +0200
Committer: stack 
Committed: Thu Apr 7 14:25:49 2016 -0700

--
 .../java/org/apache/hadoop/hbase/client/HBaseAdmin.java   | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d393603d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index aea86b9..a900abd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3017,12 +3017,12 @@ public class HBaseAdmin implements Admin {
 break;
   case NORMAL:
   default:
-ZooKeeperWatcher zookeeper =
-new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + 
connection.toString(),
-new ThrowableAbortable());
+ZooKeeperWatcher zookeeper = null;
 try {
   List> pairs;
   if (TableName.META_TABLE_NAME.equals(tableName)) {
+zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + 
connection.toString(),
+  new ThrowableAbortable());
 pairs = new 
MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
   } else {
 pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, 
tableName);
@@ -3074,7 +3074,9 @@ public class HBaseAdmin implements Admin {
 } catch (ServiceException se) {
   throw ProtobufUtil.getRemoteException(se);
 } finally {
-  zookeeper.close();
+  if (zookeeper != null) {
+zookeeper.close();
+  }
 }
 break;
 }



[46/52] [abbrv] hbase git commit: HBASE-14852 Update build env

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f0d4c90/hbase-native-client/src/async/hbase_result.h
--
diff --git a/hbase-native-client/src/async/hbase_result.h 
b/hbase-native-client/src/async/hbase_result.h
deleted file mode 100644
index eecbbb3..000
--- a/hbase-native-client/src/async/hbase_result.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef ASYNC_HBASE_RESULT_H_
-#define ASYNC_HBASE_RESULT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "core/hbase_macros.h"
-#include "core/hbase_types.h"
-
-HBASE_API int32_t hb_result_destroy(hb_result_t result);
-
-HBASE_API int32_t hb_result_get_cells(hb_result_t result,
-hb_cell_t ** cell_ptr, size_t * num_cells);
-
-HBASE_API int32_t hb_result_get_table(hb_result_t result,
-char ** table, size_t * table_length);
-HBASE_API int32_t hb_result_get_namespace(hb_result_t result,
-char ** name_space, size_t * name_space_length);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif  // __cplusplus
-
-#endif  // ASYNC_HBASE_RESULT_H_

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f0d4c90/hbase-native-client/src/async/hbase_scanner.cc
--
diff --git a/hbase-native-client/src/async/hbase_scanner.cc 
b/hbase-native-client/src/async/hbase_scanner.cc
deleted file mode 100644
index 5a8e555..000
--- a/hbase-native-client/src/async/hbase_scanner.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-#include "async/hbase_scanner.h"
-
-#include 
-
-#include "core/hbase_types.h"
-#include "core/scanner.h"
-
-int32_t hb_scanner_create(hb_scanner_t * scanner_ptr) {
-  (*scanner_ptr) = reinterpret_cast(new Scanner());
-  return (*scanner_ptr != NULL)?0:1;
-}
-
-HBASE_API int32_t hb_scanner_set_table(hb_scanner_t scanner,
-char * table, size_t table_length) {
-  return 0;
-}
-
-HBASE_API int32_t hb_scanner_set_namespace(hb_scanner_t scanner,
-char * name_space, size_t name_space_length) {
-  return 0;
-}
-
-int32_t hb_scanner_set_start_row(hb_scanner_t scanner,
-unsigned char * start_row, size_t start_row_length) {
-  return 0;
-}
-
-int32_t hb_scanner_set_end_row(hb_scanner_t scanner,
-unsigned char * end_row, size_t end_row_length) {
-  return 0;
-}
-
-int32_t hb_scanner_set_cache_size(hb_scanner_t scanner,
-size_t cache_size) {
-  return 0;
-}
-
-int32_t hb_scanner_set_num_versions(hb_scanner_t scanner,
-int8_t num_versions) {
-  return 0;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f0d4c90/hbase-native-client/src/async/hbase_scanner.h
--
diff --git a/hbase-native-client/src/async/hbase_scanner.h 
b/hbase-native-client/src/async/hbase_scanner.h
deleted file mode 100644
index cd3f544..000
--- a/hbase-native-client/src/async/hbase_scanner.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http

[32/52] [abbrv] hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a93a8878
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a93a8878
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a93a8878

Branch: refs/heads/HBASE-14850
Commit: a93a8878fea49224310e9e51ac929c33ae6aa41f
Parents: 33396c3
Author: tedyu 
Authored: Mon Apr 4 12:52:24 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 12:52:24 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 183 +++
 3 files changed, 194 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 4da0f13..acaecf1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5652,7 +5652,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // partial Result means that we should not reset the filters; filters
 // should only be reset in
 // between rows
-if (!scannerContext.partialResultFormed()) resetFilters();
+if (!scannerContext.midRowResultFormed()) resetFilters();
 
 if (isFilterDoneInternal()) {
   moreValues = false;
@@ -5727,7 +5727,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   nextKv = heap.peek();
   moreCellsInRow = moreCellsInRow(nextKv, currentRowCell);
   if (!moreCellsInRow) 
incrementCountOfRowsScannedMetric(scannerContext);
-  if (scannerContext.checkBatchLimit(limitScope)) {
+  if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
 return 
scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
   } else if (scannerContext.checkSizeLimit(limitScope)) {
 ScannerContext.NextState state =

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 6674443..de4647d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+|| scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+|| scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
* @param checkerScope
* @return true if the batch limit can be enforced in the checker's scope
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..0a287ce
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the Lice

  1   2   >