[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index d3e74d7..8e77a9e 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -5638,6 +5638,8 @@
 
 beforeShipped() - Method in class org.apache.hadoop.hbase.regionserver.querymatcher.ScanWildcardColumnTracker
 
+beforeShipped() - Method in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 beforeShipped() - Method in interface org.apache.hadoop.hbase.regionserver.ShipperListener
 
 The action that needs to be performed before Shipper.shipped() is performed
@@ -17530,6 +17532,8 @@
 
 count - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.ColumnCount
 
+count - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 count - Variable in class org.apache.hadoop.metrics2.util.MetricSampleQuantiles
 
 Total number of items in stream
@@ -20204,6 +20208,8 @@
 
 curChunk - Variable in class org.apache.hadoop.hbase.regionserver.MemStoreLABImpl
 
+curColCell - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 curFamily - Variable in class org.apache.hadoop.hbase.security.visibility.VisibilityLabelFilter
 
 curFamilyMaxVersions - Variable in class org.apache.hadoop.hbase.security.visibility.VisibilityLabelFilter
@@ -30635,8 +30641,6 @@
 
 flushCheckInterval - Variable in class org.apache.hadoop.hbase.regionserver.HRegion
 
-flushCommits() - Method in class org.apache.hadoop.hbase.client.HTable
-
 flushCommits() - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 flushConfig() - Method in class org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl
@@ -33571,8 +33575,6 @@
 
 getBufferedMutator(TableName) - Method in class org.apache.hadoop.hbase.client.ConnectionImplementation
 
-getBufferedMutator() - Method in class org.apache.hadoop.hbase.client.HTable
-
 getBufferedMutator(ImmutableBytesWritable) - Method in class org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat.MultiTableRecordWriter
 
 getBufferOffset() - Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
@@ -49144,6 +49146,10 @@
 
 getTotalRITsOverThreshold() - Method in class org.apache.hadoop.hbase.master.assignment.AssignmentManager.RegionInTransitionStat
 
+getTotalRowActionRequestCount() - Method in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper
+
+getTotalRowActionRequestCount() - Method in class org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperImpl
+
 getTotalSize() - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator
 
 getTotalSizeOfCells(Result) - Static method in class org.apache.hadoop.hbase.client.Result
@@ -50326,20 +50332,6 @@
 
 getWriteBufferSize() - Method in class org.apache.hadoop.hbase.client.ConnectionConfiguration
 
-getWriteBufferSize() - Method in class org.apache.hadoop.hbase.client.HTable
-
-Returns the maximum size in bytes of the write buffer for this HTable.
-
-getWriteBufferSize() - Method in class org.apache.hadoop.hbase.client.HTableWrapper
-
-getWriteBufferSize() - Method in interface org.apache.hadoop.hbase.client.Table
-
-Deprecated.
-as of 1.0.1 (should not have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-getWriteBufferSize() - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
 getWriteEntry() - Method in class org.apache.hadoop.hbase.wal.WALKey
 
 Use it to complete mvcc transaction.
@@ -54237,10 +54229,6 @@
 
 Creates an object to access a HBase table.
 
-HTable(ClusterConnection, BufferedMutatorImpl) - Constructor for class org.apache.hadoop.hbase.client.HTable
-
-For internal testing.
-
 htable - Variable in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
 
 htable - Variable in class org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl
@@ -57998,6 +57986,8 @@
 
 Checks whether the block cache is enabled.
 
+isBoolean(String) - Static method in class org.apache.hadoop.hbase.thrift.DemoClient
+
 isBootstrapNamespace() - Method in class org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure
 
 isBranch() - Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArrayScanner
@@ -68462,6 +68452,8 @@
 
 mergeFamilyMaps(Map<byte[], List<Cell>>, Map<byte[], List<Cell>>) - Method in class org.apache.hadoop.hbase.regionserver.HRegion
 
+mergeFilterResponse(Cell, ScanQueryMatcher.MatchCode, Filter.ReturnCode) - Method in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 mergeLocations(RegionLocations) - Method in class org.apache.hadoop.hbase.RegionLocations
 
 Merges this RegionLocations list with the given list assuming
@@ -71594,12 +71586,8 @@
 
 MutationType() - Constructor for enum
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 40fb8bd..477e3fa 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -301,823 +301,821 @@
 293   * Close a region. For expert-admins Runs close on the regionserver. The master will not be
 294   * informed of the close.
 295   * @param regionName region name to close
-296   * @param serverName The servername of the regionserver. If not present, we will use servername
-297   *          found in the hbase:meta table. A server name is made of host, port and startcode. Here
-298   *          is an example: <code>host187.example.com,60020,1289493121758</code>
-299   * @return true if the region was closed, false if not. The return value will be wrapped by a
-300   *         {@link CompletableFuture}.
-301   */
-302  CompletableFuture<Boolean> closeRegion(byte[] regionName, Optional<ServerName> serverName);
-303
-304  /**
-305   * Get all the online regions on a region server.
-306   */
-307  CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName);
-308
-309  /**
-310   * Get the regions of a given table.
-311   */
-312  CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName);
-313
-314  /**
-315   * Flush a table.
-316   * @param tableName table to flush
-317   */
-318  CompletableFuture<Void> flush(TableName tableName);
-319
-320  /**
-321   * Flush an individual region.
-322   * @param regionName region to flush
-323   */
-324  CompletableFuture<Void> flushRegion(byte[] regionName);
-325
-326  /**
-327   * Compact a table. When the returned CompletableFuture is done, it only means the compact request
-328   * was sent to HBase and may need some time to finish the compact operation.
-329   * @param tableName table to compact
-330   */
-331  default CompletableFuture<Void> compact(TableName tableName) {
-332    return compact(tableName, Optional.empty());
-333  }
-334
-335  /**
-336   * Compact a column family within a table. When the returned CompletableFuture is done, it only
-337   * means the compact request was sent to HBase and may need some time to finish the compact
-338   * operation.
-339   * @param tableName table to compact
-340   * @param columnFamily column family within a table. If not present, compact the table's all
-341   *          column families.
-342   */
-343  CompletableFuture<Void> compact(TableName tableName, Optional<byte[]> columnFamily);
-344
-345  /**
-346   * Compact an individual region. When the returned CompletableFuture is done, it only means the
-347   * compact request was sent to HBase and may need some time to finish the compact operation.
-348   * @param regionName region to compact
-349   */
-350  default CompletableFuture<Void> compactRegion(byte[] regionName) {
-351    return compactRegion(regionName, Optional.empty());
-352  }
-353
-354  /**
-355   * Compact a column family within a region. When the returned CompletableFuture is done, it only
-356   * means the compact request was sent to HBase and may need some time to finish the compact
-357   * operation.
-358   * @param regionName region to compact
-359   * @param columnFamily column family within a region. If not present, compact the region's all
-360   *          column families.
-361   */
-362  CompletableFuture<Void> compactRegion(byte[] regionName, Optional<byte[]> columnFamily);
-363
-364  /**
-365   * Major compact a table. When the returned CompletableFuture is done, it only means the compact
-366   * request was sent to HBase and may need some time to finish the compact operation.
-367   * @param tableName table to major compact
-368   */
-369  default CompletableFuture<Void> majorCompact(TableName tableName) {
-370    return majorCompact(tableName, Optional.empty());
-371  }
-372
-373  /**
-374   * Major compact a column family within a table. When the returned CompletableFuture is done, it
-375   * only means the compact request was sent to HBase and may need some time to finish the compact
-376   * operation.
-377   * @param tableName table to major compact
-378   * @param columnFamily column family within a table. If not present, major compact the table's all
-379   *          column families.
-380   */
-381  CompletableFuture<Void> majorCompact(TableName tableName, Optional<byte[]> columnFamily);
-382
-383  /**
-384   * Major compact a region. When the returned CompletableFuture is done, it only means the compact
-385   * request was sent to HBase and may need some time to finish the compact operation.
-386   * @param regionName region to major compact
-387   */
-388  default CompletableFuture<Void>
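
The asynchronous signatures above compose naturally with CompletableFuture chaining. Here is a minimal sketch built only from methods shown in this excerpt; the helper name and wiring are illustrative assumptions, not part of AsyncAdmin:

import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class AsyncAdminSketch {
  // Flush a table, then request a major compaction of one column family.
  // Each future completes when HBase accepts the request; per the javadoc
  // above, the compaction itself may still be running afterwards.
  static CompletableFuture<Void> flushThenMajorCompact(AsyncAdmin admin,
      TableName table, byte[] family) {
    return admin.flush(table)
        .thenCompose(ignored -> admin.majorCompact(table, Optional.of(family)));
  }
}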

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 35d5549..7f42873 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -115,2816 +115,2814 @@
 107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-151import

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
index 7dabb5e..782b6f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.Rejection.html
@@ -352,369 +352,376 @@
 344    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
 345      ? longCompactions : shortCompactions;
 346    pool.execute(new CompactionRunner(s, r, compaction, pool, user));
-347    if (LOG.isDebugEnabled()) {
-348      String type = (pool == shortCompactions) ? "Small " : "Large ";
-349      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
-350          + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
-351    }
-352    return selectNow ? compaction.getRequest() : null;
-353  }
-354
-355  private CompactionContext selectCompaction(final Region r, final Store s,
-356      int priority, CompactionRequest request, User user) throws IOException {
-357    CompactionContext compaction = s.requestCompaction(priority, request, user);
-358    if (compaction == null) {
-359      if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
-360        LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() +
-361            " because compaction request was cancelled");
-362      }
-363      return null;
-364    }
-365    assert compaction.hasSelection();
-366    if (priority != Store.NO_PRIORITY) {
-367      compaction.getRequest().setPriority(priority);
-368    }
-369    return compaction;
-370  }
-371
-372  /**
-373   * Only interrupt once it's done with a run through the work loop.
-374   */
-375  void interruptIfNecessary() {
-376    splits.shutdown();
-377    longCompactions.shutdown();
-378    shortCompactions.shutdown();
-379  }
-380
-381  private void waitFor(ThreadPoolExecutor t, String name) {
-382    boolean done = false;
-383    while (!done) {
-384      try {
-385        done = t.awaitTermination(60, TimeUnit.SECONDS);
-386        LOG.info("Waiting for " + name + " to finish...");
-387        if (!done) {
-388          t.shutdownNow();
-389        }
-390      } catch (InterruptedException ie) {
-391        LOG.warn("Interrupted waiting for " + name + " to finish...");
-392      }
-393    }
-394  }
-395
-396  void join() {
-397    waitFor(splits, "Split Thread");
-398    waitFor(longCompactions, "Large Compaction Thread");
-399    waitFor(shortCompactions, "Small Compaction Thread");
-400  }
-401
-402  /**
-403   * Returns the current size of the queue containing regions that are
-404   * processed.
-405   *
-406   * @return The current size of the regions queue.
-407   */
-408  public int getCompactionQueueSize() {
-409    return longCompactions.getQueue().size() + shortCompactions.getQueue().size();
-410  }
-411
-412  public int getLargeCompactionQueueSize() {
-413    return longCompactions.getQueue().size();
-414  }
-415
+347    ((HRegion)r).incrementCompactionsQueuedCount();
+348    if (LOG.isDebugEnabled()) {
+349      String type = (pool == shortCompactions) ? "Small " : "Large ";
+350      LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
+351          + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
+352    }
+353    return selectNow ? compaction.getRequest() : null;
+354  }
+355
+356  private CompactionContext selectCompaction(final Region r, final Store s,
+357      int priority, CompactionRequest request, User user) throws IOException {
+358    CompactionContext compaction = s.requestCompaction(priority, request, user);
+359    if (compaction == null) {
+360      if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
+361        LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() +
+362            " because compaction request was cancelled");
+363      }
+364      return null;
+365    }
+366    assert compaction.hasSelection();
+367    if (priority != Store.NO_PRIORITY) {
+368      compaction.getRequest().setPriority(priority);
+369    }
+370    return compaction;
+371  }
+372
+373  /**
+374   * Only interrupt once it's done with a run through the work loop.
+375   */
+376  void interruptIfNecessary() {
+377    splits.shutdown();
+378    longCompactions.shutdown();
+379    shortCompactions.shutdown();
+380  }
+381
+382  private void waitFor(ThreadPoolExecutor t, String name) {
+383    boolean done = false;
+384    while (!done) {
+385      try {
+386        done = t.awaitTermination(60, TimeUnit.SECONDS);
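
The waitFor/join pair above is the standard ExecutorService shutdown idiom: stop intake, await termination in a loop, then force. A self-contained sketch of that idiom follows, with an illustrative pool and the same 60-second grace period (not HBase's own configuration):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
  // Stop accepting new tasks, give running ones a grace period, then force.
  static void shutdownAndWait(ExecutorService pool) {
    pool.shutdown(); // no new tasks; queued/running tasks continue
    boolean done = false;
    while (!done) {
      try {
        done = pool.awaitTermination(60, TimeUnit.SECONDS);
        if (!done) {
          pool.shutdownNow(); // interrupt stragglers, drain the queue
        }
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt(); // preserve interrupt status
        done = true;
      }
    }
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    pool.submit(() -> System.out.println("task"));
    shutdownAndWait(pool);
  }
}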

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
index c800c26..1ebfe64 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html
@@ -141,7 +141,7 @@ extends
 
 Fields inherited from class org.apache.hadoop.hbase.HColumnDescriptor
-BLOCKCACHE, BLOCKSIZE, BLOOMFILTER, CACHE_BLOOMS_ON_WRITE, CACHE_DATA_IN_L1, CACHE_DATA_ON_WRITE, CACHE_INDEX_ON_WRITE, COMPRESS_TAGS, COMPRESSION, COMPRESSION_COMPACT, DATA_BLOCK_ENCODING, DEFAULT_BLOCKCACHE, DEFAULT_BLOCKSIZE, DEFAULT_BLOOMFILTER, DEFAULT_CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE, DEFAULT_COMPRESS_TAGS, DEFAULT_COMPRESSION, DEFAULT_DATA_BLOCK_ENCODING, DEFAULT_DFS_REPLICATION, DEFAULT_ENCODE_ON_DISK, DEFAULT_EVICT_BLOCKS_ON_CLOSE, DEFAULT_IN_MEMORY, DEFAULT_KEEP_DELETED, DEFAULT_MIN_VERSIONS, DEFAULT_MOB_COMPACT_PARTITION_POLICY, DEFAULT_MOB_THRESHOLD, DEFAULT_PREFETCH_BLOCKS_ON_OPEN, DEFAULT_REPLICATION_SCOPE, DEFAULT_TTL, DEFAULT_VERSIONS, delegatee, DFS_REPLICATION, ENCODE_ON_DISK, ENCRYPTION, ENCRYPTION_KEY, EVICT_BLOCKS_ON_CLOSE, FOREVER, IN_MEMORY_COMPACTION, IS_MOB, IS_MOB_BYTES, KEEP_DELETED_CELLS, LENGTH, MIN_VERSIONS, MOB_COMPACT_PARTITION_POLICY, MOB_COMPACT_PARTITION_POLICY_BYTES, MOB_THRESHOLD, MOB_THRESHOLD_BYTES, PREFETCH_BLOCKS_ON_OPEN, REPLICATION_SCOPE, REPLICATION_SCOPE_BYTES, STORAGE_POLICY, TTL
+BLOCKCACHE, BLOCKSIZE, BLOOMFILTER, CACHE_BLOOMS_ON_WRITE, CACHE_DATA_IN_L1, CACHE_DATA_ON_WRITE, CACHE_INDEX_ON_WRITE, COMPRESS_TAGS, COMPRESSION, COMPRESSION_COMPACT, DATA_BLOCK_ENCODING, DEFAULT_BLOCKCACHE, DEFAULT_BLOCKSIZE, DEFAULT_BLOOMFILTER, DEFAULT_CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE, DEFAULT_COMPRESS_TAGS, DEFAULT_COMPRESSION, DEFAULT_DATA_BLOCK_ENCODING, DEFAULT_DFS_REPLICATION, DEFAULT_ENCODE_ON_DISK, DEFAULT_EVICT_BLOCKS_ON_CLOSE, DEFAULT_IN_MEMORY, DEFAULT_KEEP_DELETED, DEFAULT_MIN_VERSIONS, DEFAULT_MOB_COMPACT_PARTITION_POLICY, DEFAULT_MOB_THRESHOLD, DEFAULT_NEW_VERSION_BEHAVIOR, DEFAULT_PREFETCH_BLOCKS_ON_OPEN, DEFAULT_REPLICATION_SCOPE, DEFAULT_TTL, DEFAULT_VERSIONS, delegatee, DFS_REPLICATION, ENCODE_ON_DISK, ENCRYPTION, ENCRYPTION_KEY, EVICT_BLOCKS_ON_CLOSE, FOREVER, IN_MEMORY_COMPACTION, IS_MOB, IS_MOB_BYTES, KEEP_DELETED_CELLS, LENGTH, MIN_VERSIONS, MOB_COMPACT_PARTITION_POLICY, MOB_COMPACT_PARTITION_POLICY_BYTES, MOB_THRESHOLD, MOB_THRESHOLD_BYTES, NEW_VERSION_BEHAVIOR, PREFETCH_BLOCKS_ON_OPEN, REPLICATION_SCOPE, REPLICATION_SCOPE_BYTES, STORAGE_POLICY, TTL
 
 
 
@@ -200,7 +200,7 @@ extends
 
 Methods inherited from class org.apache.hadoop.hbase.HColumnDescriptor
-compareTo, equals, getBlocksize, getBloomFilterType, getCompactionCompression, getCompactionCompressionType, getCompression, getCompressionType, getConfiguration, getConfigurationValue, getDataBlockEncoding, getDefaultValues, getDFSReplication, getEncryptionKey, getEncryptionType, getInMemoryCompaction, getKeepDeletedCells, getMaxVersions, getMinVersions, getMobCompactPartitionPolicy, getMobThreshold, getName, getNameAsString, getScope, getStoragePolicy, getTimeToLive, getUnit, getValue, getValue, getValue, getValues, hashCode, isBlockCacheEnabled, isCacheBloomsOnWrite, isCacheDataInL1, isCacheDataOnWrite, isCacheIndexesOnWrite, isCompressTags, isEvictBlocksOnClose, isInMemory, isLegalFamilyName, isMobEnabled, isPrefetchBlocksOnOpen, parseFrom, remove, removeConfiguration, setBlockCacheEnabled, setBlocksize, setBloomFilterType, setCacheBloomsOnWrite, setCacheDataInL1, setCacheDataOnWrite, setCacheIndexesOnWrite, setCompactionCompressionType, setCompressionType, setCompressTags, setConfiguration, setDataBlockEncoding, setDFSReplication, setEncryptionKey, setEncryptionType, setEvictBlocksOnClose, setInMemory, setInMemoryCompaction, setKeepDeletedCells, setMaxVersions, setMinVersions, setMobCompactPartitionPolicy, setMobEnabled, setMobThreshold, setPrefetchBlocksOnOpen, setScope, setStoragePolicy, setTimeToLive, setTimeToLive, setValue, setValue, setVersions, toByteArray, toString, toStringCustomizedValues
+compareTo, equals, getBlocksize, getBloomFilterType, getCompactionCompression, getCompactionCompressionType, getCompression, getCompressionType, getConfiguration, getConfigurationValue, getDataBlockEncoding, getDefaultValues, getDFSReplication, getEncryptionKey, getEncryptionType, getInMemoryCompaction,
 

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html b/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
index bfe44a9..23cbcc6 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
@@ -180,7 +180,7 @@ extends Procedure
-abort, acquireLock, addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, deserializeStateData, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, execute, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, releaseLock, removeStackIndex, rollback, serializeStateData, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, toStringState, tryRunnable, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+abort, acquireLock, addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, deserializeStateData, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, execute, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, releaseLock, removeStackIndex, rollback, serializeStateData, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, toStringState, tryRunnable, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 



[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 5a6b9fc..79f099a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -931,19 +931,15 @@
 
 protected Procedure.LockState
-DispatchMergingRegionsProcedure.acquireLock(MasterProcedureEnv env)
-
-protected Procedure.LockState
 ServerCrashProcedure.acquireLock(MasterProcedureEnv env)
 
 private void
 CloneSnapshotProcedure.addRegionsToMeta(MasterProcedureEnv env)
 Add regions to hbase:meta table.
 
 private static void
 CreateTableProcedure.addRegionsToMeta(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
@@ -951,7 +947,7 @@
 Add the specified set of regions to the hbase:meta table.
 
 private static List<HRegionInfo>
 CreateTableProcedure.addReplicas(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
@@ -960,35 +956,35 @@
  already created is passed to the method)
 
 protected static List<HRegionInfo>
 CreateTableProcedure.addTableToMeta(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
     List<HRegionInfo> regions)
 
 private static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest
 RSProcedureDispatcher.buildOpenRegionRequest(MasterProcedureEnv env,
     ServerName serverName,
     List<RSProcedureDispatcher.RegionOpenOperation> operations)
 
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo
 RSProcedureDispatcher.RegionOpenOperation.buildRegionOpenInfoRequest(MasterProcedureEnv env)
 
 protected void
 AbstractStateMachineRegionProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 
 protected void
 AbstractStateMachineTableProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 
 private static void
 DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env,
     TableName tableName)
@@ -996,34 +992,34 @@
  info:regioninfo column was empty because of some write error.
 
 protected void
 ModifyColumnFamilyProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 AddColumnFamilyProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 ModifyTableProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 TruncateTableProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 DeleteColumnFamilyProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected static void
 CreateNamespaceProcedure.createDirectory(MasterProcedureEnv env,
     NamespaceDescriptor nsDescriptor)
 Create the namespace directory
 
 private List<HRegionInfo>
 CloneSnapshotProcedure.createFilesystemLayout(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
@@ -1031,20 +1027,20 @@
 Create regions in file system.
 
 protected static List<HRegionInfo>
 CreateTableProcedure.createFsLayout(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
     List<HRegionInfo> newRegions)
 
 protected static List<HRegionInfo>
 CreateTableProcedure.createFsLayout(MasterProcedureEnv env,
     HTableDescriptor hTableDescriptor,
     List<HRegionInfo> newRegions,

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
index 85a4db9..438db17 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
@@ -135,432 +135,437 @@
 127      TimeRange tr = entry.getValue();
 128      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
 129    }
-130  }
-131
-132  /**
-133   * Create a Get operation for the specified row.
-134   * @param row
-135   * @param rowOffset
-136   * @param rowLength
-137   */
-138  public Get(byte[] row, int rowOffset, int rowLength) {
-139    Mutation.checkRow(row, rowOffset, rowLength);
-140    this.row = Bytes.copy(row, rowOffset, rowLength);
-141  }
-142
-143  /**
-144   * Create a Get operation for the specified row.
-145   * @param row
-146   */
-147  public Get(ByteBuffer row) {
-148    Mutation.checkRow(row);
-149    this.row = new byte[row.remaining()];
-150    row.get(this.row);
-151  }
-152
-153  public boolean isCheckExistenceOnly() {
-154    return checkExistenceOnly;
-155  }
-156
-157  public Get setCheckExistenceOnly(boolean checkExistenceOnly) {
-158    this.checkExistenceOnly = checkExistenceOnly;
-159    return this;
-160  }
-161
-162  /**
-163   * This will always return the default value which is false as client cannot set the value to this
-164   * property any more.
-165   * @deprecated since 2.0.0 and will be removed in 3.0.0
-166   */
-167  @Deprecated
-168  public boolean isClosestRowBefore() {
-169    return closestRowBefore;
-170  }
-171
-172  /**
-173   * This is not used any more and does nothing. Use reverse scan instead.
-174   * @deprecated since 2.0.0 and will be removed in 3.0.0
-175   */
-176  @Deprecated
-177  public Get setClosestRowBefore(boolean closestRowBefore) {
-178    // do Nothing
-179    return this;
-180  }
-181
-182  /**
-183   * Get all columns from the specified family.
-184   * <p>
-185   * Overrides previous calls to addColumn for this family.
-186   * @param family family name
-187   * @return the Get object
-188   */
-189  public Get addFamily(byte [] family) {
-190    familyMap.remove(family);
-191    familyMap.put(family, null);
-192    return this;
-193  }
-194
-195  /**
-196   * Get the column from the specific family with the specified qualifier.
-197   * <p>
-198   * Overrides previous calls to addFamily for this family.
-199   * @param family family name
-200   * @param qualifier column qualifier
-201   * @return the Get objec
-202   */
-203  public Get addColumn(byte [] family, byte [] qualifier) {
-204    NavigableSet<byte []> set = familyMap.get(family);
-205    if(set == null) {
-206      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
-207    }
-208    if (qualifier == null) {
-209      qualifier = HConstants.EMPTY_BYTE_ARRAY;
-210    }
-211    set.add(qualifier);
-212    familyMap.put(family, set);
-213    return this;
-214  }
-215
-216  /**
-217   * Get versions of columns only within the specified timestamp range,
-218   * [minStamp, maxStamp).
-219   * @param minStamp minimum timestamp value, inclusive
-220   * @param maxStamp maximum timestamp value, exclusive
-221   * @throws IOException
-222   * @return this for invocation chaining
-223   */
-224  @Override
-225  public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
-226    return (Get) super.setTimeRange(minStamp, maxStamp);
-227  }
-228
-229  /**
-230   * Get versions of columns only within the specified timestamp range,
-231   * @param tr Input TimeRange
-232   * @return this for invocation chaining
-233   */
-234  @Override
-235  public Get setTimeRange(TimeRange tr) {
-236    return (Get) super.setTimeRange(tr);
-237  }
-238
-239  /**
-240   * Get versions of columns with the specified timestamp.
-241   * @param timestamp version timestamp
-242   * @return this for invocation chaining
-243   */
-244  public Get setTimeStamp(long timestamp)
-245  throws IOException {
-246    try {
-247      super.setTimeRange(timestamp, timestamp + 1);
-248    } catch(Exception e) {
-249      // This should never happen, unless integer overflow or something extremely wrong...
-250      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
-251      throw e;
-252    }
-253    return this;
-254  }
-255
-256  @Override
-257  public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
-258    return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
-259  }
-260
-261  @Override
-262  public Get setColumnFamilyTimeRange(byte[] cf, TimeRange tr) {
-263    return (Get) super.setColumnFamilyTimeRange(cf, tr);
-264  }
-265
-266  /**
-267   * Get all available versions.
-268   * @return this for
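
Since every setter above returns this, a Get is typically assembled by chaining. A minimal sketch using only constructors and methods shown in this excerpt; the row, family, and qualifier values are made up:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSketch {
  static Get exampleGet() throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    Get get = new Get(row, 0, row.length);                  // constructor shown above
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")); // one column of one family
    get.setTimeRange(0L, Long.MAX_VALUE);                   // [minStamp, maxStamp)
    get.setCheckExistenceOnly(true);                        // only test existence
    return get;
  }
}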

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
index be80b7f..6470278 100644
--- a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
@@ -250,7 +250,7 @@ implements RegionObserver
-postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompact, postCompactSelection, postCompactSelection, postCompleteSplit, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postIncrementColumnValue, postInstantiateDeleteTracker, postLogReplay, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postRollBackSplit, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postSplit, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompact, preCompactScannerOpen, preCompactScannerOpen, preCompactScannerOpen, preCompactSelection, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preFlushScannerOpen, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preIncrementColumnValue, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preRollBackSplit, preScannerClose, preScannerNext, preScannerOpen, preSplit, preSplit, preSplitAfterPONR, preSplitBeforePONR, preStoreFileReaderOpen, preStoreScannerOpen, preStoreScannerOpen, preWALRestore
+postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompact, postCompactSelection, postCompactSelection, postCompleteSplit, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postIncrementColumnValue, postInstantiateDeleteTracker, postLogReplay, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postRollBackSplit, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postSplit, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompact, preCompactScannerOpen, preCompactScannerOpen, preCompactScannerOpen, preCompactSelection, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preFlushScannerOpen, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preIncrementColumnValue, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preRollBackSplit, preScannerClose, preScannerNext, preScannerOpen, preSplit, preSplit, preSplitAfterPONR, preSplitBeforePONR, preStoreFileReaderOpen, preStoreScannerOpen, preStoreScannerOpen, preWALRestore
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
index f4db034..ee25eee 100644
---

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
index 7b499bf..f9cda22 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
@@ -59,154 +59,148 @@
 051  protected final TableName tableName;
 052  protected final byte[] row;
 053  protected final int replicaId;
-054  protected final static int MIN_WAIT_DEAD_SERVER = 1;
-055
-056  public RegionAdminServiceCallable(ClusterConnection connection,
-057      RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) {
-058    this(connection, rpcControllerFactory, null, tableName, row);
-059  }
-060
-061  public RegionAdminServiceCallable(ClusterConnection connection,
-062      RpcControllerFactory rpcControllerFactory, HRegionLocation location,
-063      TableName tableName, byte[] row) {
-064    this(connection, rpcControllerFactory, location,
-065      tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-066  }
-067
-068  public RegionAdminServiceCallable(ClusterConnection connection,
-069      RpcControllerFactory rpcControllerFactory, HRegionLocation location,
-070      TableName tableName, byte[] row, int replicaId) {
-071    this.connection = connection;
-072    this.rpcControllerFactory = rpcControllerFactory;
-073    this.location = location;
-074    this.tableName = tableName;
-075    this.row = row;
-076    this.replicaId = replicaId;
-077  }
-078
-079  @Override
-080  public void prepare(boolean reload) throws IOException {
-081    if (Thread.interrupted()) {
-082      throw new InterruptedIOException();
-083    }
-084    if (reload || location == null) {
-085      location = getLocation(!reload);
-086    }
-087    if (location == null) {
-088      // With this exception, there will be a retry.
-089      throw new HBaseIOException(getExceptionMessage());
-090    }
-091    this.setStub(connection.getAdmin(location.getServerName()));
-092  }
-093
-094  protected void setStub(AdminService.BlockingInterface stub) {
-095    this.stub = stub;
-096  }
-097
-098  public HRegionLocation getLocation(boolean useCache) throws IOException {
-099    RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId);
-100    if (rl == null) {
-101      throw new HBaseIOException(getExceptionMessage());
-102    }
-103    HRegionLocation location = rl.getRegionLocation(replicaId);
-104    if (location == null) {
-105      throw new HBaseIOException(getExceptionMessage());
-106    }
-107
-108    return location;
-109  }
-110
-111  @Override
-112  public void throwable(Throwable t, boolean retrying) {
-113    if (location != null) {
-114      connection.updateCachedLocations(tableName, location.getRegionInfo().getRegionName(), row,
-115          t, location.getServerName());
-116    }
-117  }
-118
-119  /**
-120   * @return {@link Connection} instance used by this Callable.
-121   */
-122  Connection getConnection() {
-123    return this.connection;
-124  }
-125
-126  //subclasses can override this.
-127  protected String getExceptionMessage() {
-128    return "There is no location" + " table=" + tableName
-129        + " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row);
-130  }
-131
-132  @Override
-133  public String getExceptionMessageAdditionalDetail() {
-134    return null;
-135  }
-136
-137  @Override
-138  public long sleep(long pause, int tries) {
-139    long sleep = ConnectionUtils.getPauseTime(pause, tries);
-140    if (sleep < MIN_WAIT_DEAD_SERVER
-141        && (location == null || connection.isDeadServer(location.getServerName()))) {
-142      sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);
-143    }
-144    return sleep;
-145  }
-146
-147  public static RegionLocations getRegionLocations(
-148      ClusterConnection connection, TableName tableName, byte[] row,
-149      boolean useCache, int replicaId)
-150      throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
-151    RegionLocations rl;
-152    try {
-153      rl = connection.locateRegion(tableName, row, useCache, true, replicaId);
-154    } catch (DoNotRetryIOException e) {
-155      throw e;
-156    } catch (RetriesExhaustedException e) {
-157      throw e;
-158    } catch (InterruptedIOException e) {
-159      throw e;
-160    } catch (IOException e) {
-161      throw new RetriesExhaustedException("Can't get the location", e);
-162    }
-163    if (rl == null) {
-164      throw new RetriesExhaustedException("Can't get the locations");
-165    }
-166    return rl;
-167  }
-168
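
The sleep() method above encodes a retry pause policy: scale the pause with the retry count, but enforce a jittered floor when the target server is known to be dead. A generic sketch of that shape, with illustrative constants and without the HBase ConnectionUtils helpers:

import java.util.concurrent.ThreadLocalRandom;

public class BackoffSketch {
  // Grow the pause with the retry count, then enforce a jittered floor,
  // mirroring sleep() above. All values here are made up for illustration.
  static long pauseMillis(long basePauseMillis, int tries, long floorMillis,
      boolean serverKnownDead) {
    long sleep = basePauseMillis * (1L << Math.min(tries, 10)); // capped exponential backoff
    if (sleep < floorMillis && serverKnownDead) {
      // +/-10% jitter keeps many retrying clients from waking up together
      double jitter = (ThreadLocalRandom.current().nextDouble() - 0.5) * 0.2;
      sleep = (long) (floorMillis * (1.0 + jitter));
    }
    return sleep;
  }
}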

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 049e6fc..eec096e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
@@ -240,7 +240,7 @@ extends
 
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
+AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
     TableName tableName)
 
@@ -258,7 +258,7 @@ extends
 
 getOperationType
-String getOperationType()
+String getOperationType()
 
 Specified by:
 getOperationType in class RawAsyncHBaseAdmin.TableProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index f3e6d94..8a99fdc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
+private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
 
@@ -159,7 +159,7 @@ private static interface
 
 call
-void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
+void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
     HBaseRpcController controller,
     REQ req,
     org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<RESP> done)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 333d6e6..3c643f0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.Converter<D,S>
+private static interface RawAsyncHBaseAdmin.Converter<D,S>
 
@@ -156,7 +156,7 @@ private static interface
 
 convert
-D convert(S src) throws IOException
+D convert(S src) throws IOException
 
 Throws:
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 521af3d..c4d67c9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
-private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+private class
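
Both AdminRpcCall and Converter above are @FunctionalInterface types with a single abstract method, so call sites can supply them as lambdas. A reduced sketch of the Converter contract; the UTF-8 decoding lambda is an illustrative stand-in, not taken from RawAsyncHBaseAdmin:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ConverterSketch {
  // Same shape as RawAsyncHBaseAdmin.Converter above: adapt a source
  // value S (e.g. a protobuf response) into a result type D.
  @FunctionalInterface
  interface Converter<D, S> {
    D convert(S src) throws IOException;
  }

  // A lambda satisfies the single abstract method.
  static final Converter<String, byte[]> UTF8 =
      src -> new String(src, StandardCharsets.UTF_8);
}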

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
index 0a32350..cf44d69 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
@@ -75,735 +75,796 @@
 067import org.apache.hadoop.conf.Configuration;
 068import org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import org.apache.hadoop.fs.UnresolvedLinkException;
-074import org.apache.hadoop.fs.permission.FsPermission;
-075import org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import org.apache.hadoop.hbase.client.ConnectionUtils;
-077import org.apache.hadoop.hbase.util.CancelableProgressable;
-078import org.apache.hadoop.hbase.util.FSUtils;
-079import org.apache.hadoop.hdfs.DFSClient;
-080import org.apache.hadoop.hdfs.DFSOutputStream;
-081import org.apache.hadoop.hdfs.DistributedFileSystem;
-082import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import org.apache.hadoop.io.EnumSetWritable;
-106import org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import org.apache.hadoop.security.token.Token;
-110import org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for performance.
-124  private static final ByteBufAllocator ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is package private.
-127  public static final long HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 60 * 1000;
-131  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
-133
-134  // helper class for getting Status from PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for more details.
-138  private interface PipelineAckStatusGetter {
-139    Status

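The comment block above describes a version-bridging pattern worth illustrating: Hadoop 2.6 and earlier expose the ack status via PipelineAckProto.getStatus(), while 2.7+ encode it in a flag field, so the helper resolves the right accessor once, reflectively. A minimal sketch of that idea, not the HBase source; the getFlag(int) accessor stands in as an assumption for the real flag/ECN decoding:

    import java.lang.reflect.Method;

    // Sketch only: probe for the pre-2.7 accessor and fall back to the
    // flag-based accessor when it is absent.
    final class PipelineAckStatusGetterSketch {

      interface PipelineAckStatusGetter {
        Object get(Object ackProto) throws ReflectiveOperationException;
      }

      static PipelineAckStatusGetter create(Class<?> ackProtoClass) {
        try {
          // Hadoop 2.6 and earlier: status is exposed directly.
          Method getStatus = ackProtoClass.getMethod("getStatus", int.class);
          return ack -> getStatus.invoke(ack, 0);
        } catch (NoSuchMethodException e) {
          try {
            // Hadoop 2.7+: status travels inside a flag; getFlag(int) is an
            // assumption standing in for the real flag/ECN decoding.
            Method getFlag = ackProtoClass.getMethod("getFlag", int.class);
            return ack -> getFlag.invoke(ack, 0);
          } catch (NoSuchMethodException e2) {
            throw new Error("Unsupported Hadoop version", e2);
          }
        }
      }
    }
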
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 477054a..9ee14b8 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RawAsyncHBaseAdmin
+public class RawAsyncHBaseAdmin
 extends Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -373,24 +373,31 @@ implements 
 CompletableFuture<Void>
+clearCompactionQueues(ServerName serverName,
+ Set<String> queues)
+Clear compacting queues on a region server.
+
+
+
+CompletableFuture<Void>
 cloneSnapshot(String snapshotName,
  TableName tableName)
 Create a new table by cloning the snapshot content.
 
 
-
+
 CompletableFuture<

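For orientation, the new clearCompactionQueues signature above can be exercised roughly as follows. A hedged sketch against the 2.0-era async admin; how `admin` is obtained and the "long"/"short" queue names are assumptions:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.ServerName;

    // Sketch: ask one region server to drop its queued (not yet running) compactions.
    ServerName rs = ServerName.valueOf("rs1.example.com", 16020, 1499800000000L);
    CompletableFuture<Void> done =
        admin.clearCompactionQueues(rs, new HashSet<>(Arrays.asList("long", "short")));
    done.join(); // completes once the server has cleared the requested queues
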
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.html
index 463362d..2a43abe 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/AsyncProtobufLogWriter.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AsyncProtobufLogWriter
+public class AsyncProtobufLogWriter
 extends AbstractProtobufLogWriter
 implements AsyncFSWALProvider.AsyncWriter
 AsyncWriter for protobuf-based WAL.
@@ -169,14 +169,18 @@ implements asyncOutputWrapper
 
 
+private Class<? extends io.netty.channel.Channel>
+channelClass
+
+
 private io.netty.channel.EventLoop
 eventLoop
 
-
+
 private static org.apache.commons.logging.Log
 LOG
 
-
+
 private AsyncFSOutput
 output
 
@@ -202,7 +206,8 @@ implements Constructor and Description
 
 
-AsyncProtobufLogWriter(io.netty.channel.EventLoop eventLoop)
+AsyncProtobufLogWriter(io.netty.channel.EventLoop eventLoop,
+  Class<? extends io.netty.channel.Channel> channelClass)
 
 
 
@@ -313,7 +318,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -322,7 +327,16 @@ implements 
 
 eventLoop
-private final io.netty.channel.EventLoop eventLoop
+private final io.netty.channel.EventLoop eventLoop
+
+
+
+
+
+
+
+channelClass
+private final Class<? extends io.netty.channel.Channel> channelClass
 
 
 
@@ -331,7 +345,7 @@ implements 
 
 output
-private AsyncFSOutput output
+private AsyncFSOutput output
 
 
 
@@ -340,7 +354,7 @@ implements 
 
 asyncOutputWrapper
-private OutputStream asyncOutputWrapper
+private OutputStream asyncOutputWrapper
 
 
 
@@ -351,13 +365,14 @@ implements 
+
 
 
 
 
 AsyncProtobufLogWriter
-public AsyncProtobufLogWriter(io.netty.channel.EventLoop eventLoop)
+public AsyncProtobufLogWriter(io.netty.channel.EventLoop eventLoop,
+  Class<? extends io.netty.channel.Channel> channelClass)
 
 
 
@@ -374,7 +389,7 @@ implements 
 
 append
-public void append(WAL.Entry entry)
+public void append(WAL.Entry entry)
 
 Specified by:
 append in interface WALProvider.AsyncWriter
@@ -387,7 +402,7 @@ implements 
 
 sync
-public CompletableFuture<Long> sync()
+public CompletableFuture<Long> sync()
 
 Specified by:
 sync in interface WALProvider.AsyncWriter
@@ -400,7 +415,7 @@ implements 
 
 close
-public void close()
+public void close()
 throws IOException
 
 Specified by:
@@ -418,7 +433,7 @@ implements 
 
 getOutput
-public AsyncFSOutput getOutput()
+public AsyncFSOutput getOutput()
 
 
 
@@ -427,7 +442,7 @@ implements 
 
 initOutput
-protected void initOutput(org.apache.hadoop.fs.FileSystem fs,
+protected void initOutput(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.fs.Path path,
   boolean overwritable,
   int bufferSize,
@@ -448,7 +463,7 @@ implements 
 
 write
-private long write(Consumer<CompletableFuture<

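The added constructor parameter threads the Netty transport choice through the writer. AsyncProtobufLogWriter is internal (@InterfaceAudience.Private), so this is purely an illustrative sketch of the new shape; the event-loop wiring shown is an assumption:

    import io.netty.channel.EventLoopGroup;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.nio.NioSocketChannel;

    // Sketch: the channel class now selects the transport (NIO here; an epoll
    // channel class could be passed instead on Linux).
    EventLoopGroup group = new NioEventLoopGroup(1);
    AsyncProtobufLogWriter writer =
        new AsyncProtobufLogWriter(group.next(), NioSocketChannel.class);
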
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082

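The hunk above replaces inline protobuf assembly with RequestConverter helpers, so the conversion from client objects to a request is written once and shared. A hedged sketch of what such a helper does, with its shape inferred from the removed code rather than copied from the HBase source:

    // Sketch of the RequestConverter pattern: turn client-side ServerName
    // objects into the protobuf request in one reusable static method.
    public static DrainRegionServersRequest buildDrainRegionServersRequest(List<ServerName> servers) {
      DrainRegionServersRequest.Builder builder = DrainRegionServersRequest.newBuilder();
      for (ServerName server : servers) {
        builder.addServerName(ProtobufUtil.toServerName(server)); // POJO -> proto
      }
      return builder.build();
    }
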
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
index 2ff095f..515b2b7 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
@@ -73,600 +73,609 @@
 065  /**
 066   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
 067   * @throws java.io.IOException if a remote or network exception occurs.
-068   */
-069  HTableDescriptor getTableDescriptor() throws IOException;
-070
-071  /**
-072   * Test for the existence of columns in the table, as specified by the Get.
-073   * <p>
-074   *
-075   * This will return true if the Get matches one or more keys, false if not.
-076   * <p>
-077   *
-078   * This is a server-side call so it prevents any data from being transfered to
-079   * the client.
-080   *
-081   * @param get the Get
-082   * @return true if the specified Get matches one or more keys, false if not
-083   * @throws IOException e
-084   */
-085  boolean exists(Get get) throws IOException;
-086
-087  /**
-088   * Test for the existence of columns in the table, as specified by the Gets.
-089   * <p>
-090   *
-091   * This will return an array of booleans. Each value will be true if the related Get matches
-092   * one or more keys, false if not.
-093   * <p>
-094   *
-095   * This is a server-side call so it prevents any data from being transferred to
-096   * the client.
-097   *
-098   * @param gets the Gets
-099   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
-100   * @throws IOException e
-101   */
-102  boolean[] existsAll(List<Get> gets) throws IOException;
-103
-104  /**
-105   * Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
-106   * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
-107   * Get in the same {@link #batch} call, you will not necessarily be
-108   * guaranteed that the Get returns what the Put had put.
-109   *
-110   * @param actions list of Get, Put, Delete, Increment, Append objects
-111   * @param results Empty Object[], same size as actions. Provides access to partial
-112   *    results, in case an exception is thrown. A null in the result array means that
-113   *    the call for that action failed, even after retries. The order of the objects
-114   *    in the results array corresponds to the order of actions in the request list.
-115   * @throws IOException
-116   * @since 0.90.0
-117   */
-118  void batch(final List<? extends Row> actions, final Object[] results) throws IOException,
-119    InterruptedException;
-120
-121  /**
-122   * Same as {@link #batch(List, Object[])}, but with a callback.
-123   * @since 0.96.0
-124   */
-125  <R> void batchCallback(
-126    final List<? extends Row> actions, final Object[] results, final Batch.Callback<R> callback
-127  )
-128    throws IOException, InterruptedException;
+068   * @deprecated since 2.0 version and will be removed in 3.0 version.
+069   * use {@link #getDescriptor()}
+070   */
+071  @Deprecated
+072  HTableDescriptor getTableDescriptor() throws IOException;
+073
+074  /**
+075   * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
+076   * @throws java.io.IOException if a remote or network exception occurs.
+077   */
+078  TableDescriptor getDescriptor() throws IOException;
+079
+080  /**
+081   * Test for the existence of columns in the table, as specified by the Get.
+082   * <p>
+083   *
+084   * This will return true if the Get matches one or more keys, false if not.
+085   * <p>
+086   *
+087   * This is a server-side call so it prevents any data from being transfered to
+088   * the client.
+089   *
+090   * @param get the Get
+091   * @return true if the specified Get matches one or more keys, false if not
+092   * @throws IOException e
+093   */
+094  boolean exists(Get get) throws IOException;
+095
+096  /**
+097   * Test for the existence of columns in the table, as specified by the Gets.
+098   * <p>
+099   *
+100   * This will return an array of booleans. Each value will be true if the related Get matches
+101   * one or more keys, false if not.
+102   * <p>
+103   *
+104   * This is a server-side call so it prevents any data from being transferred to
+105   * the client.
+106   *
+107   * @param gets the Gets
+108   * @return Array of boolean.  True if the specified Get matches one or more keys, false if not.
+109   * @throws IOException e
+110   */
+111  boolean[] existsAll(List<Get> gets) throws IOException;
+112
+113  /**
+114   * Method that does a batch call on Deletes, Gets,

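Because the batch() contract quoted above trips people up (no intra-batch ordering guarantee; results line up with actions by index), a short hedged usage sketch; `table` is an assumed, already-obtained Table instance:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.util.Bytes;

    List<Row> actions = Arrays.asList(
        new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")),
        new Get(Bytes.toBytes("row2")));
    Object[] results = new Object[actions.size()];
    table.batch(actions, results); // throws IOException, InterruptedException
    // results[i] corresponds to actions.get(i); a null slot means that action
    // failed even after retries. Do not assume the Get observed the Put above.
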
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index c6e6a70..d82e120 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -692,20 +692,20 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
 org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
+org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.ScanType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 09fbcc3..9502da9 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -125,10 +125,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteCompare
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
index fb9c3fe..eaf6241 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
@@ -214,8 +214,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
index 6de29a3..a8396c1 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
@@ -695,27 +695,31 @@
 SegmentFactory.createImmutableSegment(CellComparator comparator)
 
 
-ImmutableSegment
-SegmentFactory.createImmutableSegment(org.apache.hadoop.conf.Configuration conf,
+private ImmutableSegment
+SegmentFactory.createImmutableSegment(org.apache.hadoop.conf.Configuration conf,
   CellComparator comparator,
-  MemStoreSegmentsIterator iterator)
+  MemStoreSegmentsIterator iterator,
+  MemStoreLAB memStoreLAB,
+  int numOfCells,
+  MemStoreCompactor.Action action,
+  CompactingMemStore.IndexType idxType)
 
 
 ImmutableSegment
-SegmentFactory.createImmutableSegmentByCompaction(org.apache.hadoop.conf.Configuration conf,
+SegmentFactory.createImmutableSegmentByCompaction(org.apache.hadoop.conf.Configuration conf,
   CellComparator comparator,
   MemStoreSegmentsIterator iterator,
   int numOfCells,
-  ImmutableSegment.Type segmentType)
+  CompactingMemStore.IndexType idxType)
 
 
 ImmutableSegment
-SegmentFactory.createImmutableSegmentByMerge(org.apache.hadoop.conf.Configuration conf,
+SegmentFactory.createImmutableSegmentByMerge(org.apache.hadoop.conf.Configuration conf,
  CellComparator comparator,
  MemStoreSegmentsIterator iterator,
  int numOfCells,
- ImmutableSegment.Type segmentType,
- List<ImmutableSegment> segments)
+ List<ImmutableSegment> segments,
+ CompactingMemStore.IndexType idxType)
 
 
 MutableSegment
@@ -788,6 +792,28 @@
   List<byte[]> targetBoundaries)
 
 
+CellArrayImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ MemStoreLAB memStoreLAB,
+ int numOfCells,
+ MemStoreCompactor.Action action)
+
+ C-tor to be used when new CellArrayImmutableSegment is a result of compaction of a
+ list of older ImmutableSegments.
+
+
+
+CellChunkImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ MemStoreLAB memStoreLAB,
+ int numOfCells,
+ MemStoreCompactor.Action action)
+
+ C-tor to be used when new CellChunkImmutableSegment is built as a result of compaction/merge
+ of a list of older ImmutableSegments.
+
+
+
 CellSet(CellComparator c)
 
 
@@ -820,63 +846,50 @@
 
 
 
-ImmutableSegment(CellComparator comparator,
-MemStoreSegmentsIterator iterator,
+ImmutableSegment(CellSet cs,
+CellComparator comparator,
 MemStoreLAB memStoreLAB)
 
- C-tor to be used when new SKIP-LIST BASED ImmutableSegment is a result of compaction of a
- list of older ImmutableSegments.
+ C-tor to be used to build the derived classes
 
 
 
-ImmutableSegment(CellComparator comparator,
-MemStoreSegmentsIterator iterator,
-MemStoreLAB memStoreLAB,
-int numOfCells,
-ImmutableSegment.Type type,
-boolean merge)
-
- C-tor to be used when new CELL_ARRAY BASED ImmutableSegment is a result of compaction of a
- list of older ImmutableSegments.
-
-
-
 KeyValueHeap(List<? extends KeyValueScanner> scanners,
 CellComparator comparator)
 Constructor.
 
 
-
+
 KVScannerComparator(CellComparator kvComparator)
 Constructor
 
 
-

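The signature churn above is the CompactingMemStore flattening work: the factory now picks the result segment's layout from CompactingMemStore.IndexType instead of an ImmutableSegment.Type. A simplified, hedged sketch of that dispatch; the enum constant names are assumptions, and only the constructors shown in the diff are used:

    // Sketch: choose the flat segment implementation by index type.
    static ImmutableSegment createImmutableSegment(CellComparator comparator,
        MemStoreSegmentsIterator iterator, MemStoreLAB memStoreLAB, int numOfCells,
        MemStoreCompactor.Action action, CompactingMemStore.IndexType idxType) {
      switch (idxType) {
        case ARRAY_MAP: // flat index over a cell array
          return new CellArrayImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action);
        case CHUNK_MAP: // flat index stored directly in chunks
          return new CellChunkImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action);
        default:        // skip-list based segments are built elsewhere
          throw new IllegalArgumentException("unexpected index type " + idxType);
      }
    }
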
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-105import

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index b60c322..f422fb0 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -701,14 +701,14 @@ service.
 
 
 
-static CompletableFuture<List<Pair<HRegionInfo,ServerName>>>
-AsyncMetaTableAccessor.getTableRegionsAndLocations(RawAsyncTable metaTable,
-   Optional<TableName> tableName)
-Used to get table regions' info and server.
+static CompletableFuture<List<HRegionLocation>>
+AsyncMetaTableAccessor.getTableHRegionLocations(RawAsyncTable metaTable,
+Optional<TableName> tableName)
+Used to get all region locations for the specific table.
 
 
 
-static CompletableFuture<List<Pair<HRegionInfo,ServerName>>>
+private static CompletableFuture<List<Pair<HRegionInfo,ServerName>>>
 AsyncMetaTableAccessor.getTableRegionsAndLocations(RawAsyncTable metaTable,
 Optional<TableName> tableName,
 boolean excludeOfflinedSplitParents)
@@ -2282,6 +2282,24 @@ service.
 AsyncHBaseAdmin.checkRegionsAndGetTableName(byte[] encodeRegionNameA,
 byte[] encodeRegionNameB)
 
+
+default CompletableFuture<List<TableName>>
+AsyncAdmin.listTableNames()
+List all of the names of userspace tables.
+
+
+
+CompletableFuture<List<TableName>>
+AsyncHBaseAdmin.listTableNames(Optional<Pattern> pattern,
+  boolean includeSysTables)
+
+
+CompletableFuture<List<TableName>>
+AsyncAdmin.listTableNames(Optional<Pattern> pattern,
+  boolean includeSysTables)
+List all of the names of userspace tables.
+
+
 
 
 
@@ -2520,32 +2538,23 @@ service.
   TableName tableName)
 
 
-CompletableFuture<

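The listTableNames() default method added above simply delegates to the pattern-taking overload with system tables excluded. A hedged usage sketch; `asyncAdmin` is assumed to come from an AsyncConnection:

    import java.util.List;
    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.TableName;

    CompletableFuture<List<TableName>> userTables = asyncAdmin.listTableNames();
    CompletableFuture<List<TableName>> testTables =
        asyncAdmin.listTableNames(Optional.of(Pattern.compile("test.*")), false);
    testTables.thenAccept(names ->
        names.forEach(n -> System.out.println(n.getNameAsString())));
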
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index 49714a2..d0f1508 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -172,1438 +172,1562 @@
 164    Map<ServerName, List<HRegionInfo>> clusterState;
 165
 166    protected final RackManager rackManager;
-167
-168    protected Cluster(
-169        Map<ServerName, List<HRegionInfo>> clusterState,
-170        Map<String, Deque<BalancerRegionLoad>> loads,
-171        RegionLocationFinder regionFinder,
-172        RackManager rackManager) {
-173      this(null, clusterState, loads, regionFinder, rackManager);
-174    }
-175
-176    @SuppressWarnings("unchecked")
-177    protected Cluster(
-178        Collection<HRegionInfo> unassignedRegions,
-179        Map<ServerName, List<HRegionInfo>> clusterState,
-180        Map<String, Deque<BalancerRegionLoad>> loads,
-181        RegionLocationFinder regionFinder,
-182        RackManager rackManager) {
-183
-184      if (unassignedRegions == null) {
-185        unassignedRegions = EMPTY_REGION_LIST;
-186      }
+167    // Maps region -> rackIndex -> locality of region on rack
+168    private float[][] rackLocalities;
+169    // Maps localityType -> region -> [server|rack]Index with highest locality
+170    private int[][] regionsToMostLocalEntities;
+171
+172    protected Cluster(
+173        Map<ServerName, List<HRegionInfo>> clusterState,
+174        Map<String, Deque<BalancerRegionLoad>> loads,
+175        RegionLocationFinder regionFinder,
+176        RackManager rackManager) {
+177      this(null, clusterState, loads, regionFinder, rackManager);
+178    }
+179
+180    @SuppressWarnings("unchecked")
+181    protected Cluster(
+182        Collection<HRegionInfo> unassignedRegions,
+183        Map<ServerName, List<HRegionInfo>> clusterState,
+184        Map<String, Deque<BalancerRegionLoad>> loads,
+185        RegionLocationFinder regionFinder,
+186        RackManager rackManager) {
 187
-188      serversToIndex = new HashMap<>();
-189      hostsToIndex = new HashMap<>();
-190      racksToIndex = new HashMap<>();
-191      tablesToIndex = new HashMap<>();
-192
-193      //TODO: We should get the list of tables from master
-194      tables = new ArrayList<>();
-195      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+188      if (unassignedRegions == null) {
+189        unassignedRegions = EMPTY_REGION_LIST;
+190      }
+191
+192      serversToIndex = new HashMap<>();
+193      hostsToIndex = new HashMap<>();
+194      racksToIndex = new HashMap<>();
+195      tablesToIndex = new HashMap<>();
 196
-197      numRegions = 0;
-198
-199      List<List<Integer>> serversPerHostList = new ArrayList<>();
-200      List<List<Integer>> serversPerRackList = new ArrayList<>();
-201      this.clusterState = clusterState;
-202      this.regionFinder = regionFinder;
-203
-204      // Use servername and port as there can be dead servers in this list. We want everything with
-205      // a matching hostname and port to have the same index.
-206      for (ServerName sn : clusterState.keySet()) {
-207        if (sn == null) {
-208          LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " +
-209              "skipping; unassigned regions?");
-210          if (LOG.isTraceEnabled()) {
-211            LOG.trace("EMPTY SERVERNAME " + clusterState.toString());
-212          }
-213          continue;
-214        }
-215        if (serversToIndex.get(sn.getAddress().toString()) == null) {
-216          serversToIndex.put(sn.getHostAndPort(), numServers++);
-217        }
-218        if (!hostsToIndex.containsKey(sn.getHostname())) {
-219          hostsToIndex.put(sn.getHostname(), numHosts++);
-220          serversPerHostList.add(new ArrayList<>(1));
+197      //TODO: We should get the list of tables from master
+198      tables = new ArrayList<>();
+199      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+200
+201      numRegions = 0;
+202
+203      List<List<Integer>> serversPerHostList = new ArrayList<>();
+204      List<List<Integer>> serversPerRackList = new ArrayList<>();
+205      this.clusterState = clusterState;
+206      this.regionFinder = regionFinder;
+207
+208      // Use servername and port as there can be dead servers in this list. We want everything with
+209      // a matching hostname and port to have the same index.
+210      for (ServerName sn : clusterState.keySet()) {
+211        if (sn ==

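The two arrays introduced at the top of this hunk cache locality so the balancer's cost functions avoid recomputation: one float per (region, rack) pair, plus, per locality type, the index of the entity holding most of each region's blocks. A hedged sketch of the lookups such a cache enables; the LocalityType enum shown is an assumption for illustration:

    // Sketch: constant-time locality queries once the arrays are populated.
    enum LocalityType { SERVER, RACK }

    // rackLocalities[region][rack] = fraction of the region's data on that rack.
    static float localityOnRack(float[][] rackLocalities, int region, int rack) {
      return rackLocalities[region][rack];
    }

    // regionsToMostLocalEntities[type][region] = server or rack index with the
    // highest locality for that region.
    static int mostLocalEntity(int[][] regionsToMostLocalEntities, LocalityType type, int region) {
      return regionsToMostLocalEntities[type.ordinal()][region];
    }
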
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 89e07b5..9299946 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -127,9 +127,12 @@
 org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore
 org.apache.hadoop.hbase.quotas.QuotaCache.QuotaRefresherChore
 org.apache.hadoop.hbase.quotas.QuotaObserverChore
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore
 org.apache.hadoop.hbase.quotas.SpaceQuotaRefresherChore
 
 
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.SnapshotWithSize
+org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.StoreFileReference
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus
 org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory
@@ -198,13 +201,13 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.quotas.QuotaSnapshotStore.ViolationState
-org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
-org.apache.hadoop.hbase.quotas.ThrottleType
-org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
+org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
+org.apache.hadoop.hbase.quotas.QuotaSnapshotStore.ViolationState
+org.apache.hadoop.hbase.quotas.ThrottleType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
index a4f61de..9ef8639 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-use.html
@@ -157,11 +157,17 @@
 
 
 
+SnapshotQuotaObserverChore
A Master-invoked Chore that computes the size of each snapshot which was created from
 a table which has a space quota.
+
+
+
 SpaceQuotaSnapshot
 A point-in-time view of a space quota on a table.
 
 
-
+
 SpaceQuotaSnapshotNotifier
An interface which abstracts away the action taken to enable or disable
or disable
  a space quota violation policy across the HBase cluster.
@@ -329,6 +335,16 @@
 
 
 
+SnapshotQuotaObserverChore.SnapshotWithSize
+A struct encapsulating the name of a snapshot and its 
"size" on the filesystem.
+
+
+
+SnapshotQuotaObserverChore.StoreFileReference
+A reference to a collection of files in the archive 
directory for a single region.
+
+
+
 SpaceLimitingException
 An Exception that is thrown when a space quota is in 
violation.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
index e2832b3..51c5c1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
@@ -472,7 +472,7 @@ extends HStore
-add,
 add,
 addChangedReaderObserver,
 areWritesEnabled,
 assertBulkLoadHFileOk,
 bulkLoadHFile,
 bulkLoadHFile,
 cancelRequestedCompaction,
 canSplit,
 close,
 closeAndArchiveCompactedFiles,
 compact,
 compact,
 compactRecentForTestingAssumingDefaultPolicy,
 completeCompaction,
 createFlushContext,
 createWriterInTmp,
 createWriterInTmp,
 createWriterInTmp,
 deleteChangedReaderObserver,
 deregisterChildren,
 determineTTLFromFamily,
 flushCache, getAvgStoreFileAge,
 getBlockingFileCount,
 getBytesPerChecksum,
 getCacheConfig,
 getChecksumType,
 getCloseCheckInterval,
 getColumnFamilyName,
 getCompactedCellsCount,
 getCompactedCellsSize,
 getCompactionCheckMultiplier,
 getCompactionPressure,
 getCompactionProgress,
 getCompactPriority,
 getComparator,
 getCoprocessorHost,
 getDataBlockEncoder,
 getFamily,
 getFileSystem,
 getFlushableSize,
 getFlushedCellsCount,
 getFlushedCellsSize,
 getFlushedOutputFileSize,
 getHRegion,
 

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
new file mode 100644
index 000..339b982
--- /dev/null
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -0,0 +1,1419 @@
+Source code
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import com.google.common.base.Preconditions;
+021import java.io.IOException;
+022import java.util.Collections;
+023import java.util.HashMap;
+024import java.util.HashSet;
+025import java.util.Map;
+026import java.util.Set;
+027import java.util.function.Function;
+028import org.apache.hadoop.hbase.HConstants;
+029import org.apache.hadoop.hbase.KeepDeletedCells;
+030import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+031import org.apache.hadoop.hbase.classification.InterfaceAudience;
+032import org.apache.hadoop.hbase.exceptions.DeserializationException;
+033import org.apache.hadoop.hbase.exceptions.HBaseException;
+034import org.apache.hadoop.hbase.io.compress.Compression;
+035import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+036import org.apache.hadoop.hbase.regionserver.BloomType;
+037import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+038import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema;
+039import org.apache.hadoop.hbase.util.Bytes;
+040import org.apache.hadoop.hbase.util.PrettyPrinter;
+041import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
+042
+043@InterfaceAudience.Public
+044public class ColumnFamilyDescriptorBuilder {
+045  // For future backward compatibility
+046
+047  // Version  3 was when column names become byte arrays and when we picked up
+048  // Time-to-live feature.  Version 4 was when we moved to byte arrays, HBASE-82.
+049  // Version  5 was when bloom filter descriptors were removed.
+050  // Version  6 adds metadata as a map where keys and values are byte[].
+051  // Version  7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217)
+052  // Version  8 -- reintroduction of bloom filters, changed from boolean to enum
+053  // Version  9 -- add data block encoding
+054  // Version 10 -- change metadata to standard type.
+055  // Version 11 -- add column family level configuration.
+056  private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
+057
+058  @InterfaceAudience.Private
+059  public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
+060  private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION));
+061
+062  @InterfaceAudience.Private
+063  public static final String IN_MEMORY = HConstants.IN_MEMORY;
+064  private static final Bytes IN_MEMORY_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY));
+065
+066  // These constants are used as FileInfo keys
+067  @InterfaceAudience.Private
+068  public static final String COMPRESSION = "COMPRESSION";
+069  private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION));
+070  @InterfaceAudience.Private
+071  public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
+072  private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT));
+073  @InterfaceAudience.Private
+074  public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING";
+075  private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING));
+076  /**
+077   * Key for the BLOCKCACHE attribute. A more exact name would be
+078   * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA
+079   * blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot
+080   * be disabled.
+081   */
+082

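Since this page introduces the new public builder that replaces mutating HColumnDescriptor directly, a short usage sketch; the setters shown exist on the 2.0-era builder, but treat the exact set as an assumption:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Build an immutable column family descriptor for family "cf".
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROW)
        .setCompressionType(Compression.Algorithm.SNAPPY)
        .build();
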
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html b/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
index 219dcd6..40ddef4 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
@@ -4,7 +4,7 @@
 
 
 
-TsvImporterTextMapper (Apache HBase 2.0.0-SNAPSHOT API)
+TsvImporterTextMapper (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-VisibilityExpressionResolver (Apache HBase 2.0.0-SNAPSHOT API)
+VisibilityExpressionResolver (Apache HBase 3.0.0-SNAPSHOT API)