[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index ec8d970..78ff5df 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -332,7 +332,7 @@ implements MetricsRegionServerSource
-APPEND_KEY,
 AVERAGE_REGION_SIZE,
 AVERAGE_REGION_SIZE_DESC,
 AVG_STORE_FILE_AGE,
 AVG_STORE_FILE_AGE_DESC,
 BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT,
 BLOCK_CAC
 HE_BLOOM_CHUNK_MISS_COUNT, BLOCK_CACHE_COUNT,
 BLOCK_CACHE_COUNT_DESC,
 BLOCK_CACHE_DATA_HIT_COUNT,
 BLOCK_CACHE_DATA_MISS_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT,
 BLOCK_CACHE_ENCODED_DATA_HIT_COUNT,
 BLOCK_CACHE_ENCODED_DATA_MISS_COUNT,
 BLOCK_CACHE_EVICTION_COUNT,
 BLOCK_CACHE_EVICTION_COUNT_DESC,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
 BLOCK_CACHE_FAILED_INSERTION_COUNT,
 BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC,
 BLOCK_CACHE_FILE_INFO_HIT_COUNT,
 BLOCK_CACHE_FILE_INFO_MISS_COUNT,
 BLOCK_CACHE_FREE_DESC,
 BLOCK_CACHE_FREE_SIZE,
 BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT,
 BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT,
 BLOCK_CACHE_HIT_COUNT,
 BLOCK_CACHE_HIT_COUNT_DESC,
 BLOCK_CACHE_HIT_PERCENT,
 BLOCK_CACHE_HIT_PERCENT_DESC,
 BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT,
 BLOCK_CACHE_LEAF_INDEX_HIT_COUNT,
 BLOCK_CACHE_LEAF_INDEX_MISS_COUNT,
 BLOCK_CACHE_META_HIT_COUNT,
 BLOCK_CACHE_META_MISS_COUNT,
 BLOCK_CACHE_MISS_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT,
 BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_HIT_COUNT,
 BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_MISS_COUNT,
 B
 LOCK_CACHE_ROOT_INDEX_HIT_COUNT, BLOCK_CACHE_ROOT_INDEX_MISS_COUNT,
 BLOCK_CACHE_SIZE,
 BLOCK_CACHE_SIZE_DESC,
 BLOCK_CACHE_TRAILER_HIT_COUNT,
 BLOCK_CACHE_TRAILER_MISS_COUNT,
 BLOCK_COUNT_MISS_COUNT_DESC,
 BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC,
 BLOCKED_REQUESTS_COUNT,
 BLOCKED_REQUESTS_COUNT_DESC,
 CELLS_COUNT_COMPACTED_FROM_MOB,
 CELLS_COUNT_COMPACTED_FROM_MOB_DESC,
 CELLS_COUNT_COMPACTED_TO_MOB,
 CELLS_COUNT_COMPACTED_TO_MOB_DESC, CELLS_SIZE_COMPACTED_FROM_MOB,
 CELLS_SIZE_COMPACTED_FROM_MOB_DESC,
 CELLS_SIZE_COMPACTED_TO_MOB,
 CELLS_SIZE_COMPACTED_TO_MOB_DESC,
 CHECK_AND_DELETE_KEY,
 CHECK_AND_PUT_KEY,
 CHECK_MUTATE_FAILED_COUNT,
 CHECK_MUTATE_FAILED_COUNT_DESC,
 CHECK_MUTATE_PASSED_COUNT,
 CHECK_MUTATE_PASSED_COUNT_DESC,
 CLUSTER_ID_DESC,
 CLUSTER_ID_NAME,
 COMPACTED_CE
 LLS, COMPACTED_CELLS_DESC,
 COMPACTED_CELLS_SIZE,
 COMPACTED_CELLS_SIZE_DESC,
 COMPACTED_INPUT_BYTES,
 COMPACTED_INPUT_BYTES_DESC,
 COMPACTED_OUTPUT_BYTES,
 COMPACTED_OUTPUT_BYTES_DESC, href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_FILE_COUNT">COMPACTION_INPUT_FILE_COUNT,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_FILE_COUNT_DESC">COMPACTION_INPUT_FILE_COUNT_DESC,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_SIZE">COMPACTION_INPUT_SIZE,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_SIZE_DESC">COMPACTION_INPUT_SIZE_DESC,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_OUTPUT_FILE_COUNT">COMPACTION_OUTPUT_FILE_COUNT,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_OUTPUT_FILE_COUNT_DESC">COMPACTION_OUTPUT_FILE_COUNT_DESC,
 > COMPACTION_OUTPUT_SIZE,
 COMPACTION_OUTPUT_SIZE_DESC,
 COMPACTION_QUEUE_LENGTH,
 COMPACTION_QUEUE_LENGTH_DESC,
 COMPACTION_TIME,
 COMPACTION_TIME_DESC,
 DATA_SIZE_WITHOUT_WAL,
 DATA_SIZE_WITHOUT_WAL_DESC,
 DELETE_BATCH_KEY,
 DELETE_KEY,
 FILTERED_READ_REQUEST_COUNT,
 FILTERED_READ_REQUEST_COUNT_DESC,
 FLUSH_MEMSTORE_SIZE,
 FLUSH_MEMSTORE_SIZE_DESC,
 FLUSH_OUTPUT_SIZE,
 FLUSH_OUTPUT_SIZE_DESC,
 FLUSH_QUEUE_LENGTH,
 FLUSH_QUEUE_LENGTH_DESC,
 FLUSH_TIME,
 FLUSH_TIME_DESC,
 FLUSHED_CELLS,
 FLUSHED_CELLS_DESC, FLUSHED_CELLS_SIZE,
 FLUSHED_CELLS_SIZE_DESC,
 FLUSHED_MEMSTORE_BYTES,
 FLUSHED_MEMSTORE_BYTES_DESC,
 FLUSHED_OUTPUT_BYTES,
 FLUSHED_OUTPUT_BYTES_DESC,
 GET_KEY, GET_SIZE_KEY,
 HEDGED_READ_WINS,
 HEDGED_READ_WINS_DESC,
 HEDGED_READS,
 HEDGED_READS_DESC,
 INCREMENT_KEY,
 LARGE_COMPACTION_QUEUE_LENGTH
 , LARGE_COMPACTION_QUEUE_LENGTH_DESC,
 MAJOR_COMPACTED_CELLS,
 MAJOR_COMPACTED_CELLS_DESC,
 MAJOR_COMPACTED_CELLS_SIZE,
 MAJOR_COMPACTED_CELLS_SIZE_DESC,
 MAJOR_COMPACTED_INPUT_BYTES,
 MAJOR_COMPACTED_INPUT_BYTES_DESC,
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
index 24b9f39..fb2126c 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/KeyValue.html
@@ -201,22 +201,22 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static KeyValue
-KeyValue.create(http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
+KeyValueUtil.create(http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
 
 
 static KeyValue
-KeyValueUtil.create(http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
+KeyValue.create(http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
 
 
 static KeyValue
-KeyValue.create(intlength,
+KeyValueUtil.create(intlength,
   http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
 Create a KeyValue reading length from 
in
 
 
 
 static KeyValue
-KeyValueUtil.create(intlength,
+KeyValue.create(intlength,
   http://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html?is-external=true;
 title="class or interface in java.io">DataInputin)
 Create a KeyValue reading length from 
in
 
@@ -332,31 +332,31 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static KeyValue
-KeyValue.createKeyValueFromKey(byte[]b)
+KeyValueUtil.createKeyValueFromKey(byte[]b)
 
 
 static KeyValue
-KeyValueUtil.createKeyValueFromKey(byte[]b)
+KeyValue.createKeyValueFromKey(byte[]b)
 
 
 static KeyValue
-KeyValue.createKeyValueFromKey(byte[]b,
+KeyValueUtil.createKeyValueFromKey(byte[]b,
  into,
  intl)
 
 
 static KeyValue
-KeyValueUtil.createKeyValueFromKey(byte[]b,
+KeyValue.createKeyValueFromKey(byte[]b,
  into,
  intl)
 
 
 static KeyValue
-KeyValue.createKeyValueFromKey(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferbb)
+KeyValueUtil.createKeyValueFromKey(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferbb)
 
 
 static KeyValue
-KeyValueUtil.createKeyValueFromKey(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferbb)
+KeyValue.createKeyValueFromKey(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferbb)
 
 
 static KeyValue
@@ -526,17 +526,17 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static long
-KeyValue.write(KeyValuekv,
+KeyValueUtil.write(KeyValuekv,
  http://docs.oracle.com/javase/8/docs/api/java/io/DataOutput.html?is-external=true;
 title="class or interface in java.io">DataOutputout)
-Write out a KeyValue in the manner in which we used to when 
KeyValue was a Writable.
+Write out a KeyValue in the manner in which we used to when 
KeyValue was a
+ Writable.
 
 
 
 static long
-KeyValueUtil.write(KeyValuekv,
+KeyValue.write(KeyValuekv,
  http://docs.oracle.com/javase/8/docs/api/java/io/DataOutput.html?is-external=true;
 title="class or interface in java.io">DataOutputout)
-Write out a KeyValue in the manner in which we used to when 
KeyValue was a
- Writable.
+Write out a KeyValue in the manner in which we used to when 
KeyValue was a Writable.
 
 
 
@@ -747,38 +747,28 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorStoreFile
-StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
-See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
- for details on this methods.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorStoreFile
 DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorStoreFile
 StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
 Gets initial, full list of candidate store files to check 
for 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index f355960..13d9b4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, 
boolean forceFlushAllStores) {
-355synchronized (regionsInQueue) {
-356  if (!regionsInQueue.containsKey(r)) 
{
-357// This entry has no delay so it 
will be added at the top of the flush
-358// queue.  It'll come out near 
immediately.
-359FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-360this.regionsInQueue.put(r, 
fqe);
-361this.flushQueue.add(fqe);
-362  }
-363}
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region 
r, long delay, boolean forceFlushAllStores) {
-368synchronized (regionsInQueue) {
-369  if (!regionsInQueue.containsKey(r)) 
{
-370// This entry has some delay
-371FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-372fqe.requeue(delay);
-373this.regionsInQueue.put(r, 
fqe);
-374this.flushQueue.add(fqe);
-375  }
-376}
-377  }
-378
-379  public int getFlushQueueSize() {
-380return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a 
run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387lock.writeLock().lock();
-388try {
-389  for (FlushHandler flushHander : 
flushHandlers) {
-390if (flushHander != null) 
flushHander.interrupt();
-391  }
-392} finally {
-393  lock.writeLock().unlock();
-394}
-395  }
-396
-397  synchronized void 
start(UncaughtExceptionHandler eh) {
-398ThreadFactory flusherThreadFactory = 
Threads.newDaemonThreadFactory(
-399
server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400for (int i = 0; i  
flushHandlers.length; i++) {
-401  flushHandlers[i] = new 
FlushHandler("MemStoreFlusher." + i);
-402  
flusherThreadFactory.newThread(flushHandlers[i]);
-403  flushHandlers[i].start();
-404}
-405  }
-406
-407  boolean isAlive() {
-408for (FlushHandler flushHander : 
flushHandlers) {
-409  if (flushHander != null  
flushHander.isAlive()) {
-410return true;
-411  }
-412}
-413return false;
-414  }
-415
-416  void join() {
-417for (FlushHandler flushHander : 
flushHandlers) {
-418  if (flushHander != null) {
-419
Threads.shutdown(flushHander.getThread());
-420  }
-421}
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file 
count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was 
successfully flushed, false otherwise. If
-429   * false, there will be accompanying 
log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final 
FlushRegionEntry fqe) {
-433Region region = fqe.region;
-434if 
(!region.getRegionInfo().isMetaRegion() 
-435isTooManyStoreFiles(region)) {
-436  if 
(fqe.isMaximumWait(this.blockingWaitTime)) {
-437LOG.info("Waited " + 
(EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438  "ms on a compaction to clean up 
'too many store files'; waited " +
-439  "long enough... proceeding with 
flush of " +
-440  
region.getRegionInfo().getRegionNameAsString());
-441  } else {
-442// If this is first time we've 
been put off, then emit a log message.
-443if (fqe.getRequeueCount() = 
0) {
-444  // Note: We don't impose 
blockingStoreFiles constraint on meta regions
-445  LOG.warn("Region " + 
region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446"store files; delaying flush 
up to " + this.blockingWaitTime + "ms");
-447  if 
(!this.server.compactSplitThread.requestSplit(region)) {
-448try {
-449  
this.server.compactSplitThread.requestSystemCompaction(
-450  region, 
Thread.currentThread().getName());
-451} catch (IOException e) {
-452  e = e instanceof 
RemoteException ?
-453  
((RemoteException)e).unwrapRemoteException() : e;
-454  LOG.error("Cache flush 
failed for region " +
-455
Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456}
-457  }

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
index cbae725..d1ac69f 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ZKUtil
+public class ZKUtil
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Internal HBase utility class for ZooKeeper.
 
@@ -757,7 +757,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -766,7 +766,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ZNODE_PATH_SEPARATOR
-public static finalchar ZNODE_PATH_SEPARATOR
+public static finalchar ZNODE_PATH_SEPARATOR
 
 See Also:
 Constant
 Field Values
@@ -779,7 +779,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 zkDumpConnectionTimeOut
-private staticint zkDumpConnectionTimeOut
+private staticint zkDumpConnectionTimeOut
 
 
 
@@ -796,7 +796,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ZKUtil
-publicZKUtil()
+publicZKUtil()
 
 
 
@@ -813,7 +813,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,

org.apache.zookeeper.Watcherwatcher)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Creates a new connection to ZooKeeper, pulling settings and 
ensemble config
@@ -837,7 +837,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringensemble,

org.apache.zookeeper.Watcherwatcher)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -853,7 +853,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringensemble,

org.apache.zookeeper.Watcherwatcher,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringidentifier)
@@ -870,7 +870,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 loginServer
-public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname)
@@ -898,7 +898,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 loginClient
-public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 826e912..9f31bd3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -51,1973 +51,1976 @@
 043import 
org.apache.hadoop.hbase.TableName;
 044import 
org.apache.hadoop.hbase.UnknownRegionException;
 045import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-046import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-047import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-048import 
org.apache.hadoop.hbase.client.TableState;
-049import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-050import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-051import 
org.apache.hadoop.hbase.errorhandling.ForeignException;
-052import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-053import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-054import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-055import 
org.apache.hadoop.hbase.ipc.QosPriority;
-056import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-057import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-058import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-059import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-060import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-061import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
-062import 
org.apache.hadoop.hbase.mob.MobUtils;
-063import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-064import 
org.apache.hadoop.hbase.procedure2.LockInfo;
-065import 
org.apache.hadoop.hbase.procedure2.Procedure;
-066import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-067import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-068import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-069import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-070import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-071import 
org.apache.hadoop.hbase.replication.ReplicationException;
-072import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import 
org.apache.hadoop.hbase.security.User;
-075import 
org.apache.hadoop.hbase.security.access.AccessController;
-076import 
org.apache.hadoop.hbase.security.visibility.VisibilityController;
-077import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-078import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.*;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-100import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperation.html
index e690c2d..ec75aa9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperation.html
@@ -818,1092 +818,1070 @@
 810});
 811  }
 812
-813  public void preDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-814  throws IOException {
+813  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
+814  throws IOException {
 815execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 816  @Override
 817  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
 818  throws IOException {
-819oserver.preDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+819oserver.preMergeRegions(ctx, 
regionsToMerge);
 820  }
 821});
 822  }
 823
-824  public void postDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-825  throws IOException {
+824  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
+825  throws IOException {
 826execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 827  @Override
 828  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
 829  throws IOException {
-830oserver.postDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+830oserver.postMergeRegions(ctx, 
regionsToMerge);
 831  }
 832});
 833  }
 834
-835  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
-836  throws IOException {
-837execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-838  @Override
-839  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
-840  throws IOException {
-841oserver.preMergeRegions(ctx, 
regionsToMerge);
-842  }
-843});
-844  }
-845
-846  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
-847  throws IOException {
-848execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-849  @Override
-850  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
-851  throws IOException {
-852oserver.postMergeRegions(ctx, 
regionsToMerge);
-853  }
-854});
-855  }
-856
-857  public boolean preBalance() throws 
IOException {
-858return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-859  @Override
-860  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
-861  throws IOException {
-862oserver.preBalance(ctx);
-863  }
-864});
-865  }
-866
-867  public void postBalance(final 
ListRegionPlan plans) throws IOException {
+835  public boolean preBalance() throws 
IOException {
+836return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+837  @Override
+838  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
+839  throws IOException {
+840oserver.preBalance(ctx);
+841  }
+842});
+843  }
+844
+845  public void postBalance(final 
ListRegionPlan plans) throws IOException {
+846execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
+847  @Override
+848  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
+849  throws IOException {
+850oserver.postBalance(ctx, 
plans);
+851  }
+852});
+853  }
+854
+855  public boolean 
preSetSplitOrMergeEnabled(final boolean newValue,
+856  final MasterSwitchType switchType) 
throws IOException {
+857return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+858  @Override
+859  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
+860  throws IOException {
+861
oserver.preSetSplitOrMergeEnabled(ctx, newValue, switchType);
+862  }
+863});
+864  }
+865
+866  public void 
postSetSplitOrMergeEnabled(final boolean newValue,
+867  final MasterSwitchType switchType) 
throws IOException {
 868execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 869  @Override
 870  public void call(MasterObserver 
oserver, ObserverContextMasterCoprocessorEnvironment ctx)
 871  throws 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
index 11bd53b..5a31618 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ColumnFamilyDescriptor, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 An ModifyableFamilyDescriptor contains information about a 
column family such as the
@@ -617,7 +617,7 @@ implements 
 
 name
-private finalbyte[] name
+private finalbyte[] name
 
 
 
@@ -626,7 +626,7 @@ implements 
 
 values
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytes values
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytes values
 
 
 
@@ -635,7 +635,7 @@ implements 
 
 configuration
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String configuration
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String configuration
 A map which holds the configuration specific to the column 
family. The
  keys of the map have the same names as config keys and override the
  defaults with cf-specific settings. Example usage may be for compactions,
@@ -657,7 +657,7 @@ implements 
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-publicModifyableColumnFamilyDescriptor(byte[]name)
+publicModifyableColumnFamilyDescriptor(byte[]name)
 Construct a column descriptor specifying only the family 
name The other
  attributes are defaulted.
 
@@ -675,7 +675,7 @@ public
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-publicModifyableColumnFamilyDescriptor(ColumnFamilyDescriptordesc)
+publicModifyableColumnFamilyDescriptor(ColumnFamilyDescriptordesc)
 Constructor. Makes a deep copy of the supplied descriptor.
  TODO: make this private after the HCD is removed.
 
@@ -690,7 +690,7 @@ public
 
 ModifyableColumnFamilyDescriptor
-privateModifyableColumnFamilyDescriptor(byte[]name,
+privateModifyableColumnFamilyDescriptor(byte[]name,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytesvalues,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringconfig)
 
@@ -709,7 +709,7 @@ public
 
 getName
-publicbyte[]getName()
+publicbyte[]getName()
 
 Specified by:
 getNamein
 interfaceColumnFamilyDescriptor
@@ -724,7 +724,7 @@ public
 
 getNameAsString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetNameAsString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetNameAsString()
 
 Specified by:
 getNameAsStringin
 interfaceColumnFamilyDescriptor
@@ 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
index 8c8883f..86d5438 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class ReplicationSource.LogsComparator
+public static class ReplicationSource.LogsComparator
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in 
java.util">Comparatororg.apache.hadoop.fs.Path
 Comparator used to compare logs together based on their 
start time
@@ -203,7 +203,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
 
 
 LogsComparator
-publicLogsComparator()
+publicLogsComparator()
 
 
 
@@ -220,7 +220,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
 
 
 compare
-publicintcompare(org.apache.hadoop.fs.Patho1,
+publicintcompare(org.apache.hadoop.fs.Patho1,
org.apache.hadoop.fs.Patho2)
 
 Specified by:
@@ -234,7 +234,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
 
 
 getTS
-private staticlonggetTS(org.apache.hadoop.fs.Pathp)
+private staticlonggetTS(org.apache.hadoop.fs.Pathp)
 Split a path to get the start time
  For example: 10.20.20.171%3A60020.1277499063250
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
index a5a0b3e..663f736 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ReplicationSource
+public class ReplicationSource
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
 title="class or interface in java.lang">Thread
 implements ReplicationSourceInterface
 Class that handles the source of a replication stream.
@@ -524,7 +524,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -533,7 +533,7 @@ implements 
 
 queues
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/PriorityBlockingQueue.html?is-external=true;
 title="class or interface in 
java.util.concurrent">PriorityBlockingQueueorg.apache.hadoop.fs.Path
 queues
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/PriorityBlockingQueue.html?is-external=true;
 title="class or interface in 
java.util.concurrent">PriorityBlockingQueueorg.apache.hadoop.fs.Path
 queues
 
 
 
@@ -542,7 +542,7 @@ implements 
 
 queueSizePerGroup
-protectedint queueSizePerGroup
+protectedint queueSizePerGroup
 
 
 
@@ -551,7 +551,7 @@ implements 
 
 replicationQueues
-protectedReplicationQueues 
replicationQueues
+protectedReplicationQueues 
replicationQueues
 
 
 
@@ -560,7 +560,7 @@ implements 
 
 replicationPeers
-privateReplicationPeers 
replicationPeers
+privateReplicationPeers 
replicationPeers
 
 
 
@@ -569,7 +569,7 @@ implements 
 
 conf
-protectedorg.apache.hadoop.conf.Configuration conf
+protectedorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -578,7 +578,7 @@ implements 
 
 replicationQueueInfo
-protectedReplicationQueueInfo 
replicationQueueInfo
+protectedReplicationQueueInfo 
replicationQueueInfo
 
 
 
@@ -587,7 +587,7 @@ implements 
 
 peerId
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMapbyte[], 
ListCell familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void 
prepareDeleteTimestamps(Mutation mutation, Mapbyte[], ListCell 
familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entrybyte[], 
ListCell e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  ListCell cells = 
e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Mapbyte[], Integer kvCount 
= new TreeMap(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i  listSize; 
i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == 
HConstants.LATEST_TIMESTAMP  CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934ListCell result = get(get, 
false);
-2935
-2936if (result.size()  count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size()  count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 
progress of a batch operation,
-2968   * accumulating status 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index feb42ea..4bd98f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as 
defined in
-214 * 
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards 
source / binary compatibility and methods or class can
-216 * change or go away without 
deprecation.
-217 * Use {@link Connection#getAdmin()} to 
obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * pConnection should be an 
iunmanaged/i connection obtained via
-221 * {@link 
ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin 
{
-230  private static final Log LOG = 
LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String 
ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
+180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.RegionSnapshotTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.RegionSnapshotTask.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.RegionSnapshotTask.html
index 238f4d6..c73100f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.RegionSnapshotTask.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.RegionSnapshotTask.html
@@ -25,177 +25,197 @@
 017 */
 018package 
org.apache.hadoop.hbase.regionserver.snapshot;
 019
-020import java.util.List;
-021import java.util.concurrent.Callable;
-022
-023import org.apache.commons.logging.Log;
-024import 
org.apache.commons.logging.LogFactory;
-025import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-026import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-027import 
org.apache.hadoop.hbase.errorhandling.ForeignException;
-028import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-029import 
org.apache.hadoop.hbase.procedure.ProcedureMember;
-030import 
org.apache.hadoop.hbase.procedure.Subprocedure;
-031import 
org.apache.hadoop.hbase.regionserver.HRegion;
-032import 
org.apache.hadoop.hbase.regionserver.Region;
-033import 
org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-034import 
org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-036import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-037
-038/**
-039 * This online snapshot implementation 
uses the distributed procedure framework to force a
-040 * store flush and then records the 
hfiles.  Its enter stage does nothing.  Its leave stage then
-041 * flushes the memstore, builds the 
region server's snapshot manifest from its hfiles list, and
-042 * copies .regioninfos into the snapshot 
working directory.  At the master side, there is an atomic
-043 * rename of the working dir into the 
proper snapshot directory.
-044 */
-045@InterfaceAudience.Private
-046@InterfaceStability.Unstable
-047public class FlushSnapshotSubprocedure 
extends Subprocedure {
-048  private static final Log LOG = 
LogFactory.getLog(FlushSnapshotSubprocedure.class);
-049
-050  private final ListRegion 
regions;
-051  private final SnapshotDescription 
snapshot;
-052  private final SnapshotSubprocedurePool 
taskManager;
-053  private boolean snapshotSkipFlush = 
false;
-054
-055  public 
FlushSnapshotSubprocedure(ProcedureMember member,
-056  ForeignExceptionDispatcher 
errorListener, long wakeFrequency, long timeout,
-057  ListRegion regions, 
SnapshotDescription snapshot,
-058  SnapshotSubprocedurePool 
taskManager) {
-059super(member, snapshot.getName(), 
errorListener, wakeFrequency, timeout);
-060this.snapshot = snapshot;
-061
-062if (this.snapshot.getType() == 
SnapshotDescription.Type.SKIPFLUSH) {
-063  snapshotSkipFlush = true;
-064}
-065this.regions = regions;
-066this.taskManager = taskManager;
-067  }
-068
-069  /**
-070   * Callable for adding files to 
snapshot manifest working dir.  Ready for multithreading.
-071   */
-072  private class RegionSnapshotTask implements Callable<Void> {
-073Region region;
-074RegionSnapshotTask(Region region) {
-075  this.region = region;
-076}
-077
-078@Override
-079public Void call() throws Exception 
{
-080  // Taking the region read lock prevents the individual region from being closed while a
-081  // snapshot is in progress.  This is helpful but not sufficient for preventing races with
-082  // snapshots that involve multiple regions and regionservers.  It is still possible to have
-083  // an interleaving such that globally regions are missing, so we still need the verification
-084  // step.
-085  LOG.debug("Starting region 
operation on " + region);
-086  region.startRegionOperation();
-087  try {
-088if (snapshotSkipFlush) {
-089/*
-090 * This is to take an online snapshot without forcing a coordinated flush, to prevent pauses.
-091 * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure
-092 * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be
-093 * turned on/off based on the flush type.
-094 * To minimize the code change, the class name is not changed.
-095 */
-096  LOG.debug("take snapshot 
without flush memstore first");
-097} else {
-098  LOG.debug("Flush Snapshotting 

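Aside on the client-facing knob behind the skip-flush branch above: it corresponds to the public SnapshotType.SKIPFLUSH option. A minimal, hedged sketch using the standard HBase 2.x client API (the table and snapshot names are made up for illustration):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SkipFlushSnapshotExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // SKIPFLUSH records the hfiles already on disk without forcing a
          // coordinated memstore flush; edits still only in memory are not captured.
          admin.snapshot("demo_skipflush_snap", TableName.valueOf("demo_table"),
              SnapshotType.SKIPFLUSH);
        }
      }
    }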
[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import 
java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import 
java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import 
com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.hadoop.hbase.ClusterStatus;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLoad;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.ServerName;
-059import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-060import 
org.apache.hadoop.hbase.HConstants;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import 
org.apache.hadoop.hbase.TableNotDisabledException;
-065import 
org.apache.hadoop.hbase.TableNotEnabledException;
-066import 
org.apache.hadoop.hbase.TableNotFoundException;
-067import 
org.apache.hadoop.hbase.UnknownRegionException;
-068import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040for (ServerName server : servers) {
-4041  // Parse to ServerName to do simple validation.
-4042  ServerName.parseServerName(server.toString());
-4043  pbServers.add(ProtobufUtil.toServerName(server));
-4044}
-4045
-4046executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047  @Override
-4048  public Void rpcCall() throws ServiceException {
-4049DrainRegionServersRequest req =
-4050DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051master.drainRegionServers(getRpcController(), req);
-4052return null;
-4053  }
-4054});
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060  getRpcControllerFactory()) {
-4061  @Override
-4062  public List<ServerName> rpcCall() throws ServiceException {
-4063ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064List<ServerName> servers = new ArrayList<>();
-4065for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066.getServerNameList()) {
-4067  servers.add(ProtobufUtil.toServerName(server));
-4068}
-4069return servers;
-4070  }
-4071});
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077for (ServerName server : servers) {
-4078  pbServers.add(ProtobufUtil.toServerName(server));
-4079}
-4080
-4081executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082  @Override
-4083  public Void rpcCall() throws ServiceException {
-4084RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085.addAllServerName(pbServers).build();
-4086master.removeDrainFromRegionServers(getRpcController(), req);
-4087return null;
+4039executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040  @Override
+4041  public Void rpcCall() throws ServiceException {
+4042master.drainRegionServers(getRpcController(),
+4043  RequestConverter.buildDrainRegionServersRequest(servers));
+4044return null;
+4045  }
+4046});
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052  getRpcControllerFactory()) {
+4053  @Override
+4054  public List<ServerName> rpcCall() throws ServiceException {
+4055ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056List<ServerName> servers = new ArrayList<>();
+4057for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058.getServerNameList()) {
+4059  servers.add(ProtobufUtil.toServerName(server));
+4060}
+4061return servers;
+4062  }
+4063});
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069  @Override
+4070  public Void rpcCall() throws ServiceException {
+4071master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072return null;
+4073  }
+4074});
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080HTableDescriptor[] tables = 
[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index 6831fb2..fe98c7c 100644
--- a/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -597,7 +597,7 @@
 String
 getValue(String key)
 Deprecated.
-Getter for accessing the metadata associated with the key
+Getter for accessing the metadata associated with the key.
 
 
 
@@ -1228,7 +1228,7 @@
 
 NAMESPACE_FAMILY_INFO
-public static final String NAMESPACE_FAMILY_INFO
+public static final String NAMESPACE_FAMILY_INFO
 Deprecated.
 
 See Also:
@@ -1242,7 +1242,7 @@
 
 
 NAMESPACE_FAMILY_INFO_BYTES
-public static final byte[] NAMESPACE_FAMILY_INFO_BYTES
+public static final byte[] NAMESPACE_FAMILY_INFO_BYTES
 Deprecated.
 
@@ -1252,7 +1252,7 @@
 
 
 NAMESPACE_COL_DESC_BYTES
-public static final byte[] NAMESPACE_COL_DESC_BYTES
+public static final byte[] NAMESPACE_COL_DESC_BYTES
 Deprecated.
 
@@ -1262,7 +1262,7 @@
 
 
 NAMESPACE_TABLEDESC
-public static final HTableDescriptor NAMESPACE_TABLEDESC
+public static final HTableDescriptor NAMESPACE_TABLEDESC
 Deprecated.
 Table descriptor for namespace table
 
@@ -1421,30 +1421,13 @@
-
-
-
-
-getValue
-public String getValue(String key)
-Deprecated.
-Getter for accessing the metadata associated with the key
-
-Parameters:
-key - The key.
-Returns:
-The value.
-
-
-
 
 
 
 
 
 getValues
-public Map<Bytes,Bytes> getValues()
+public Map<Bytes,Bytes> getValues()
 Deprecated.
 
 Specified by:
@@ -1460,7 +1443,7 @@
 
 
 setValue
-public HTableDescriptor setValue(byte[] key,
+public HTableDescriptor setValue(byte[] key,
  byte[] value)
 Deprecated.
 Setter for storing metadata as a (key, value) pair in map
@@ -1477,7 +1460,7 @@
 
 
 setValue
-public HTableDescriptor setValue(Bytes key,
+public HTableDescriptor setValue(Bytes key,
  Bytes value)
 Deprecated.
 
@@ -1488,7 +1471,7 @@
 
 
 setValue
-public HTableDescriptor setValue(String key,
+public HTableDescriptor setValue(String key,
  String value)
 Deprecated.
 Setter for storing metadata as a (key, value) pair in map
@@ -1505,7 +1488,7 @@
 
 
 remove
-public void remove(String key)
+public void remove(String key)
 Deprecated.
 Remove metadata represented by the key from the map
 
@@ -1521,7 +1504,7 @@
 
 
 remove
-public void remove(Bytes key)
+public void remove(Bytes key)
 Deprecated.
 Remove metadata represented by the key from the map
 
@@ -1537,7 +1520,7 @@
 
 
 remove
-public void remove(byte[] key)
+public void remove(byte[] key)
 Deprecated.
 Remove metadata represented by the key from the map
 
@@ -1553,7 +1536,7 @@
 
 
 isReadOnly
-public boolean isReadOnly()
+public boolean isReadOnly()
 Deprecated.
 Check if the readOnly flag of the table is set. If the readOnly flag is
  set then the contents of the table can only be read from but not modified.
@@ -1571,7 +1554,7 @@
 
 
 setReadOnly
-public HTableDescriptor setReadOnly(boolean readOnly)
+public HTableDescriptor setReadOnly(boolean readOnly)
 
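The metadata accessors documented above behave like a small key/value map on the descriptor. A hedged usage sketch (HTableDescriptor is deprecated here in favor of TableDescriptorBuilder, so treat this as illustrative only):

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;

    public class DescriptorMetadataExample {
      public static void main(String[] args) {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
        htd.setValue("OWNER", "analytics-team");  // store a (key, value) metadata pair
        String owner = htd.getValue("OWNER");     // read it back -> "analytics-team"
        htd.remove("OWNER");                      // drop the metadata entry
        htd.setReadOnly(true);                    // table contents may then only be read
        System.out.println("owner was: " + owner + ", readOnly: " + htd.isReadOnly());
      }
    }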
[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index 43db01d..79dc4e0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, 
long capacity, int blockSize, int[] bucketSizes,
 228  int writerThreadNum, int 
writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229  throws FileNotFoundException, 
IOException {
-230this.ioEngine = 
getIOEngineFromName(ioEngineName, capacity);
+230this.ioEngine = 
getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231this.writerThreads = new 
WriterThread[writerThreadNum];
 232long blockNumCapacity = capacity / 
blockSize;
 233if (blockNumCapacity >= Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine 
name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine 
getIOEngineFromName(String ioEngineName, long capacity)
-316  throws IOException {
-317if (ioEngineName.startsWith("file:") 
|| ioEngineName.startsWith("files:")) {
-318  // To keep the usage simple, the documented prefix is 'files:' whether there
-319  // is one file or several, but 'file:' is also supported for
-320  // compatibility.
-321  String[] filePaths = 
ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322  
.split(FileIOEngine.FILE_DELIMITER);
-323  return new FileIOEngine(capacity, 
filePaths);
-324} else if 
(ioEngineName.startsWith("offheap")) {
-325  return new 
ByteBufferIOEngine(capacity, true);
-326} else if 
(ioEngineName.startsWith("heap")) {
-327  return new 
ByteBufferIOEngine(capacity, false);
-328} else if 
(ioEngineName.startsWith("mmap:")) {
-329  return new 
FileMmapEngine(ioEngineName.substring(5), capacity);
-330} else {
-331  throw new 
IllegalArgumentException(
-332  "Don't understand io engine 
name for cache - prefix with file:, heap or offheap");
-333}
-334  }
-335
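Each branch of getIOEngineFromName() corresponds to one accepted value of the bucket cache's io engine setting. A hedged configuration sketch (keys per the stock BucketCache documentation; the paths are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BucketCacheEngineExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // One of: "offheap" (direct ByteBufferIOEngine), "heap" (on-heap),
        // "file:<path>" or "files:<p1>,<p2>" (FileIOEngine), "mmap:<path>" (FileMmapEngine).
        conf.set("hbase.bucketcache.ioengine", "files:/mnt/ssd1/bucket,/mnt/ssd2/bucket");
        conf.setInt("hbase.bucketcache.size", 4096);  // capacity, in MB
        System.out.println(conf.get("hbase.bucketcache.ioengine"));
      }
    }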
-336  /**
-337   * Cache the block with the specified 
name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey 
cacheKey, Cacheable buf) {
-343cacheBlock(cacheKey, buf, false, 
false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified 
name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is 
in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey 
cacheKey, Cacheable cachedItem, boolean inMemory,
-355  final boolean cacheDataInL1) {
-356cacheBlockWithWait(cacheKey, 
cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is 
in-memory
-364   * @param wait if true, blocking wait 
when queue is full
-365   */
-366  public void 
cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean 
inMemory,
-367  boolean wait) {
-368if (LOG.isTraceEnabled()) 
LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369if (!cacheEnabled) {
-370  return;
-371}
-372
-373if (backingMap.containsKey(cacheKey)) 
{
-374  return;
-375}
-376
-377/*
-378 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-379 */
-380RAMQueueEntry re =
-381new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-382if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-383  return;
-384}
-385int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-386BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-387boolean successfulAddition = false;
-388if (wait) {
-389  try {
-390successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391  } catch (InterruptedException e) 
{
-392
Thread.currentThread().interrupt();
-393  }
-394} else {
-395  successfulAddition = 
bq.offer(re);
-396}
-397if (!successfulAddition) {
-398  ramCache.remove(cacheKey);
-399  cacheStats.failInsert();
-400} else {
-401  
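Stripped of the HBase specifics, the enqueue discipline above is a bounded BlockingQueue offer: timed when the caller may block, immediate otherwise, with a bookkeeping rollback when the queue rejects the entry. A self-contained sketch of just that pattern:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class OfferWithWaitExample {
      public static void main(String[] args) {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(64);
        boolean wait = true;
        boolean added = false;
        if (wait) {
          try {
            // bounded wait, mirroring offer(re, DEFAULT_CACHE_WAIT_TIME, MILLISECONDS)
            added = queue.offer("block-key", 50, TimeUnit.MILLISECONDS);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();  // restore the interrupt flag
          }
        } else {
          added = queue.offer("block-key");      // non-blocking attempt
        }
        if (!added) {
          // analogue of ramCache.remove(cacheKey) + cacheStats.failInsert() above:
          // undo the registration so the entry is not left half-inserted
          System.out.println("queue full, insertion rolled back");
        }
      }
    }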

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
new file mode 100644
index 000..4c7b7db
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -0,0 +1,339 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer (Apache HBase 
3.0.0-SNAPSHOT API)
+
+
+
+
+
+org.apache.hadoop.hbase.client
+Class 
RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+
+org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+BiConsumer<Void, Throwable>
+
+
+Enclosing class:
+RawAsyncHBaseAdmin
+
+
+
+private class RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer
+extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+tableName
+
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+admin
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+DeleteColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
+ TableName tableName)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+(package private) String
+getOperationType()
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
+getDescription, onError, onFinished
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+accept
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.directory.api.util.OptionalComponentsMonitor;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.HRegionLocation;
-052import 
org.apache.hadoop.hbase.MetaTableAccessor;
-053import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import 
org.apache.hadoop.hbase.NotServingRegionException;
-055import 
org.apache.hadoop.hbase.ProcedureInfo;
-056import 
org.apache.hadoop.hbase.RegionLocations;
-057import 
org.apache.hadoop.hbase.ServerName;
-058import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-059import 
org.apache.hadoop.hbase.HConstants;
-060import 
org.apache.hadoop.hbase.TableExistsException;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-072import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
index e65748d..91a0ffa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the 
returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo 
parseRegionInfoFromRegionName(byte[] regionName)
-368throws IOException {
-369byte[][] fields = 
HRegionInfo.parseRegionName(regionName);
-370long regionId =  
Long.parseLong(Bytes.toString(fields[2]));
-371int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372return new HRegionInfo(
-373  TableName.valueOf(fields[0]), 
fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for 
the specified region.
-378   * @param connection connection we're 
using
-379   * @param regionName region we're 
looking for
-380   * @return result of the specified 
region
-381   * @throws IOException
-382   */
-383  public static Result 
getRegionResult(Connection connection,
-384  byte[] regionName) throws 
IOException {
-385Get get = new Get(regionName);
-386
get.addFamily(HConstants.CATALOG_FAMILY);
-387return get(getMetaHTable(connection), 
get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier 
of the specified merged region
-392   * @return null if it doesn't contain 
merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-397  Connection connection, byte[] 
regionName) throws IOException {
-398Result result = 
getRegionResult(connection, regionName);
-399HRegionInfo mergeA = 
getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400HRegionInfo mergeB = 
getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401if (mergeA == null && mergeB == null) {
-402  return null;
-403}
-404return new Pair<>(mergeA, mergeB);
-405 }
-406
-407  /**
-408   * Checks if the specified table 
exists.  Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're 
using
-411   * @param tableName table to check
-412   * @return true if the table exists in 
meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean 
tableExists(Connection connection,
-416  final TableName tableName)
-417  throws IOException {
-418// Catalog tables always exist.
-419return 
tableName.equals(TableName.META_TABLE_NAME)
-420|| getTableState(connection, 
tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently 
in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents 
False if we are to include offlined/splitparents regions,
-428   *
true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space 
regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> getAllRegions(Connection connection,
-434  boolean excludeOfflinedSplitParents)
-435  throws IOException {
-436List<Pair<HRegionInfo, ServerName>> result;
-437
-438result = 
getTableRegionsAndLocations(connection, null,
-439excludeOfflinedSplitParents);
-440
-441return 
getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the 
specified table. Do not use this method
-447   * to get meta table regions, use 
methods in MetaTableLocator instead.
-448   * @param connection connection we're 
using
-449   * @param tableName table we're looking 
for
-450   * @return Ordered list of {@link 
HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
-454  throws IOException {
-455return getTableRegions(connection, 
tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the 
specified table. Do not use this method
-460   * to get meta table regions, use 
methods in MetaTableLocator instead.
-461   * @param connection connection we're 
using
-462   * @param tableName table we're looking 
for
-463   * @param excludeOfflinedSplitParents 
If true, do not include offlined split
-464   * parents in the return.
-465   * @return Ordered list of 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
index 6de986f..c895448 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
@@ -26,1592 +26,1693 @@
 018package 
org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterStatus;
-035import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.HRegionInfo;
-038import 
org.apache.hadoop.hbase.RegionLoad;
-039import 
org.apache.hadoop.hbase.ServerLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import 
org.apache.hadoop.hbase.master.MasterServices;
-044import 
org.apache.hadoop.hbase.master.RegionPlan;
-045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 *
-068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution.  The computed costs are
-070 * scaled by their respective multipliers:</p>
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.hadoop.hbase.ClusterStatus;
+037import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import 
org.apache.hadoop.hbase.HRegionInfo;
+040import 
org.apache.hadoop.hbase.RegionLoad;
+041import 
org.apache.hadoop.hbase.ServerLoad;
+042import 
org.apache.hadoop.hbase.ServerName;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import 
org.apache.hadoop.hbase.master.MasterServices;
+046import 
org.apache.hadoop.hbase.master.RegionPlan;
+047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import 
org.apache.hadoop.hbase.util.Bytes;
+054import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055
+056import 

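As a worked illustration of the balancer javadoc above: each cost function yields a value in [0, 1], and the balancer combines them as a multiplier-weighted sum before comparing F(Cprime) against F(C). A hedged sketch of that arithmetic (the weights here are made up, not the shipped defaults):

    public class WeightedCostExample {
      public static void main(String[] args) {
        double[] costs       = {0.20, 0.65, 0.10};  // e.g. region load, locality, memstore size
        double[] multipliers = {500, 25, 5};        // per-function weights from configuration
        double weighted = 0, totalWeight = 0;
        for (int i = 0; i < costs.length; i++) {
          weighted += costs[i] * multipliers[i];
          totalWeight += multipliers[i];
        }
        double f = weighted / totalWeight;          // overall cost, still in [0, 1]
        System.out.println("F(C) = " + f);
        // A randomly mutated cluster Cprime is adopted only when F(Cprime) < F(C).
      }
    }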
[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 05ad3ba..4c523d9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
  This feature is still under development, so marked as IA.Private. Will change to public when
@@ -138,8 +138,8 @@
 
 CompletableFuture<Void>
-addColumnFamily(TableName tableName,
-   HColumnDescriptor columnFamily)
+addColumnFamily(TableName tableName,
+   ColumnFamilyDescriptor columnFamily)
 Add a column family to an existing table.
 
@@ -642,8 +642,8 @@
 
 CompletableFuture<Void>
-modifyColumnFamily(TableName tableName,
-  HColumnDescriptor columnFamily)
+modifyColumnFamily(TableName tableName,
+  ColumnFamilyDescriptor columnFamily)
 Modify an existing column family on a table.
 
 
@@ -797,7 +797,7 @@
 
 getConnection
-AsyncConnectionImpl getConnection()
+AsyncConnectionImpl getConnection()
 
 Returns:
 Async Connection used by this object.
@@ -810,7 +810,7 @@
 
 tableExists
-CompletableFuture<Boolean> tableExists(TableName tableName)
+CompletableFuture<Boolean> tableExists(TableName tableName)
 
 Parameters:
 tableName - Table to check.
@@ -826,7 +826,7 @@
 
 listTables
-CompletableFuture<TableDescriptor[]> listTables()
+CompletableFuture<TableDescriptor[]> listTables()
 List all the userspace tables.
 
 Returns:
@@ -842,7 +842,7 @@
 
 listTables
-CompletableFuture<TableDescriptor[]> listTables(String regex,
+CompletableFuture<TableDescriptor[]> listTables(String regex,
 boolean includeSysTables)
 List all the tables matching the given pattern.
 
@@ -862,7 +862,7 @@
 
 listTables
-CompletableFuture<TableDescriptor[]> listTables(Pattern pattern,
+CompletableFuture<TableDescriptor[]> listTables(Pattern pattern,

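The CompletableFuture-returning surface documented above composes like any other future pipeline. A hedged sketch against the async connection entry point (names per the HBase 2.x client; verify against your version, since AsyncAdmin is marked IA.Private here):

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncAdminExample {
      public static void main(String[] args) {
        CompletableFuture<AsyncConnection> connFuture =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create());
        connFuture.thenCompose(conn ->
                // tableExists returns CompletableFuture<Boolean>; no thread blocks here
                conn.getAdmin().tableExists(TableName.valueOf("demo_table")))
            .thenAccept(exists -> System.out.println("exists: " + exists))
            .join();  // only for the demo; real callers keep composing
      }
    }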
[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index fde0305..95efd69 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 2227,
- Errors: 14468,
+  File: 2228,
+ Errors: 14494,
  Warnings: 0,
  Infos: 0
   
@@ -783,7 +783,7 @@ under the License.
   0
 
 
-  58
+  60
 
   
   
@@ -3373,7 +3373,7 @@ under the License.
   0
 
 
-  7
+  8
 
   
   
@@ -8964,6 +8964,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.Cursor.java;>org/apache/hadoop/hbase/client/Cursor.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint.java;>org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
 
 
@@ -13789,7 +13803,7 @@ under the License.
   0
 
 
-  37
+  53
 
   
   
@@ -16239,7 +16253,7 @@ under the License.
   0
 
 
-  40
+  41
 
   
   
@@ -21923,7 +21937,7 @@ under the License.
   0
 
 
-  234
+  235
 
   
   
@@ -23897,7 +23911,7 @@ under the License.
   0
 
 
-  132
+  133
 
   
   
@@ -28657,7 +28671,7 @@ under the License.
   0
 
 
-  10
+  12
 
   
   
@@ -30827,7 +30841,7 @@ under the License.
   0
 
 
-  12
+  14
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/coc.html
--
diff --git a/coc.html b/coc.html
index cfb2a16..2a27411 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-06-06
+  Last Published: 
2017-06-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 1b710a7..1138359 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-06-06
+  Last Published: 
2017-06-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 6c91a5c..628b753 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -362,7 +362,7 @@
 
 Dependency Tree
 
-org.apache.hbase:hbase:pom:2.0.0-SNAPSHOT 
+org.apache.hbase:hbase:pom:3.0.0-SNAPSHOT 
 
 
 Apache HBase
@@ -524,7 +524,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/mob/MobFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mob/MobFile.html 
b/devapidocs/org/apache/hadoop/hbase/mob/MobFile.html
index c347d5b..3790bd8 100644
--- a/devapidocs/org/apache/hadoop/hbase/mob/MobFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/mob/MobFile.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MobFile
+public class MobFile
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 The mob file.
 
@@ -136,7 +136,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
-private StoreFile
+private StoreFile
 sf
 
 
@@ -160,7 +160,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 protected 
-MobFile(StoreFilesf)
+MobFile(StoreFilesf)
 
 
 
@@ -253,7 +253,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 sf
-privateStoreFile sf
+privateStoreFile sf
 
 
 
@@ -270,7 +270,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MobFile
-protectedMobFile()
+protectedMobFile()
 
 
 
@@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MobFile
-protectedMobFile(StoreFilesf)
+protectedMobFile(StoreFilesf)
 
 
 
@@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getScanner
-publicStoreFileScannergetScanner()
+publicStoreFileScannergetScanner()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Internal use only. This is used by the sweeper.
 
@@ -313,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 readCell
-publicCellreadCell(Cellsearch,
+publicCellreadCell(Cellsearch,
  booleancacheMobBlocks)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Reads a cell from the mob file.
@@ -334,7 +334,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 readCell
-publicCellreadCell(Cellsearch,
+publicCellreadCell(Cellsearch,
  booleancacheMobBlocks,
  longreadPt)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -357,7 +357,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getFileName
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFileName()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFileName()
 Gets the file name.
 
 Returns:
@@ -371,7 +371,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 open
-publicvoidopen()
+publicvoidopen()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Opens the underlying reader.
  It's not thread-safe. Use MobFileCache.openFile() instead.
@@ -387,7 +387,7 @@ extends java.lang.Object


 close
-public void close()
+public void close()
            throws java.io.IOException
 Closes the underlying reader, but does not evict blocks belonging to this file.
  It's not thread-safe. Use MobFileCache.closeFile() instead.
@@ -403,7 +403,7 @@ extends java.lang.Object


 create
-public static MobFile create(org.apache.hadoop.fs.FileSystem fs,
+public static MobFile create(org.apache.hadoop.fs.FileSystem fs,
                              org.apache.hadoop.fs.Path path,
                              org.apache.hadoop.conf.Configuration conf,
                              CacheConfig cacheConf)
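
Taken together, these methods describe the usual access pattern: create or obtain a MobFile, open it, read the cell you need, and close the reader. A minimal sketch under stated assumptions (MobFile is @InterfaceAudience.Private, the path is a placeholder, and production code should go through MobFileCache.openFile()/closeFile() as the javadoc advises):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
  import org.apache.hadoop.hbase.mob.MobFile;
  import org.apache.hadoop.hbase.util.Bytes;

  // Illustrative only; the mob file path below is a placeholder.
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  Path mobFilePath = new Path("/hbase/mobdir/data/default/t1/cf/somefile"); // placeholder
  MobFile mobFile = MobFile.create(fs, mobFilePath, conf, new CacheConfig(conf));
  mobFile.open();                                 // not thread-safe, per the javadoc
  try {
    Cell search = CellUtil.createCell(Bytes.toBytes("row1"));
    Cell found = mobFile.readCell(search, /* cacheMobBlocks = */ false);
    // ... use 'found'; it may be null if the cell is not in this file ...
  } finally {
    mobFile.close();                              // does not evict cached blocks
  }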

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/mob/MobUtils.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mob/MobUtils.html 
b/devapidocs/org/apache/hadoop/hbase/mob/MobUtils.html
index e5162d9..4007600 100644
--- a/devapidocs/org/apache/hadoop/hbase/mob/MobUtils.html
+++ b/devapidocs/org/apache/hadoop/hbase/mob/MobUtils.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
index b6c2fe3..1765903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
@@ -60,892 +60,917 @@
 052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import 
org.apache.hadoop.hbase.fs.HFileSystem;
 054import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import 
org.apache.hadoop.hbase.io.compress.Compression;
-056import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import 
org.apache.hadoop.hbase.regionserver.CellSink;
-059import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import 
com.google.common.annotations.VisibleForTesting;
-071import 
com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile includes the following (below is taken from the
-078 * <a
-079 * href="https://issues.apache.org/jira/browse/HADOOP-3315">TFile</a> documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of reading or writing a compressed block.
+055import 
org.apache.hadoop.hbase.io.MetricsIO;
+056import 
org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import 
org.apache.hadoop.hbase.io.compress.Compression;
+058import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import 
org.apache.hadoop.hbase.regionserver.CellSink;
+061import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import 
org.apache.hadoop.hbase.util.Bytes;
+069import 
org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import 
com.google.common.annotations.VisibleForTesting;
+073import 
com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile includes the following (below is taken from the
+080 * <a
+081 * href="https://issues.apache.org/jira/browse/HADOOP-3315">TFile</a> documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer the key.
-087 * <li>Temporary space to buffer the value.
-088 * </ul>
-089 * <li>HFile index, which is proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance optimization.
-094 * <ul>
-095 * <li>Minimum block size. We recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger block size is preferred if files are
-097 * primarily for sequential access. However, it would lead to inefficient random
-098 * access (because there are more data to decompress). Smaller blocks are good
-099 * for random access, but require more memory to hold the block index, and may
-100 * be slower to create (because we must flush the compressor stream at the
-101 * conclusion of each data block, which leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation does not offer true multi-threading for
-105 * 
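
The block-size guidance in this javadoc maps directly onto column-family schema. A hedged sketch (table and family names are made up; the API shown is the HColumnDescriptor block-size setter, not anything from this diff):

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;

  // Illustrative: choose a larger block size for a mostly-sequential workload,
  // within the 8KB-1MB range recommended above.
  HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t1"));
  HColumnDescriptor family = new HColumnDescriptor("cf");
  family.setBlocksize(256 * 1024);   // 256KB; the HBase default is 64KB
  table.addFamily(family);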

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/master/AssignmentManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/AssignmentManager.html
deleted file mode 100644
index a1280fb..000
--- a/devapidocs/org/apache/hadoop/hbase/master/AssignmentManager.html
+++ /dev/null
@@ -1,2651 +0,0 @@
-AssignmentManager (Apache HBase 2.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase.master
-Class AssignmentManager
-
-
-
-java.lang.Object
-
-
-org.apache.hadoop.hbase.master.AssignmentManager
-
-
-
-
-
-
-
-
-@InterfaceAudience.Private
-public class AssignmentManager
-extends java.lang.Object
-Manages and performs region assignment.
- Related communications with regionserver are all done over RPC.
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-Nested Classes
-
-Modifier and Type
-Class and Description
-
-
-private class
-AssignmentManager.DelayedAssignCallable
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private RetryCounter.BackoffPolicy
-backoffPolicy
-
-
-private LoadBalancer
-balancer
-
-
-private int
-bulkAssignThresholdRegions
-
-
-private int
-bulkAssignThresholdServers
-
-
-private boolean
-bulkAssignWaitTillAllAssigned
-
-
-private int
-bulkPerRegionOpenTimeGuesstimate
-
-
-private ExecutorService
-executorService
-
-
-private ConcurrentHashMap<String, AtomicInteger>
-failedOpenTracker
-A map to track the number of times in a row that a region has failed to open.
-
-
-
-protected AtomicBoolean
-failoverCleanupDone
-Indicator that AssignmentManager has recovered the region states so
- that ServerShutdownHandler can be fully enabled and re-assign regions
- of dead servers.
-
-
-
-private List<AssignmentListener>
-listeners
-Listeners that are called on assignment events.
-
-
-
-private KeyLocker<String>
-locker
-
-
-private static 
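
The listeners field above feeds assignment events to registered AssignmentListener implementations. A hedged sketch of such a listener; the regionOpened/regionClosed callback names and the registerListener method are assumptions, since this partial diff shows only the field:

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.master.AssignmentListener;

  // Hypothetical listener; callback names are assumed, not shown in this diff.
  AssignmentListener auditListener = new AssignmentListener() {
    @Override
    public void regionOpened(HRegionInfo regionInfo, ServerName serverName) {
      System.out.println(regionInfo.getEncodedName() + " opened on " + serverName);
    }

    @Override
    public void regionClosed(HRegionInfo regionInfo) {
      System.out.println(regionInfo.getEncodedName() + " closed");
    }
  };
  assignmentManager.registerListener(auditListener); // method assumed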

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
deleted file mode 100644
index 9cc7e38..000
--- 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.CallWriteListener.html
+++ /dev/null
@@ -1,331 +0,0 @@
-NettyRpcServer.CallWriteListener (Apache HBase 2.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase.ipc
-Class 
NettyRpcServer.CallWriteListener
-
-
-
-java.lang.Object
-
-
-org.apache.hadoop.hbase.ipc.NettyRpcServer.CallWriteListener
-
-
-
-
-
-
-
-All Implemented Interfaces:
-io.netty.channel.ChannelFutureListener,
- io.netty.util.concurrent.GenericFutureListener<io.netty.channel.ChannelFuture>, java.util.EventListener
-
-
-Enclosing class:
-NettyRpcServer
-
-
-
-private class NettyRpcServer.CallWriteListener
-extends java.lang.Object
-implements io.netty.channel.ChannelFutureListener
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private NettyServerCall
-call
-
-
-
-
-
-
-Fields inherited from 
interfaceio.netty.channel.ChannelFutureListener
-CLOSE, CLOSE_ON_FAILURE, FIRE_EXCEPTION_ON_FAILURE
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-CallWriteListener(NettyServerCall call)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-operationComplete(io.netty.channel.ChannelFuture future)
-
-
-
-
-
-
-Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
-
-
-
-
-
-
-
-
-
-
-
-
-
-Field Detail
-
-
-
-
-
-call
-private NettyServerCall call
-
-
-
-
-
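
The shape of this now-deleted class is the standard Netty callback pattern: wrap the in-flight call, then finish it when the channel write completes. A minimal sketch of the same pattern, with a stand-in for NettyServerCall (whose real API is not shown in this diff):

  import io.netty.channel.ChannelFuture;
  import io.netty.channel.ChannelFutureListener;

  // Stand-in for NettyServerCall; done() is hypothetical.
  interface Call {
    void done(boolean writeSucceeded);
  }

  final class CallWriteListener implements ChannelFutureListener {
    private final Call call; // the in-flight RPC whose response is being written

    CallWriteListener(Call call) {
      this.call = call;
    }

    @Override
    public void operationComplete(ChannelFuture future) {
      // Finish the call once Netty reports the outcome of the write.
      call.done(future.isSuccess());
    }
  }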

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
index c7fc597..5732c92 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-065@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-066@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-067@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-068@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-069@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
-070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-071@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-072@org.jamon.annotations.Argument(name 
= "filter", type = "String")})
+064@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+065@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
+066@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+067@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
+068@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+069@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+070@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+071@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+072@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,159 +118,159 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 25, 1
-114public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
+113// 26, 1
+114public void setFilter(String 
filter)
 115{
-116  // 25, 1
-117  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
-118  
m_catalogJanitorEnabled__IsNotDefault = true;
+116  // 26, 1
+117  m_filter = filter;
+118  m_filter__IsNotDefault = true;
 119}
-120public boolean 
getCatalogJanitorEnabled()
+120public String getFilter()
 121{
-122  return m_catalogJanitorEnabled;
+122  return m_filter;
 123}
-124private boolean 
m_catalogJanitorEnabled;
-125public boolean 
getCatalogJanitorEnabled__IsNotDefault()
+124private String m_filter;
+125public boolean 
getFilter__IsNotDefault()
 126{
-127  return 
m_catalogJanitorEnabled__IsNotDefault;
+127  return m_filter__IsNotDefault;
 128}
-129private boolean 
m_catalogJanitorEnabled__IsNotDefault;
-130// 21, 1
-131public void 
setFrags(Map<String,Integer> frags)
+129private boolean 
m_filter__IsNotDefault;
+130// 23, 1
+131public void 
setServers(List<ServerName> servers)
 132{
-133  // 21, 1
-134  m_frags = frags;
-135  m_frags__IsNotDefault = true;
+133  // 23, 1
+134  m_servers = servers;
+135  m_servers__IsNotDefault = true;
 136}
-137public Map<String,Integer> 
getFrags()
+137public List<ServerName> 
getServers()
 138{
-139  return m_frags;
+139  return m_servers;
 140}
-141private Map<String,Integer> 
m_frags;
-142public boolean 
getFrags__IsNotDefault()
+141private List<ServerName> 
m_servers;
+142public boolean 
getServers__IsNotDefault()
 143{
-144  return m_frags__IsNotDefault;
+144  return m_servers__IsNotDefault;
 145}
-146private boolean 
m_frags__IsNotDefault;
-147// 23, 1
-148public void 
setServers(List<ServerName> servers)
+146private boolean 
m_servers__IsNotDefault;
+147// 29, 1
+148public void 
setAssignmentManager(AssignmentManager assignmentManager)
 149{
-150  // 23, 1
-151  m_servers = servers;
-152  m_servers__IsNotDefault = true;
+150  // 29, 1
+151  m_assignmentManager = 
assignmentManager;
+152  m_assignmentManager__IsNotDefault = 
true;
 153}
-154public ListServerName 
getServers()
+154public AssignmentManager 
getAssignmentManager()
 155{
-156  return m_servers;
+156  return m_assignmentManager;
 157}
-158 
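
Nothing functional changed in this hunk: the same optional arguments and accessors are present, only emitted in a different order, which suggests the Jamon generator iterates a hash-ordered collection. For orientation, a hedged sketch of driving these generated setters (the ImplData naming is an assumption; only the setters themselves appear in the hunk):

  // Hypothetical: populate the generated template data before rendering.
  MasterStatusTmpl.ImplData data = new MasterStatusTmpl.ImplData();
  data.setFilter("general");             // optional arguments from the hunk above
  data.setServers(serverNames);          // List<ServerName>
  data.setAssignmentManager(am);         // AssignmentManager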

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index 67bc4c0..5196960 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -853,8 +853,8 @@ implements 
 void
-preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
+preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot,
     HTableDescriptor hTableDescriptor)
 Called before a snapshot is cloned.
 
@@ -921,8 +921,8 @@ implements 
 void
-preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot)
+preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot)
 Called before a snapshot is deleted.
 
 
@@ -1064,8 +1064,8 @@ implements 
 void
-preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot)
+preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot)
 Called before a listSnapshots request has been processed.
 
 
@@ -1216,8 +1216,8 @@ implements 
 void
-preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
+preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot,
     HTableDescriptor hTableDescriptor)
 Called before a snapshot is restored.
 
@@ -1320,8 +1320,8 @@ implements 
 void
-preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot,
+preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot,
     HTableDescriptor hTableDescriptor)
 Called before a new snapshot is taken.
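
These hook-signature changes only move SnapshotDescription from HBaseProtos to SnapshotProtos; implementors override the same methods. A hedged sketch of a MasterObserver using the post-change type (package names per this diff; it assumes the other observer methods have no-op defaults, otherwise extend the base observer class of this code line):

  import java.io.IOException;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.MasterObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;

  // Sketch: veto snapshots of one table, using the new parameter type.
  public class SnapshotGuard implements MasterObserver {
    @Override
    public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
        SnapshotDescription snapshot, HTableDescriptor hTableDescriptor)
        throws IOException {
      if ("forbidden".equals(hTableDescriptor.getTableName().getNameAsString())) {
        throw new IOException("snapshots of table 'forbidden' are not allowed");
      }
    }
  }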
 
@@ -1501,7 +1501,7 @@ implements MasterObserver
-postAddColumn, postAddColumnFamily, postAddColumnHandler, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch,
 postCloneSnapshot, postCompletedAddColumnFamilyAction, postCompletedDeleteColumnFamilyAction, postCompletedDeleteTableAction, postCompletedDisableTableAction,
 postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyColumnFamilyAction, postCompletedModifyTableAction, postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postCreateTableHandler, postDeleteColumn, postDeleteColumnHandler, postDeleteSnapshot,
 postDeleteTableHandler, postDisableReplicationPeer, postDisableTable, postDisableTableHandler, postDispatchMerge, postEnableReplicationPeer, postEnableTable,
 postEnableTableHandler, postGetNamespaceDescriptor, postGetReplicationPeerConfig, postListLocks, postListReplicationPeers, postListSnapshot, postLockHeartbeat,
 postMergeRegions, postMergeRegionsCommitAction, postModifyColumn, postModifyColumnFamily, postModifyColumnHandler, postModifyNamespace, postModifyTableHandler,
 postMove,
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
index 9e1c66c..27ccbd5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
@@ -28,1601 +28,787 @@
 020
 021import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 022
-023import java.io.ByteArrayInputStream;
-024import java.io.ByteArrayOutputStream;
-025import java.io.Closeable;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.net.InetAddress;
-029import java.net.InetSocketAddress;
-030import java.nio.ByteBuffer;
-031import java.nio.channels.Channels;
-032import 
java.nio.channels.GatheringByteChannel;
-033import 
java.nio.channels.ReadableByteChannel;
-034import 
java.nio.channels.WritableByteChannel;
-035import 
java.security.GeneralSecurityException;
-036import 
java.security.PrivilegedExceptionAction;
-037import java.util.ArrayList;
-038import java.util.HashMap;
-039import java.util.List;
-040import java.util.Map;
-041import java.util.Properties;
-042import 
java.util.concurrent.atomic.LongAdder;
-043
-044import javax.security.sasl.Sasl;
-045import 
javax.security.sasl.SaslException;
-046import javax.security.sasl.SaslServer;
-047
-048import 
org.apache.commons.crypto.cipher.CryptoCipherFactory;
-049import 
org.apache.commons.crypto.random.CryptoRandom;
-050import 
org.apache.commons.crypto.random.CryptoRandomFactory;
-051import org.apache.commons.logging.Log;
-052import 
org.apache.commons.logging.LogFactory;
-053import 
org.apache.hadoop.conf.Configuration;
-054import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-055import 
org.apache.hadoop.hbase.CellScanner;
-056import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-057import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-058import 
org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.Server;
-060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-061import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-062import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-063import 
org.apache.hadoop.hbase.codec.Codec;
-064import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-065import 
org.apache.hadoop.hbase.exceptions.RequestTooBigException;
-066import 
org.apache.hadoop.hbase.io.ByteBufferOutputStream;
-067import 
org.apache.hadoop.hbase.io.ByteBufferPool;
-068import 
org.apache.hadoop.hbase.io.crypto.aes.CryptoAES;
-069import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-070import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-073import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-074import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-075import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-076import 
org.apache.hadoop.hbase.security.AuthMethod;
-077import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-078import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-079import 
org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-080import 
org.apache.hadoop.hbase.security.SaslStatus;
-081import 
org.apache.hadoop.hbase.security.SaslUtil;
-082import 
org.apache.hadoop.hbase.security.User;
-083import 
org.apache.hadoop.hbase.security.UserProvider;
-084import 
org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
-085import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-086import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput;
-087import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-088import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
-089import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-090import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-091import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-092import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-093import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
-098import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
index fbffa2c..4f6f813 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
@@ -61,172 +61,175 @@
 053public class FullTableBackupClient 
extends TableBackupClient {
 054  private static final Log LOG = 
LogFactory.getLog(FullTableBackupClient.class);
 055
-056  public FullTableBackupClient(final 
Connection conn, final String backupId, BackupRequest request)
-057  throws IOException {
-058super(conn, backupId, request);
-059  }
-060
-061  /**
-062   * Do snapshot copy.
-063   * @param backupInfo backup info
-064   * @throws Exception exception
-065   */
-066  private void snapshotCopy(BackupInfo 
backupInfo) throws Exception {
-067LOG.info("Snapshot copy is 
starting.");
-068
-069// set overall backup phase: 
snapshot_copy
-070
backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY);
+056  public FullTableBackupClient() {
+057  }
+058
+059  public FullTableBackupClient(final 
Connection conn, final String backupId, BackupRequest request)
+060  throws IOException {
+061super(conn, backupId, request);
+062  }
+063
+064  /**
+065   * Do snapshot copy.
+066   * @param backupInfo backup info
+067   * @throws Exception exception
+068   */
+069  protected void snapshotCopy(BackupInfo 
backupInfo) throws Exception {
+070LOG.info("Snapshot copy is 
starting.");
 071
-072// call ExportSnapshot to copy files 
based on hbase snapshot for backup
-073// ExportSnapshot only support single 
snapshot export, need loop for multiple tables case
-074BackupCopyJob copyService = 
BackupRestoreFactory.getBackupCopyJob(conf);
-075
-076// number of snapshots matches number 
of tables
-077float numOfSnapshots = 
backupInfo.getSnapshotNames().size();
+072// set overall backup phase: 
snapshot_copy
+073
backupInfo.setPhase(BackupPhase.SNAPSHOTCOPY);
+074
+075// call ExportSnapshot to copy files 
based on hbase snapshot for backup
+076// ExportSnapshot only support single 
snapshot export, need loop for multiple tables case
+077BackupCopyJob copyService = 
BackupRestoreFactory.getBackupCopyJob(conf);
 078
-079LOG.debug("There are " + (int) 
numOfSnapshots + " snapshots to be copied.");
-080
-081for (TableName table : 
backupInfo.getTables()) {
-082  // Currently we simply set the sub 
copy tasks by counting the table snapshot number, we can
-083  // calculate the real files' size 
for the percentage in the future.
-084  // 
backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
-085  int res = 0;
-086  String[] args = new String[4];
-087  args[0] = "-snapshot";
-088  args[1] = 
backupInfo.getSnapshotName(table);
-089  args[2] = "-copy-to";
-090  args[3] = 
backupInfo.getTableBackupDir(table);
-091
-092  LOG.debug("Copy snapshot " + 
args[1] + " to " + args[3]);
-093  res = copyService.copy(backupInfo, 
backupManager, conf, BackupType.FULL, args);
-094  // if one snapshot export failed, do not continue for the remaining snapshots
-095  if (res != 0) {
-096LOG.error("Exporting Snapshot " + 
args[1] + " failed with return code: " + res + ".");
-097
-098throw new IOException("Failed of 
exporting snapshot " + args[1] + " to " + args[3]
-099+ " with reason code " + 
res);
-100  }
-101  LOG.info("Snapshot copy " + args[1] 
+ " finished.");
-102}
-103  }
-104
-105  /**
-106   * Backup request execution
-107   * @throws IOException
-108   */
-109  @Override
-110  public void execute() throws 
IOException {
-111
-112try (Admin admin = conn.getAdmin();) 
{
-113
-114  // Begin BACKUP
-115  beginBackup(backupManager, 
backupInfo);
-116  String savedStartCode = null;
-117  boolean firstBackup = false;
-118  // do snapshot for full table 
backup
-119
-120  savedStartCode = 
backupManager.readBackupStartCode();
-121  firstBackup = savedStartCode == 
null || Long.parseLong(savedStartCode) == 0L;
-122  if (firstBackup) {
-123// This is our first backup. 
Let's put some marker to system table so that we can hold the logs
-124// while we do the backup.
-125
backupManager.writeBackupStartCode(0L);
-126  }
-127  // We roll log here before we do 
the snapshot. It is possible there is duplicate data
-128  // in the log that is already in 
the snapshot. But if we do it after the snapshot, we
-129  // could have data loss.
-130  // A better approach is to do the 
roll log on each RS in the 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index f2c44db..6cf2fc8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -2581,7 +2581,7 @@
 2573try {
 2574  // Restore snapshot
 2575  get(
-2576
internalRestoreSnapshotAsync(snapshotName, tableName, false),
+2576
internalRestoreSnapshotAsync(snapshotName, tableName),
 2577syncWaitTimeout,
 2578TimeUnit.MILLISECONDS);
 2579} catch (IOException e) {
@@ -2590,7 +2590,7 @@
 2582  if (takeFailSafeSnapshot) {
 2583try {
 2584  get(
-2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false),
+2585
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
 2586syncWaitTimeout,
 2587TimeUnit.MILLISECONDS);
 2588  String msg = "Restore 
snapshot=" + snapshotName +
@@ -2633,7 +2633,7 @@
 2625  throw new 
TableNotDisabledException(tableName);
 2626}
 2627
-2628return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
+2628return 
internalRestoreSnapshotAsync(snapshotName, tableName);
 2629  }
 2630
 2631  @Override
@@ -2643,1621 +2643,1614 @@
 2635  }
 2636
 2637  @Override
-2638  public void cloneSnapshot(String 
snapshotName, TableName tableName, boolean restoreAcl)
+2638  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
 2639  throws IOException, 
TableExistsException, RestoreSnapshotException {
 2640if (tableExists(tableName)) {
 2641  throw new 
TableExistsException(tableName);
 2642}
 2643get(
-2644  
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
+2644  
internalRestoreSnapshotAsync(snapshotName, tableName),
 2645  Integer.MAX_VALUE,
 2646  TimeUnit.MILLISECONDS);
 2647  }
 2648
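
This hunk drops the restoreAcl flag from the internal restore path, so the two-argument cloneSnapshot is now a straight delegation. A hedged sketch of the caller-side flow implied here ('connection' is assumed to be an open Connection; names are illustrative):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;

  // Illustrative: clone a snapshot into a new table. cloneSnapshot throws
  // TableExistsException if the target already exists, per the code above.
  try (Admin admin = connection.getAdmin()) {
    TableName target = TableName.valueOf("t1_clone");
    if (!admin.tableExists(target)) {
      admin.cloneSnapshot("snapshot_t1", target);
    }
  }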
 2649  @Override
-2650  public void cloneSnapshot(final String 
snapshotName, final TableName tableName)
-2651  throws IOException, 
TableExistsException, RestoreSnapshotException {
-2652cloneSnapshot(snapshotName, 
tableName, false);
-2653  }
-2654
-2655  @Override
-2656  public FutureVoid 
cloneSnapshotAsync(final String snapshotName, final TableName tableName)
-2657  throws IOException, 
TableExistsException {
-2658if (tableExists(tableName)) {
-2659  throw new 
TableExistsException(tableName);
-2660}
-2661return 
internalRestoreSnapshotAsync(snapshotName, tableName, false);
-2662  }
-2663
-2664  @Override
-2665  public byte[] 
execProcedureWithRet(String signature, String instance, MapString, 
String props)
-2666  throws IOException {
-2667ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2668final ExecProcedureRequest request 
=
-2669
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(
-2672  new 
MasterCallableExecProcedureResponse(getConnection(), 
getRpcControllerFactory()) {
-2673@Override
-2674protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675  return 
master.execProcedureWithRet(getRpcController(), request);
-2676}
-2677  });
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String 
signature, String instance, MapString, String props)
-2684  throws IOException {
-2685ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
-2686final ExecProcedureRequest request 
=
-2687
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
-2688// run the procedure on the master
-2689ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2690getConnection(), 
getRpcControllerFactory()) {
-2691  @Override
-2692  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2693return 
master.execProcedure(getRpcController(), request);
-2694  }
-2695});
-2696
-2697long start = 
EnvironmentEdgeManager.currentTime();
-2698long max = 
response.getExpectedTimeout();
-2699long maxPauseTime = max / 
this.numRetries;
-2700int tries = 0;
-2701LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2702signature + " : " + instance + 
"'' to complete. (max " + maxPauseTime + " ms per 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
index b01aa5a..8090868 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
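
The getReader() contract above ("must call initReader first, else returns null") is easy to trip over. A hedged usage sketch, assuming an already-constructed StoreFile sf and the initReader() method the @see tag points at:

  // Illustrative: open the reader before use, then close without evicting.
  sf.initReader();                                 // must precede getReader()
  StoreFileReader reader = sf.getReader();
  try {
    // ... read via 'reader' ...
  } finally {
    sf.closeReader(/* evictOnClose = */ false);    // closeReader(boolean), shown below
  }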
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside passed <code>dir</code>
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index 30c0dec..d77512a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -32,1996 +32,2020 @@
 024import java.util.Collection;
 025import java.util.List;
 026import java.util.Map;
-027import java.util.concurrent.Future;
-028import java.util.regex.Pattern;
-029
-030import 
org.apache.hadoop.conf.Configuration;
-031import 
org.apache.hadoop.hbase.Abortable;
-032import 
org.apache.hadoop.hbase.ClusterStatus;
-033import 
org.apache.hadoop.hbase.HColumnDescriptor;
-034import 
org.apache.hadoop.hbase.HRegionInfo;
-035import 
org.apache.hadoop.hbase.HTableDescriptor;
-036import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-037import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-038import 
org.apache.hadoop.hbase.ProcedureInfo;
-039import 
org.apache.hadoop.hbase.RegionLoad;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableExistsException;
-042import 
org.apache.hadoop.hbase.TableName;
-043import 
org.apache.hadoop.hbase.TableNotFoundException;
-044import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-045import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-046import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-047import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-048import 
org.apache.hadoop.hbase.procedure2.LockInfo;
-049import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-050import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-051import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-052import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-053import 
org.apache.hadoop.hbase.replication.ReplicationException;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-055import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-056import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-057import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-058import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-059import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-060import 
org.apache.hadoop.hbase.util.Pair;
-061
-062/**
-063 * The administrative API for HBase. 
Obtain an instance from an {@link Connection#getAdmin()} and
-064 * call {@link #close()} afterwards.
-065 * <p>Admin can be used to create, drop, list, enable and disable tables, add and drop table
-066 * column families and other administrative operations.
-067 *
-068 * @see ConnectionFactory
-069 * @see Connection
-070 * @see Table
-071 * @since 0.99.0
-072 */
-073@InterfaceAudience.Public
-074public interface Admin extends Abortable, 
Closeable {
-075  int getOperationTimeout();
-076
-077  @Override
-078  void abort(String why, Throwable e);
-079
-080  @Override
-081  boolean isAborted();
-082
-083  /**
-084   * @return Connection used by this 
object.
-085   */
-086  Connection getConnection();
-087
-088  /**
-089   * @param tableName Table to check.
-090   * @return True if table exists 
already.
-091   * @throws IOException
-092   */
-093  boolean tableExists(final TableName 
tableName) throws IOException;
-094
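
The javadoc above spells out the lifecycle: obtain the Admin from a Connection and close it when done. A hedged sketch of that pattern (configuration and table name are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  // Illustrative: obtain, use, and close an Admin as the javadoc prescribes.
  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    boolean exists = admin.tableExists(TableName.valueOf("t1"));
    System.out.println("t1 exists: " + exists);
  }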
-095  /**
-096   * List all the userspace tables.
-097   *
-098   * @return - returns an array of 
read-only HTableDescriptors
-099   * @throws IOException if a remote or 
network exception occurs
-100   */
-101  HTableDescriptor[] listTables() throws 
IOException;
-102
-103  /**
-104   * List all the userspace tables 
matching the given pattern.
-105   *
-106   * @param pattern The compiled regular 
expression to match against
-107   * @return - returns an array of 
read-only HTableDescriptors
-108   * @throws IOException if a remote or 
network exception occurs
-109   * @see #listTables()
-110   */
-111  HTableDescriptor[] listTables(Pattern 
pattern) throws IOException;
-112
-113  /**
-114   * List all the userspace tables 
matching the given regular expression.
-115   *
-116   * @param regex The regular expression 
to match against
-117   * @return - returns an array of 
HTableDescriptors
-118   * @throws IOException if a remote or 
network exception occurs
-119   * @see 
#listTables(java.util.regex.Pattern)
-120   */
-121  HTableDescriptor[] listTables(String 
regex) throws IOException;
-122
-123  /**
-124   * List all the tables matching the 
given pattern.
-125   *
-126   * @param pattern The compiled regular 
expression to match against
-127   * @param includeSysTables False to 
match only against userspace tables
-128   * @return - returns an array of 
read-only HTableDescriptors
-129   * @throws IOException if a remote 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
index 27e0dee..109b5f3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcServer.html
@@ -67,157 +67,157 @@
 059import 
org.apache.hadoop.hbase.CellScanner;
 060import 
org.apache.hadoop.hbase.HConstants;
 061import org.apache.hadoop.hbase.Server;
-062import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-063import 
org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-064import 
org.apache.hadoop.hbase.nio.ByteBuff;
-065import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-066import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-067import 
org.apache.hadoop.hbase.security.AuthMethod;
-068import 
org.apache.hadoop.hbase.security.HBasePolicyProvider;
-069import 
org.apache.hadoop.hbase.security.SaslStatus;
-070import 
org.apache.hadoop.hbase.security.SaslUtil;
-071import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-073import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.JVM;
-077import 
org.apache.hadoop.hbase.util.Pair;
-078import 
org.apache.hadoop.io.IntWritable;
-079import 
org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-080import org.apache.htrace.TraceInfo;
-081
-082/**
-083 * An RPC server with Netty4 
implementation.
-084 *
-085 */
-086public class NettyRpcServer extends 
RpcServer {
-087
-088  public static final Log LOG = 
LogFactory.getLog(NettyRpcServer.class);
-089
-090  protected final InetSocketAddress 
bindAddress;
-091
-092  private final CountDownLatch closed = 
new CountDownLatch(1);
-093  private final Channel serverChannel;
-094  private final ChannelGroup allChannels 
= new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
-095
-096  public NettyRpcServer(final Server 
server, final String name,
-097  final 
ListBlockingServiceAndInterface services,
-098  final InetSocketAddress 
bindAddress, Configuration conf,
-099  RpcScheduler scheduler) throws 
IOException {
-100super(server, name, services, 
bindAddress, conf, scheduler);
-101this.bindAddress = bindAddress;
-102boolean useEpoll = useEpoll(conf);
-103int workerCount = 
conf.getInt("hbase.netty.rpc.server.worker.count",
-104
Runtime.getRuntime().availableProcessors() / 4);
-105EventLoopGroup bossGroup = null;
-106EventLoopGroup workerGroup = null;
-107if (useEpoll) {
-108  bossGroup = new 
EpollEventLoopGroup(1);
-109  workerGroup = new 
EpollEventLoopGroup(workerCount);
-110} else {
-111  bossGroup = new 
NioEventLoopGroup(1);
-112  workerGroup = new 
NioEventLoopGroup(workerCount);
-113}
-114ServerBootstrap bootstrap = new 
ServerBootstrap();
-115bootstrap.group(bossGroup, 
workerGroup);
-116if (useEpoll) {
-117  
bootstrap.channel(EpollServerSocketChannel.class);
-118} else {
-119  
bootstrap.channel(NioServerSocketChannel.class);
-120}
-121
bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
-122
bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
-123
bootstrap.childOption(ChannelOption.ALLOCATOR,
-124
PooledByteBufAllocator.DEFAULT);
-125bootstrap.childHandler(new 
Initializer(maxRequestSize));
-126
-127try {
-128  serverChannel = 
bootstrap.bind(this.bindAddress).sync().channel();
-129  LOG.info("NettyRpcServer bind to 
address=" + serverChannel.localAddress()
-130  + ", 
hbase.netty.rpc.server.worker.count=" + workerCount
-131  + ", useEpoll=" + useEpoll);
-132  allChannels.add(serverChannel);
-133} catch (InterruptedException e) {
-134  throw new 
InterruptedIOException(e.getMessage());
-135}
-136initReconfigurable(conf);
-137this.scheduler.init(new 
RpcSchedulerContext(this));
-138  }
-139
-140  private static boolean 
useEpoll(Configuration conf) {
-141// Config to enable native 
transport.
-142boolean epollEnabled = 
conf.getBoolean("hbase.rpc.server.nativetransport",
-143true);
-144// Use the faster native epoll 
transport mechanism on linux if enabled
-145return epollEnabled && JVM.isLinux() && JVM.isAmd64();
-146  }
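
useEpoll() gates the native transport on both a config flag and the platform. A hedged sketch of tuning it (both keys are read verbatim by the constructor and useEpoll() above; the values here are made up):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  // Illustrative: force the NIO transport and a fixed worker count.
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.rpc.server.nativetransport", false); // skip epoll
  conf.setInt("hbase.netty.rpc.server.worker.count", 8);      // default: cores / 4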
-147
-148  @Override
-149  public synchronized void start() {
-150if (started) {
-151  return;
-152}
-153authTokenSecretMgr = 
createSecretManager();
-154if (authTokenSecretMgr != 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index f3f7a46..8750fa2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -56,2015 +56,2125 @@
 048import 
org.apache.hadoop.hbase.MetaTableAccessor;
 049import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
 050import 
org.apache.hadoop.hbase.NotServingRegionException;
-051import 
org.apache.hadoop.hbase.RegionLocations;
-052import 
org.apache.hadoop.hbase.ServerName;
-053import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-054import 
org.apache.hadoop.hbase.HConstants;
-055import 
org.apache.hadoop.hbase.TableExistsException;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-058import 
org.apache.hadoop.hbase.TableNotDisabledException;
-059import 
org.apache.hadoop.hbase.TableNotFoundException;
-060import 
org.apache.hadoop.hbase.UnknownRegionException;
-061import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-062import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-063import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-064import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-065import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-066import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-067import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-068import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-069import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-070import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-071import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-072import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-073import 
org.apache.hadoop.hbase.replication.ReplicationException;
-074import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-075import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-076import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-102import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 6c52543..f3f7a46 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -31,1797 +31,2040 @@
 023import java.util.ArrayList;
 024import java.util.Arrays;
 025import java.util.Collection;
-026import java.util.HashMap;
-027import java.util.LinkedList;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Optional;
-031import 
java.util.concurrent.CompletableFuture;
-032import java.util.concurrent.TimeUnit;
-033import 
java.util.concurrent.atomic.AtomicReference;
-034import java.util.function.BiConsumer;
-035import java.util.regex.Pattern;
-036import java.util.stream.Collectors;
-037
-038import 
com.google.common.annotations.VisibleForTesting;
-039
-040import io.netty.util.Timeout;
-041import io.netty.util.TimerTask;
-042import org.apache.commons.logging.Log;
-043import 
org.apache.commons.logging.LogFactory;
-044import 
org.apache.hadoop.hbase.HColumnDescriptor;
-045import 
org.apache.hadoop.hbase.HRegionInfo;
-046import 
org.apache.hadoop.hbase.HRegionLocation;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor;
-048import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-049import 
org.apache.hadoop.hbase.NotServingRegionException;
-050import 
org.apache.hadoop.hbase.RegionLocations;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-053import 
org.apache.hadoop.hbase.HConstants;
-054import 
org.apache.hadoop.hbase.TableExistsException;
-055import 
org.apache.hadoop.hbase.TableName;
-056import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-057import 
org.apache.hadoop.hbase.TableNotFoundException;
-058import 
org.apache.hadoop.hbase.UnknownRegionException;
-059import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-060import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-061import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-062import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-063import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-064import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-065import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-066import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-067import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-068import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-069import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-070import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-071import 
org.apache.hadoop.hbase.replication.ReplicationException;
-072import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-073import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-074import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-092import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
index 3f49e6d..835aecb 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
@@ -512,6 +512,16 @@
 void
 FavoredNodesManager.initialize(SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment)

+
+ServerName
+FavoredNodeLoadBalancer.randomAssignment(HRegionInfo regionInfo,
+ List<ServerName> servers)
+
+
+Map<ServerName,List<HRegionInfo>>
+FavoredNodeLoadBalancer.roundRobinAssignment(List<HRegionInfo> regions,
+ List<ServerName> servers)
+
 
 
 
@@ -722,6 +732,54 @@
 void
 BaseLoadBalancer.initialize()
 
+
+void
+FavoredStochasticBalancer.initialize()
+
+
+ServerName
+BaseLoadBalancer.randomAssignment(HRegionInfo regionInfo,
+ List<ServerName> servers)
+Used to assign a single region to a random server.
+
+
+ServerName
+FavoredStochasticBalancer.randomAssignment(HRegionInfo regionInfo,
+ List<ServerName> servers)
+
+
+Map<ServerName,List<HRegionInfo>>
+BaseLoadBalancer.retainAssignment(Map<HRegionInfo,ServerName> regions,
+ List<ServerName> servers)
+Generates a bulk assignment startup plan, attempting to reuse the existing
+ assignment information from META, but adjusting for the specified list of
+ available/online servers available for assignment.
+
+
+Map<ServerName,List<HRegionInfo>>
+FavoredStochasticBalancer.retainAssignment(Map<HRegionInfo,ServerName> regions,
+ List<ServerName> servers)
+
+
+Map<ServerName,List<HRegionInfo>>
+BaseLoadBalancer.roundRobinAssignment(List<HRegionInfo> regions,
+ List<ServerName> servers)
+Generates a bulk assignment plan to be used on cluster startup using a
+ simple round-robin assignment.
+
+
+Map<ServerName,List<HRegionInfo>>
+FavoredStochasticBalancer.roundRobinAssignment(List<HRegionInfo> regions,
+ List<ServerName> servers)
+
+
+private 
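As context for the assignment methods listed above: randomAssignment places one region on one live server, roundRobinAssignment deals a whole region list across the servers, and retainAssignment tries to keep regions where META last saw them. A toy sketch of the first two shapes, with regions and servers modeled as plain Strings instead of the real HRegionInfo/ServerName types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

// Illustrative only: mirrors the method shapes in the table above, not
// BaseLoadBalancer's actual implementation.
final class AssignmentSketch {
  // roundRobinAssignment: deal the regions across the servers in order, wrapping.
  static Map<String, List<String>> roundRobinAssignment(List<String> regions,
      List<String> servers) {
    Map<String, List<String>> plan = new HashMap<>();
    for (int i = 0; i < regions.size(); i++) {
      String server = servers.get(i % servers.size());
      plan.computeIfAbsent(server, s -> new ArrayList<>()).add(regions.get(i));
    }
    return plan;
  }

  // randomAssignment: one region goes to one randomly chosen live server.
  static String randomAssignment(String region, List<String> servers) {
    return servers.get(ThreadLocalRandom.current().nextInt(servers.size()));
  }
}

The favored-node variants (FavoredNodeLoadBalancer, FavoredStochasticBalancer) override these same entry points to bias placement toward each region's favored nodes.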

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index be839b7..72853dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -45,1639 +45,1784 @@
 037
 038import 
com.google.common.annotations.VisibleForTesting;
 039
-040import org.apache.commons.logging.Log;
-041import 
org.apache.commons.logging.LogFactory;
-042import 
org.apache.hadoop.hbase.HColumnDescriptor;
-043import 
org.apache.hadoop.hbase.HRegionInfo;
-044import 
org.apache.hadoop.hbase.HRegionLocation;
-045import 
org.apache.hadoop.hbase.HTableDescriptor;
-046import 
org.apache.hadoop.hbase.MetaTableAccessor;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-048import 
org.apache.hadoop.hbase.NotServingRegionException;
-049import 
org.apache.hadoop.hbase.RegionLocations;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-052import 
org.apache.hadoop.hbase.HConstants;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-055import 
org.apache.hadoop.hbase.TableNotFoundException;
-056import 
org.apache.hadoop.hbase.UnknownRegionException;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-059import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-060import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-061import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-062import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-063import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-064import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-066import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-067import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-068import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-069import 
org.apache.hadoop.hbase.replication.ReplicationException;
-070import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-071import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-072import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-095import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index ac4a9b3..be839b7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import 
java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import 
java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import 
com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import 
org.apache.commons.logging.LogFactory;
-037import 
org.apache.hadoop.hbase.HColumnDescriptor;
-038import 
org.apache.hadoop.hbase.HRegionInfo;
-039import 
org.apache.hadoop.hbase.HRegionLocation;
-040import 
org.apache.hadoop.hbase.HTableDescriptor;
-041import 
org.apache.hadoop.hbase.MetaTableAccessor;
-042import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import 
org.apache.hadoop.hbase.NotServingRegionException;
-044import 
org.apache.hadoop.hbase.RegionLocations;
-045import 
org.apache.hadoop.hbase.ServerName;
-046import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-047import 
org.apache.hadoop.hbase.HConstants;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import 
org.apache.hadoop.hbase.TableNotFoundException;
-051import 
org.apache.hadoop.hbase.UnknownRegionException;
-052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-054import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-057import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
index a754d1e..e9c690b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
@@ -1540,7 +1540,7 @@ public String


getMergesDir
-public org.apache.hadoop.fs.Path getMergesDir()
+public org.apache.hadoop.fs.Path getMergesDir()

Returns:
Path to the temp directory used during merge operations
@@ -1553,7 +1553,7 @@ public String


getMergesDir
-org.apache.hadoop.fs.Path getMergesDir(HRegionInfo hri)
+org.apache.hadoop.fs.Path getMergesDir(HRegionInfo hri)



@@ -1562,7 +1562,7 @@ public String


cleanupMergesDir
-void cleanupMergesDir()
+void cleanupMergesDir()
 throws IOException
Clean up any merge detritus that may have been left around from previous merge attempts.

@@ -1577,7 +1577,7 @@ public String


cleanupMergedRegion
-public void cleanupMergedRegion(HRegionInfo mergedRegion)
+public void cleanupMergedRegion(HRegionInfo mergedRegion)
  throws IOException
Remove merged region

@@ -1594,7 +1594,7 @@ public String


mkdirs
-static boolean mkdirs(org.apache.hadoop.fs.FileSystem fs,
+static boolean mkdirs(org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.conf.Configuration conf,
   org.apache.hadoop.fs.Path dir)
throws IOException
@@ -1610,7 +1610,7 @@ public String


createMergesDir
-public void createMergesDir()
+public void createMergesDir()
  throws IOException
Create the region merges directory.

@@ -1627,7 +1627,7 @@ public String


mergeStoreFile
-public org.apache.hadoop.fs.Path mergeStoreFile(HRegionInfo mergedRegion,
+public org.apache.hadoop.fs.Path mergeStoreFile(HRegionInfo mergedRegion,
 String familyName,
 StoreFile f,
 org.apache.hadoop.fs.Path mergedDir)
@@ -1653,7 +1653,7 @@ public String


commitMergedRegion
-public void commitMergedRegion(HRegionInfo mergedRegionInfo)
+public void commitMergedRegion(HRegionInfo mergedRegionInfo)
 throws IOException
Commit a merged region, moving it from the merges temporary directory to
 the proper location in the filesystem.
@@ -1671,7 +1671,7 @@ public String


logFileSystemState
-void logFileSystemState(org.apache.commons.logging.Log LOG)
+void logFileSystemState(org.apache.commons.logging.Log LOG)
  throws IOException
Log the current state of the region

@@ -1688,7 +1688,7 @@ public String


getRegionInfoFileContent
-private static byte[] getRegionInfoFileContent(HRegionInfo hri)
+private static byte[] getRegionInfoFileContent(HRegionInfo hri)
 throws IOException

Parameters:
@@ -1706,7 +1706,7 @@ public String


loadRegionInfoFileContent
-public static HRegionInfo loadRegionInfoFileContent(org.apache.hadoop.fs.FileSystem fs,
+public static HRegionInfo loadRegionInfoFileContent(org.apache.hadoop.fs.FileSystem fs,
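Taken together, the methods above describe a stage-then-commit protocol: createMergesDir() and mergeStoreFile() stage the merged region under a temporary directory, commitMergedRegion() moves it into place, and cleanupMergesDir()/cleanupMergedRegion() clear detritus from failed attempts. A sketch of that flow, assuming only the Hadoop FileSystem API; MergeDirSketch and the ".merges" path layout are illustrative, not HRegionFileSystem's actual code:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative stage-then-commit flow for a merged region directory.
final class MergeDirSketch {
  static void commitMerge(FileSystem fs, Path tableDir, String mergedRegionName)
      throws IOException {
    Path mergesDir = new Path(tableDir, ".merges");        // temp staging area
    Path staged = new Path(mergesDir, mergedRegionName);   // staged merged region
    Path committed = new Path(tableDir, mergedRegionName); // final location
    if (!fs.rename(staged, committed)) {                   // the "commit" step
      throw new IOException("Failed to commit merged region " + staged);
    }
    fs.delete(mergesDir, true);                            // cleanupMergesDir()
  }
}

The rename is the commit point: until it succeeds, a crash leaves only detritus under the temp directory, which the cleanup methods can remove safely.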
  

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index a58f559..98b388b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -756,2562 +756,2560 @@
 748
 749this.masterActiveTime = System.currentTimeMillis();
 750// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-751// Initialize the chunkCreator
-752initializeMemStoreChunkCreator();
-753this.fileSystemManager = new MasterFileSystem(this);
-754this.walManager = new MasterWalManager(this);
-755
-756// enable table descriptors cache
-757this.tableDescriptors.setCacheOn();
-758// set the META's descriptor to the correct replication
-759this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
-760conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
-761// warm-up HTDs cache on master initialization
-762if (preLoadTableDescriptors) {
-763  status.setStatus("Pre-loading table descriptors");
-764  this.tableDescriptors.getAll();
-765}
-766
-767// publish cluster ID
-768status.setStatus("Publishing Cluster ID in ZooKeeper");
-769ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
-770this.initLatch.countDown();
+751this.fileSystemManager = new MasterFileSystem(this);
+752this.walManager = new MasterWalManager(this);
+753
+754// enable table descriptors cache
+755this.tableDescriptors.setCacheOn();
+756// set the META's descriptor to the correct replication
+757this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
+758conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+759// warm-up HTDs cache on master initialization
+760if (preLoadTableDescriptors) {
+761  status.setStatus("Pre-loading table descriptors");
+762  this.tableDescriptors.getAll();
+763}
+764
+765// publish cluster ID
+766status.setStatus("Publishing Cluster ID in ZooKeeper");
+767ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+768this.initLatch.countDown();
+769
+770this.serverManager = createServerManager(this);
 771
-772this.serverManager = createServerManager(this);
+772this.tableStateManager = new TableStateManager(this);
 773
-774this.tableStateManager = new TableStateManager(this);
-775
-776status.setStatus("Initializing ZK system trackers");
-777initializeZKBasedSystemTrackers();
-778
-779// This is for backwards compatibility
-780// See HBASE-11393
-781status.setStatus("Update TableCFs node in ZNode");
-782TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
-783conf, this.clusterConnection);
-784tableCFsUpdater.update();
-785
-786// initialize master side coprocessors before we start handling requests
-787status.setStatus("Initializing master coprocessors");
-788this.cpHost = new MasterCoprocessorHost(this, this.conf);
-789
-790// start up all service threads.
-791status.setStatus("Initializing master service threads");
-792startServiceThreads();
-793
-794// Wake up this server to check in
-795sleeper.skipSleepCycle();
-796
-797// Wait for region servers to report in
-798status.setStatus("Wait for region servers to report in");
-799waitForRegionServers(status);
-800
-801// get a list for previously failed RS which need log splitting work
-802// we recover hbase:meta region servers inside master initialization and
-803// handle other failed servers in SSH in order to start up master node ASAP
-804MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status);
-805metaBootstrap.splitMetaLogsBeforeAssignment();
+774status.setStatus("Initializing ZK system trackers");
+775initializeZKBasedSystemTrackers();
+776
+777// This is for backwards compatibility
+778// See HBASE-11393
+779status.setStatus("Update TableCFs node in ZNode");
+780TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
+781conf, this.clusterConnection);
+782tableCFsUpdater.update();
+783
+784// initialize master side coprocessors before we start handling requests
+785status.setStatus("Initializing master coprocessors");
+786this.cpHost = new MasterCoprocessorHost(this, this.conf);
+787
+788// start up all service threads.
+789status.setStatus("Initializing master service
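One detail worth calling out in the fragment above is this.initLatch.countDown(): early-initialization consumers block on a latch until the active master has the filesystem and cluster ID ready. A minimal sketch of that handshake, assuming only java.util.concurrent; InitGate and the method names are illustrative, not HMaster's actual fields:

import java.util.concurrent.CountDownLatch;

// Illustrative init-latch handshake: one thread finishes early setup and
// releases everyone parked on the latch.
final class InitGate {
  private final CountDownLatch initLatch = new CountDownLatch(1);

  void finishEarlyInit() {
    // ... set up MasterFileSystem, publish cluster ID ...
    initLatch.countDown(); // release waiters exactly once
  }

  void awaitEarlyInit() throws InterruptedException {
    initLatch.await(); // callers park here until early init completes
  }
}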

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
index 03ab5da..da0a2cd 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MemStoreLABImpl
+public class MemStoreLABImpl
 extends Object
 implements MemStoreLAB
 A memstore-local allocation buffer.
@@ -136,8 +136,8 @@ implements MemStoreChunkPool.
- When the Chunk comes pool, it can be either an on heap or an off heap backed chunk. The chunks,
+ The chunks created by this MemStoreLAB can get pooled at ChunkCreator.
+ When the Chunk comes from pool, it can be either an on heap or an off heap backed chunk. The chunks,
 which this MemStoreLAB creates on its own (when no chunk available from pool), those will be
 always on heap backed.
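To make the allocation scheme in that description concrete, here is a compact sketch of the MSLAB idea: copy each cell's bytes into a large shared chunk via a bump-the-pointer allocator, rolling to a fresh chunk when the current one fills. MslabSketch is illustrative and deliberately ignores pooling, off-heap chunks, and chunk IDs:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative memstore-local allocation buffer: many small cell copies
// land in a few big byte[] chunks, reducing old-gen heap fragmentation.
final class MslabSketch {
  static final class Chunk {
    final byte[] data;
    final AtomicInteger nextFree = new AtomicInteger(0);
    Chunk(int size) { data = new byte[size]; }

    // Returns the offset of a len-byte allocation, or -1 if the chunk is full.
    int alloc(int len) {
      while (true) {
        int old = nextFree.get();
        if (old + len > data.length) {
          return -1; // exhausted; caller retires this chunk
        }
        if (nextFree.compareAndSet(old, old + len)) {
          return old;
        }
      }
    }
  }

  private final AtomicReference<Chunk> curChunk = new AtomicReference<>();
  private final int chunkSize;

  MslabSketch(int chunkSize) { this.chunkSize = chunkSize; }

  // Copy cellBytes into the current chunk, rolling to a new chunk when full;
  // stores the owning chunk into out[0] and returns the offset within it.
  int copyCellData(byte[] cellBytes, Chunk[] out) {
    while (true) {
      Chunk c = curChunk.get();
      if (c == null) {
        curChunk.compareAndSet(null, new Chunk(chunkSize));
        continue;
      }
      int offset = c.alloc(cellBytes.length);
      if (offset >= 0) {
        System.arraycopy(cellBytes, 0, c.data, offset, cellBytes.length);
        out[0] = c;
        return offset;
      }
      curChunk.compareAndSet(c, new Chunk(chunkSize)); // retire the full chunk
    }
  }
}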
 
@@ -159,21 +159,29 @@ implements Field and Description


-private MemStoreChunkPool
-chunkPool


+private ChunkCreator
+chunkCreator

+(package private) Set<Integer>
+chunks
+
+
 private int
 chunkSize

-
+
 private boolean
 closed

-
+
 private AtomicReference<Chunk>
 curChunk

+
+private ReentrantLock
+lock
+

 (package private) static org.apache.commons.logging.Log
 LOG
@@ -187,10 +195,6 @@ implements openScannerCount


-(package private) BlockingQueue<Chunk>
-pooledChunkQueue
-
-
 private AtomicBoolean
 reclaimed
 
@@ -250,33 +254,47 @@ implements 
+private Cell
+copyToChunkCell(Cell cell,
+   ByteBuffer buf,
+   int offset,
+   int len)
+Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid
+ out of it
+
+
+
 void
 decScannerCount()
 Called when closing a scanner on the data of this MemStoreLAB

-
+
 (package private) Chunk
 getCurrentChunk()

-
+
 private Chunk
 getOrMakeChunk()
 Get the current chunk, or, if there is no current chunk,
  allocate a new one from the JVM.

-
+
 (package private) BlockingQueue<Chunk>
 getPooledChunks()

-
+
 void
 incScannerCount()
 Called when opening a scanner on the data of this MemStoreLAB

-
+
+private void
+recycleChunks()
+
+
 private void
 tryRetireChunk(Chunk c)
 Try to retire the current chunk if it is still
@@ -318,7 +336,7 @@ implements 
 
 LOG
-static final org.apache.commons.logging.Log LOG
+static final org.apache.commons.logging.Log LOG



@@ -327,16 +345,25 @@ implements


 curChunk
-private AtomicReference<Chunk> curChunk
+private AtomicReference<Chunk> curChunk


-
+




-pooledChunkQueue
-BlockingQueue<Chunk>

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
deleted file mode 100644
index 627b2ed..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
+++ /dev/null
@@ -1,649 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.HBaseIOException (Apache HBase 2.0.0-SNAPSHOT API)
-
-Uses of Class org.apache.hadoop.hbase.HBaseIOException
-
-
-
-
-
-Packages that use HBaseIOException
-
-Package
-Description
-
-
-
-org.apache.hadoop.hbase
-
-
-
-org.apache.hadoop.hbase.client
-
-Provides HBase Client
-
-
-
-org.apache.hadoop.hbase.coprocessor
-
-Table of Contents
-
-
-
-org.apache.hadoop.hbase.exceptions
-
-
-
-org.apache.hadoop.hbase.ipc
-
-Tools to help define network clients and servers.
-
-
-
-org.apache.hadoop.hbase.quotas
-
-
-
-org.apache.hadoop.hbase.regionserver
-
-
-
-org.apache.hadoop.hbase.security
-
-
-
-org.apache.hadoop.hbase.snapshot
-
-
-
-org.apache.hadoop.hbase.util
-
-
-
-
-
-
-
-
-
-
-Uses of HBaseIOException in org.apache.hadoop.hbase
-
-Subclasses of HBaseIOException in org.apache.hadoop.hbase
-
-Modifier and Type
-Class and Description
-
-
-
-class
-DoNotRetryIOException
-Subclass if exception is not meant to be retried: e.g.
-
-
-
-class
-InvalidFamilyOperationException
-Thrown if a table schema modification is requested but
- made for an invalid family name.
-
-
-
-class
-NamespaceExistException
-Thrown when a namespace exists but should not
-
-
-
-class
-NamespaceNotFoundException
-Thrown when a namespace can not be located
-
-
-
-class
-NotAllMetaRegionsOnlineException
-Thrown when an operation requires the root and all meta 
regions to be online
-
-
-
-class
-PleaseHoldException
-This exception is thrown by the master when a region server was shut down and
- restarted so fast that the master still hasn't processed the server shutdown
- of the first instance, when the master is initializing and a client calls admin
- operations, or when an operation is performed on a region server that is still starting.
-
-
-
-class
-RegionException
-Thrown when something happens related to region 
handling.
-
-
-
-class
-ReplicationPeerNotFoundException
-Thrown when a replication peer can not be found
-
-
-
-class
-TableExistsException
-Thrown when a table exists but should not
-
-
-
-class
-TableInfoMissingException
-Failed to find .tableinfo file under table dir
-
-
-
-class
-TableNotDisabledException
-Thrown if a table should be offline but is not
-
-
-
-class
-TableNotEnabledException
-Thrown if a table should be enabled but is not
-
-
-
-class
-TableNotFoundException
-Thrown when a table can not be located
-
-
-
-class
-UnknownRegionException
-Thrown when we are asked to operate on a region we know 
nothing about.
-
-
-
-class
-UnknownScannerException
-Thrown if a region server is passed an unknown scanner 
id.
-
-
-
-
-
-
-
-
-Uses of HBaseIOException in org.apache.hadoop.hbase.client
-
-Subclasses of HBaseIOException in org.apache.hadoop.hbase.client
-
-Modifier and Type
-Class and Description
-
-
-
-class
-DoNotRetryRegionException
-Similar to RegionException, but disables retries.
-
-
-
-class
-NoServerForRegionException
-Thrown when no region server can be found for a region
-
-
-
-class
-RegionOfflineException
-Thrown when a table can not be located
-
-
-
-class
-RowTooBigException
-Gets or Scans throw this exception if running without 
in-row scan flag
- set and row size appears to exceed max configured size (configurable via
- hbase.table.max.rowsize).
-
-
-
-class
-WrongRowIOException
-
-
-
-
-
-
-
-Uses of HBaseIOException in org.apache.hadoop.hbase.coprocessor
-
-Subclasses of HBaseIOException in org.apache.hadoop.hbase.coprocessor
-
-Modifier and Type
-Class and Description
-
-
-
-class
-BypassCoprocessorException
-Thrown if a coprocessor rules we should bypass an 
operation
-
-
-
-class
-CoprocessorException
-Thrown if a coprocessor encounters any exception.
-
-
-
-
-
-
-
-
-Uses of HBaseIOException in
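The practical point of the hierarchy in this deleted page is the retry contract: DoNotRetryIOException and its subclasses (TableNotFoundException, TableExistsException, and so on) mark permanent failures, while other HBase IOExceptions may be transient. A sketch of a caller honoring that split, assuming hbase-client on the classpath; the retry loop itself is illustrative, not HBase's actual RpcRetryingCaller:

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;

// Illustrative retry loop keyed off the exception hierarchy above.
final class RetrySketch {
  interface Call<T> { T run() throws IOException; }

  static <T> T callWithRetries(Call<T> call, int maxAttempts) throws IOException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.run();
      } catch (DoNotRetryIOException e) {
        throw e; // permanent failure: surface immediately
      } catch (IOException e) {
        last = e; // possibly transient: try again
      }
    }
    throw last; // retries exhausted (maxAttempts assumed >= 1)
  }
}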