[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 8e52a62..0ae39da 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -902,56 +902,60 @@ implements 
 long
+getTotalRowActionRequestCount() 
+
+
+long
 getTotalStaticBloomSize()
 Get the size (in bytes) of the static bloom filters.
 
 
-
+
 long
 getTotalStaticIndexSize()
Get the size (in bytes) of the static indexes including 
the roots.
 
 
-
+
 long
 getTrailerHitCount() 
 
-
+
 long
 getTrailerMissCount() 
 
-
+
 long
 getUpdatesBlockedTime()
 Get the amount of time that updates were blocked.
 
 
-
+
 long
 getWALFileSize()
 Get the size of WAL files of this region server.
 
 
-
+
 long
 getWriteRequestsCount()
 Get the number of write requests to regions hosted on this 
region server.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 getZookeeperQuorum()
 Get the ZooKeeper Quorum Info
 
 
-
+
 private void
 initBlockCache()
 It's possible that due to threading the block cache could 
not be initialized
  yet (testing multiple region servers in one jvm).
 
 
-
+
 private void
 initMobFileCache()
 Initializes the mob file cache.
@@ -1635,13 +1639,26 @@ implements 
+
+
+
+
+getTotalRowActionRequestCount
+public long getTotalRowActionRequestCount()
+
+Specified by:
+getTotalRowActionRequestCount in
 interface MetricsRegionServerWrapper
+
+
+
 
 
 
 
 
 getSplitQueueSize
-public int getSplitQueueSize()
+public int getSplitQueueSize()
 Description copied from 
interface: MetricsRegionServerWrapper
 Get the size of the split queue
 
@@ -1656,7 +1673,7 @@ implements 
 
 getCompactionQueueSize
-public int getCompactionQueueSize()
+public int getCompactionQueueSize()
 Description copied from 
interface: MetricsRegionServerWrapper
 Get the size of the compaction queue
 
@@ -1671,7 +1688,7 @@ implements 
 
 getSmallCompactionQueueSize
-public int getSmallCompactionQueueSize()
+public int getSmallCompactionQueueSize()
 
 Specified by:
 getSmallCompactionQueueSize in
 interface MetricsRegionServerWrapper
@@ -1684,7 +1701,7 @@ implements 
 
 getLargeCompactionQueueSize
-public int getLargeCompactionQueueSize()
+public int getLargeCompactionQueueSize()
 
 Specified by:
 getLargeCompactionQueueSize in
 interface MetricsRegionServerWrapper
@@ -1697,7 +1714,7 @@ implements 
 
 getFlushQueueSize
-public int getFlushQueueSize()
+public int getFlushQueueSize()
 Description copied from 
interface: Me
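
The hunk above adds getTotalRowActionRequestCount() to the generated docs for
MetricsRegionServerWrapperImpl (the method table grows from i99 to i100, and a
new detail entry notes it is specified by MetricsRegionServerWrapper). A
minimal sketch of the delegation shape such wrapper methods follow, with an
assumed counter field (the real value is presumably aggregated from per-region
counters by the wrapper's periodic metrics task):

import java.util.concurrent.atomic.AtomicLong;

interface MetricsWrapperSketch {
  // Same contract as the new method above: a monotonically increasing
  // count of row-level actions served by this region server.
  long getTotalRowActionRequestCount();
}

class MetricsWrapperSketchImpl implements MetricsWrapperSketch {
  // Hypothetical counter; stands in for the aggregated per-region totals.
  private final AtomicLong rowActionCount = new AtomicLong();

  void recordRowActions(long n) {
    rowActionCount.addAndGet(n);
  }

  @Override
  public long getTotalRowActionRequestCount() {
    return rowActionCount.get();
  }
}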

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
index 52c6106..17f74bd 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
@@ -1016,7 +1016,7 @@
 
 
 void
-RecoveredReplicationSource.init(org.apache.hadoop.conf.Configuration conf,
+ReplicationSource.init(org.apache.hadoop.conf.Configuration conf,
 org.apache.hadoop.fs.FileSystem fs,
 ReplicationSourceManager manager,
 ReplicationQueues replicationQueues,
@@ -1025,11 +1025,13 @@
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerClusterZnode,
 http://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true";
 title="class or interface in java.util">UUID clusterId,
 ReplicationEndpoint replicationEndpoint,
-MetricsSource metrics) 
+MetricsSource metrics)
+Instantiation method used by region servers
+
 
 
 void
-ReplicationSource.init(org.apache.hadoop.conf.Configuration conf,
+RecoveredReplicationSource.init(org.apache.hadoop.conf.Configuration conf,
 org.apache.hadoop.fs.FileSystem fs,
 ReplicationSourceManager manager,
 ReplicationQueues replicationQueues,
@@ -1038,9 +1040,7 @@
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerClusterZnode,
 http://docs.oracle.com/javase/8/docs/api/java/util/UUID.html?is-external=true";
 title="class or interface in java.util">UUID clusterId,
 ReplicationEndpoint replicationEndpoint,
-MetricsSource metrics)
-Instantiation method used by region servers
-
+MetricsSource metrics) 
 
 
 void

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
index 3ba1c0a..89f3db5 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
@@ -126,11 +126,11 @@
 
 
 TableDescriptors
-MasterServices.getTableDescriptors() 
+HMaster.getTableDescriptors() 
 
 
 TableDescriptors
-HMaster.getTableDescriptors() 
+MasterServices.getTableDescriptors() 
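
Both hunks above only reorder rows in generated class-use tables: the
ReplicationSource.init(...) row, which carries the "Instantiation method used
by region servers" javadoc, trades places with
RecoveredReplicationSource.init(...), and HMaster.getTableDescriptors() with
MasterServices.getTableDescriptors(). A minimal sketch of the relationship
those rows document, using an assumed one-argument signature in place of the
full parameter list shown in the hunk:

interface SourceLifecycleSketch {
  // "Instantiation method used by region servers", per the javadoc above.
  void init(String peerClusterZnode);
}

class SourceSketch implements SourceLifecycleSketch {
  protected String peerClusterZnode;

  @Override
  public void init(String peerClusterZnode) {
    this.peerClusterZnode = peerClusterZnode;
  }
}

// A recovered source runs the same initialization, then layers on the state
// needed to drain the replication queue left behind by a dead region server.
class RecoveredSourceSketch extends SourceSketch {
  private boolean recovered;

  @Override
  public void init(String peerClusterZnode) {
    super.init(peerClusterZnode);
    recovered = true; // assumed marker, for illustration only
  }
}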
 
 
 



[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
index f355960..13d9b4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.WakeupFlushThread.html
@@ -360,478 +360,480 @@
 352
 353  @Override
 354  public void requestFlush(Region r, 
boolean forceFlushAllStores) {
-355synchronized (regionsInQueue) {
-356  if (!regionsInQueue.containsKey(r)) 
{
-357// This entry has no delay so it 
will be added at the top of the flush
-358// queue.  It'll come out near 
immediately.
-359FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-360this.regionsInQueue.put(r, 
fqe);
-361this.flushQueue.add(fqe);
-362  }
-363}
-364  }
-365
-366  @Override
-367  public void requestDelayedFlush(Region 
r, long delay, boolean forceFlushAllStores) {
-368synchronized (regionsInQueue) {
-369  if (!regionsInQueue.containsKey(r)) 
{
-370// This entry has some delay
-371FlushRegionEntry fqe = new 
FlushRegionEntry(r, forceFlushAllStores);
-372fqe.requeue(delay);
-373this.regionsInQueue.put(r, 
fqe);
-374this.flushQueue.add(fqe);
-375  }
-376}
-377  }
-378
-379  public int getFlushQueueSize() {
-380return flushQueue.size();
-381  }
-382
-383  /**
-384   * Only interrupt once it's done with a 
run through the work loop.
-385   */
-386  void interruptIfNecessary() {
-387lock.writeLock().lock();
-388try {
-389  for (FlushHandler flushHander : 
flushHandlers) {
-390if (flushHander != null) 
flushHander.interrupt();
-391  }
-392} finally {
-393  lock.writeLock().unlock();
-394}
-395  }
-396
-397  synchronized void 
start(UncaughtExceptionHandler eh) {
-398ThreadFactory flusherThreadFactory = 
Threads.newDaemonThreadFactory(
-399
server.getServerName().toShortString() + "-MemStoreFlusher", eh);
-400for (int i = 0; i < 
flushHandlers.length; i++) {
-401  flushHandlers[i] = new 
FlushHandler("MemStoreFlusher." + i);
-402  
flusherThreadFactory.newThread(flushHandlers[i]);
-403  flushHandlers[i].start();
-404}
-405  }
-406
-407  boolean isAlive() {
-408for (FlushHandler flushHander : 
flushHandlers) {
-409  if (flushHander != null && 
flushHander.isAlive()) {
-410return true;
-411  }
-412}
-413return false;
-414  }
-415
-416  void join() {
-417for (FlushHandler flushHander : 
flushHandlers) {
-418  if (flushHander != null) {
-419
Threads.shutdown(flushHander.getThread());
-420  }
-421}
-422  }
-423
-424  /**
-425   * A flushRegion that checks store file 
count.  If too many, puts the flush
-426   * on delay queue to retry later.
-427   * @param fqe
-428   * @return true if the region was 
successfully flushed, false otherwise. If
-429   * false, there will be accompanying 
log messages explaining why the region was
-430   * not flushed.
-431   */
-432  private boolean flushRegion(final 
FlushRegionEntry fqe) {
-433Region region = fqe.region;
-434if 
(!region.getRegionInfo().isMetaRegion() &&
-435isTooManyStoreFiles(region)) {
-436  if 
(fqe.isMaximumWait(this.blockingWaitTime)) {
-437LOG.info("Waited " + 
(EnvironmentEdgeManager.currentTime() - fqe.createTime) +
-438  "ms on a compaction to clean up 
'too many store files'; waited " +
-439  "long enough... proceeding with 
flush of " +
-440  
region.getRegionInfo().getRegionNameAsString());
-441  } else {
-442// If this is first time we've 
been put off, then emit a log message.
-443if (fqe.getRequeueCount() <= 
0) {
-444  // Note: We don't impose 
blockingStoreFiles constraint on meta regions
-445  LOG.warn("Region " + 
region.getRegionInfo().getRegionNameAsString() + " has too many " +
-446"store files; delaying flush 
up to " + this.blockingWaitTime + "ms");
-447  if 
(!this.server.compactSplitThread.requestSplit(region)) {
-448try {
-449  
this.server.compactSplitThread.requestSystemCompaction(
-450  region, 
Thread.currentThread().getName());
-451} catch (IOException e) {
-452  e = e instanceof 
RemoteException ?
-453  
((RemoteException)e).unwrapRemoteException() : e;
-454  LOG.error("Cache flush 
failed for region " +
-455
Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-456}
-45
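
The listing above shows the two enqueue paths: requestFlush() adds a
FlushRegionEntry with no delay, while requestDelayedFlush() calls
fqe.requeue(delay) so the entry only surfaces after the delay elapses (this is
how flushRegion() pushes back regions with too many store files). A minimal
sketch of that pattern on java.util.concurrent.DelayQueue, an assumed
stand-in with the same take-when-ready semantics as the flushQueue above:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

class FlushEntrySketch implements Delayed {
  final String region;
  private long whenReadyNanos;

  FlushEntrySketch(String region, long delayMs) {
    this.region = region;
    requeue(delayMs);
  }

  // Mirrors FlushRegionEntry.requeue(delay): reset the ready time.
  void requeue(long delayMs) {
    whenReadyNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMs);
  }

  @Override
  public long getDelay(TimeUnit unit) {
    return unit.convert(whenReadyNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
  }

  @Override
  public int compareTo(Delayed other) {
    return Long.compare(getDelay(TimeUnit.NANOSECONDS), other.getDelay(TimeUnit.NANOSECONDS));
  }
}

class FlushQueueDemo {
  public static void main(String[] args) throws InterruptedException {
    DelayQueue<FlushEntrySketch> flushQueue = new DelayQueue<>();
    flushQueue.add(new FlushEntrySketch("r1", 0));   // no delay: comes out near immediately
    flushQueue.add(new FlushEntrySketch("r2", 500)); // requeued: retried after ~500 ms
    System.out.println(flushQueue.take().region);    // r1
    System.out.println(flushQueue.take().region);    // blocks ~500 ms, then r2
  }
}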

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
index a8c351f..f95 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -105,704 +105,721 @@
 097  public static final boolean 
DEFAULT_EVICT_BLOCKS_ON_CLOSE = 
ColumnFamilyDescriptorBuilder.DEFAULT_EVICT_BLOCKS_ON_CLOSE;
 098  public static final boolean 
DEFAULT_COMPRESS_TAGS = ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESS_TAGS;
 099  public static final boolean 
DEFAULT_PREFETCH_BLOCKS_ON_OPEN = 
ColumnFamilyDescriptorBuilder.DEFAULT_PREFETCH_BLOCKS_ON_OPEN;
-100  protected final 
ModifyableColumnFamilyDescriptor delegatee;
-101  /**
-102   * Construct a column descriptor 
specifying only the family name
-103   * The other attributes are 
defaulted.
-104   *
-105   * @param familyName Column family 
name. Must be 'printable' -- digit or
-106   * letter -- and may not contain a 
:
-107   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(String)}
-108   */
-109  public HColumnDescriptor(final String 
familyName) {
-110this(Bytes.toBytes(familyName));
-111  }
-112
-113  /**
-114   * Construct a column descriptor 
specifying only the family name
-115   * The other attributes are 
defaulted.
-116   *
-117   * @param familyName Column family 
name. Must be 'printable' -- digit or
-118   * letter -- and may not contain a 
:
-119   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#of(byte[])}
-120   */
-121  public HColumnDescriptor(final byte [] 
familyName) {
-122this(new 
ModifyableColumnFamilyDescriptor(familyName));
-123  }
-124
-125  /**
-126   * Constructor.
-127   * Makes a deep copy of the supplied 
descriptor.
-128   * Can make a modifiable descriptor 
from an UnmodifyableHColumnDescriptor.
-129   * @param desc The descriptor.
-130   * @deprecated use {@link 
ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}
-131   */
-132  public 
HColumnDescriptor(HColumnDescriptor desc) {
-133this(desc, true);
-134  }
-135
-136  protected 
HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
-137this(deepClone ? new 
ModifyableColumnFamilyDescriptor(desc)
-138: desc.delegatee);
-139  }
-140
-141  protected 
HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
-142this.delegatee = delegate;
-143  }
-144
-145  /**
-146   * @param b Family name.
-147   * @return b
-148   * @throws IllegalArgumentException If 
not null and not a legitimate family
-149   * name: i.e. 'printable' and ends in a 
':' (Null passes are allowed because
-150   * b can be 
null when deserializing).  Cannot start with a '.'
-151   * either. Also Family can not be an 
empty value or equal "recovered.edits".
-152   * @deprecated Use {@link 
ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}.
-153   */
-154  @Deprecated
-155  public static byte [] 
isLegalFamilyName(final byte [] b) {
-156return 
ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
-157  }
-158
-159  /**
-160   * @return Name of this column family
-161   */
-162  @Override
-163  public byte [] getName() {
-164return delegatee.getName();
-165  }
-166
-167  /**
-168   * @return The name string of this 
column family
-169   */
-170  @Override
-171  public String getNameAsString() {
-172return delegatee.getNameAsString();
-173  }
-174
-175  /**
-176   * @param key The key.
-177   * @return The value.
-178   */
-179  @Override
-180  public byte[] getValue(byte[] key) {
-181return delegatee.getValue(key);
-182  }
-183
-184  /**
-185   * @param key The key.
-186   * @return The value as a string.
-187   */
-188  public String getValue(String key) {
-189byte[] value = 
getValue(Bytes.toBytes(key));
-190return value == null ? null : 
Bytes.toString(value);
-191  }
-192
-193  @Override
-194  public Map<Bytes, Bytes> getValues() {
-195return delegatee.getValues();
-196  }
-197
-198  /**
-199   * @param key The key.
-200   * @param value The value.
-201   * @return this (for chained 
invocation)
-202   */
-203  public HColumnDescriptor 
setValue(byte[] key, byte[] value) {
-204
getDelegateeForModification().setValue(key, value);
-205return this;
-206  }
-207
-208  /**
-209   * @param key Key whose key and value 
we're to remove from HCD parameters.
-210   */
-211  public HColumnDescriptor remove(final 
byte [] key) {
-212
getDelegateeForModification().removeValue(new Bytes(key));
-213return this;
-214  }
-215
-216  /**
-217   * @param key The key.
-218   * @param value The value.
-219   * @return this (for chained 
invocation)
-220   */
-221  public HColu
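
Every constructor and the isLegalFamilyName helper in the listing above are
deprecated in favor of ColumnFamilyDescriptorBuilder. A sketch of the
replacement calls the @deprecated tags point at, assuming an HBase 2.x+
client on the classpath:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

public class ColumnFamilyMigration {
  public static void main(String[] args) {
    // Replaces: new HColumnDescriptor("cf")
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf");

    // Replaces: HColumnDescriptor.isLegalFamilyName(bytes)
    ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(cf.getName());

    // Builder form, for setting attributes before building; replaces the
    // copy constructor plus setters on a mutable HColumnDescriptor.
    ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder
        .newBuilder(cf)
        .setMaxVersions(3)
        .build();
    System.out.println(tuned.getNameAsString());
  }
}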

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.html
index 7af3762..9837a0e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.html
@@ -40,208 +40,207 @@
 032import 
org.apache.hadoop.hbase.CoordinatedStateException;
 033import 
org.apache.hadoop.hbase.HRegionInfo;
 034import 
org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-035import 
org.apache.hadoop.hbase.ProcedureInfo;
-036import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-037import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-038import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-039import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-040import 
org.apache.hadoop.hbase.procedure2.Procedure;
-041import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-042import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-043import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-044
-045/**
-046 * Helper to synchronously wait on 
conditions.
-047 * This will be removed in the future 
(mainly when the AssignmentManager will be
-048 * replaced with a Procedure version) by 
using ProcedureYieldException,
-049 * and the queue will handle waiting and 
scheduling based on events.
-050 */
-051@InterfaceAudience.Private
-052@InterfaceStability.Evolving
-053public final class ProcedureSyncWait {
-054  private static final Log LOG = 
LogFactory.getLog(ProcedureSyncWait.class);
-055
-056  private ProcedureSyncWait() {}
-057
-058  @InterfaceAudience.Private
-059  public interface Predicate<T> {
-060T evaluate() throws IOException;
-061  }
-062
-063  private static class ProcedureFuture implements Future<byte[]> {
-064  private final ProcedureExecutor<MasterProcedureEnv> procExec;
-065  private final long procId;
-066
-067  private boolean hasResult = 
false;
-068  private byte[] result = null;
-069
-070  public ProcedureFuture(ProcedureExecutor<MasterProcedureEnv> procExec, long procId) {
-071this.procExec = procExec;
-072this.procId = procId;
-073  }
-074
-075  @Override
-076  public boolean cancel(boolean 
mayInterruptIfRunning) { return false; }
-077
-078  @Override
-079  public boolean isCancelled() { 
return false; }
-080
-081  @Override
-082  public boolean isDone() { return 
hasResult; }
-083
-084  @Override
-085  public byte[] get() throws 
InterruptedException, ExecutionException {
-086if (hasResult) return result;
-087try {
-088  return 
waitForProcedureToComplete(procExec, procId, Long.MAX_VALUE);
-089} catch (Exception e) {
-090  throw new 
ExecutionException(e);
-091}
-092  }
-093
-094  @Override
-095  public byte[] get(long timeout, 
TimeUnit unit)
-096  throws InterruptedException, 
ExecutionException, TimeoutException {
-097if (hasResult) return result;
-098try {
-099  result = 
waitForProcedureToComplete(procExec, procId, unit.toMillis(timeout));
-100  hasResult = true;
-101  return result;
-102} catch (TimeoutIOException e) 
{
-103  throw new 
TimeoutException(e.getMessage());
-104} catch (Exception e) {
-105  throw new 
ExecutionException(e);
-106}
-107  }
-108}
-109
-110  public static Future<byte[]> submitProcedure(final ProcedureExecutor<MasterProcedureEnv> procExec,
-111  final Procedure proc) {
-112if (proc.isInitializing()) {
-113  procExec.submitProcedure(proc);
-114}
-115return new ProcedureFuture(procExec, 
proc.getProcId());
-116  }
-117
-118  public static byte[] submitAndWaitProcedure(ProcedureExecutor<MasterProcedureEnv> procExec,
-119  final Procedure proc) throws 
IOException {
-120if (proc.isInitializing()) {
-121  procExec.submitProcedure(proc);
-122}
-123return 
waitForProcedureToCompleteIOE(procExec, proc.getProcId(), Long.MAX_VALUE);
-124  }
-125
-126  public static byte[] 
waitForProcedureToCompleteIOE(
-127  final 
ProcedureExecutor procExec, final long procId, final 
long timeout)
-128  throws IOException {
-129try {
-130  return 
waitForProcedureToComplete(procExec, procId, timeout);
-131} catch (IOException e) {
-132  throw e;
-133} catch (Exception e) {
-134  throw new IOException(e);
-135}
-136  }
-137
-138  public static byte[] 
waitForProcedureToComplete(
-139  final 
ProcedureExecutor procExec, final long procId, final 
lon

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
index e690c2d..ec75aa9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
@@ -818,1092 +818,1070 @@
 810});
 811  }
 812
-813  public void preDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-814  throws IOException {
+813  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
+814  throws IOException {
 815execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 816  @Override
 817  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
 818  throws IOException {
-819oserver.preDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+819oserver.preMergeRegions(ctx, 
regionsToMerge);
 820  }
 821});
 822  }
 823
-824  public void postDispatchMerge(final 
HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-825  throws IOException {
+824  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
+825  throws IOException {
 826execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 827  @Override
 828  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
 829  throws IOException {
-830oserver.postDispatchMerge(ctx, 
regionInfoA, regionInfoB);
+830oserver.postMergeRegions(ctx, 
regionsToMerge);
 831  }
 832});
 833  }
 834
-835  public void preMergeRegions(final 
HRegionInfo[] regionsToMerge)
-836  throws IOException {
-837execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-838  @Override
-839  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-840  throws IOException {
-841oserver.preMergeRegions(ctx, 
regionsToMerge);
-842  }
-843});
-844  }
-845
-846  public void postMergeRegions(final 
HRegionInfo[] regionsToMerge)
-847  throws IOException {
-848execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
-849  @Override
-850  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-851  throws IOException {
-852oserver.postMergeRegions(ctx, 
regionsToMerge);
-853  }
-854});
-855  }
-856
-857  public boolean preBalance() throws 
IOException {
-858return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-859  @Override
-860  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-861  throws IOException {
-862oserver.preBalance(ctx);
-863  }
-864});
-865  }
-866
-867  public void postBalance(final 
List<RegionPlan> plans) throws IOException {
+835  public boolean preBalance() throws 
IOException {
+836return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+837  @Override
+838  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+839  throws IOException {
+840oserver.preBalance(ctx);
+841  }
+842});
+843  }
+844
+845  public void postBalance(final 
List<RegionPlan> plans) throws IOException {
+846execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
+847  @Override
+848  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+849  throws IOException {
+850oserver.postBalance(ctx, 
plans);
+851  }
+852});
+853  }
+854
+855  public boolean 
preSetSplitOrMergeEnabled(final boolean newValue,
+856  final MasterSwitchType switchType) 
throws IOException {
+857return 
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+858  @Override
+859  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+860  throws IOException {
+861
oserver.preSetSplitOrMergeEnabled(ctx, newValue, switchType);
+862  }
+863});
+864  }
+865
+866  public void 
postSetSplitOrMergeEnabled(final boolean newValue,
+867  final MasterSwitchType switchType) 
throws IOException {
 868execOperation(coprocessors.isEmpty() 
? null : new CoprocessorOperation() {
 869  @Override
 870  public void call(MasterObserver 
oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
 871  throws IOE
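
Every hook in the listing above follows one dispatch pattern: wrap the
observer callback in a CoprocessorOperation and hand it to execOperation,
passing null when no coprocessors are registered so the call is free. A
minimal sketch of that pattern with assumed simplified types (String[] stands
in for HRegionInfo[]):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

interface ObserverSketch {
  void preMergeRegions(String[] regionsToMerge) throws IOException;
}

interface CoprocessorOperationSketch {
  void call(ObserverSketch observer) throws IOException;
}

class CoprocessorHostSketch {
  private final List<ObserverSketch> coprocessors = new ArrayList<>();

  void register(ObserverSketch o) { coprocessors.add(o); }

  // Mirrors execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {...})
  void execOperation(CoprocessorOperationSketch op) throws IOException {
    if (op == null) return; // no observers registered: nothing to do
    for (ObserverSketch o : coprocessors) {
      op.call(o);
    }
  }

  public void preMergeRegions(String[] regionsToMerge) throws IOException {
    execOperation(coprocessors.isEmpty() ? null : o -> o.preMergeRegions(regionsToMerge));
  }
}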

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/org/apache/hadoop/hbase/client/Delete.html
index 0563719..3156a2a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Delete.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -352,17 +352,21 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Delete
+setPriority(int priority) 
+
+
+Delete
 setTimestamp(long timestamp)
 Set the timestamp of the delete.
 
 
-
+
 Delete
 setTTL(long ttl)
 Set the TTL desired for the result of the mutation, in 
milliseconds.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
 toMap(int maxCols)
 Compile the details beyond the scope of getFingerprint 
(row, columns,
@@ -382,7 +386,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Methods inherited from class org.apache.hadoop.hbase.client.OperationWithAttributes
-getAttribute,
 getAttributeSize,
 getAttributesMap,
 getId
+getAttribute,
 getAttributeSize,
 getAttributesMap,
 getId,
 getPriority
 
 
 
@@ -541,7 +545,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addDeleteMarker
-public Delete addDeleteMarker(Cell kv)
+public Delete addDeleteMarker(Cell kv)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Advanced use only.
  Add an existing delete marker to this Delete object.
@@ -561,7 +565,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addFamily
-public Delete addFamily(byte[] family)
+public Delete addFamily(byte[] family)
 Delete all versions of all columns of the specified family.
  
  Overrides previous calls to deleteColumn and deleteColumns for the
@@ -580,7 +584,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addFamily
-public Delete addFamily(byte[] family,
+public Delete addFamily(byte[] family,
 long timestamp)
 Delete all columns of the specified family with a timestamp 
less than
  or equal to the specified timestamp.
@@ -602,7 +606,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addFamilyVersion
-public Delete addFamilyVersion(byte[] family,
+public Delete addFamilyVersion(byte[] family,
long timestamp)
 Delete all columns of the specified family with a timestamp 
equal to
  the specified timestamp.
@@ -621,7 +625,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumns
-public Delete addColumns(byte[] family,
+public Delete addColumns(byte[] family,
  byte[] qualifier)
 Delete all versions of the specified column.
 
@@ -639,7 +643,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumns
-public Delete addColumns(byte[] family,
+public Delete addColumns(byte[] family,
  byte[] qualifier,
  long timestamp)
 Delete all versions of the specified column with a 
timestamp less than
@@ -660,7 +664,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumn
-public Delete addColumn(byte[] family,
+public Delete addColumn(byte[] family,
 byte[] qualifier)
 Delete the latest version of the specified column.
  This is an expensive call in that on the server-side, it first does a
@@ -681,7 +685,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 addColumn
-public Delete addColumn(byte[] family,
+public Delete addColumn(byte[] family,
 byte[] qualifier,
 long timestamp)
 Delete the specified version of the specified column.
@@ -701,7 +705,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang
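
The hunk above adds setPriority(int) to Delete's method table (i19 joins the
method map, and getPriority appears among the inherited
OperationWithAttributes methods). A sketch of the API the table documents,
assuming an HBase client on the classpath:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {
  public static void main(String[] args) {
    Delete d = new Delete(Bytes.toBytes("row-1"))
        .addFamily(Bytes.toBytes("cf"))                        // all versions, all columns
        .addColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q"))  // all versions of one column
        .setPriority(HConstants.HIGH_QOS);                     // the method this diff adds
    System.out.println(d);
  }
}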

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
 
b/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
index 8a9a80c..5880360 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
@@ -134,13 +134,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
-private 
com.google.common.collect.ListMultimap<String,T>
+private 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T>
 groupCache
 Cache of group permissions
 
 
 
-private 
com.google.common.collect.ListMultimap<String,T>
+private 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T>
 userCache
 Cache of user permissions
 
@@ -180,7 +180,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-com.google.common.collect.ListMultimap<String,T>
+org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T>
 getAllPermissions()
 Returns a combined map of user and group permissions, with 
group names
  distinguished according to AuthUtil.isGroupPrincipal(String).
@@ -242,7 +242,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 userCache
-private com.google.common.collect.ListMultimap<String,T extends Permission> userCache
+private org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T extends Permission> userCache
 Cache of user permissions
 
 
@@ -252,7 +252,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 groupCache
-private com.google.common.collect.ListMultimap<String,T extends Permission> groupCache
+private org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T extends Permission> groupCache
 Cache of group permissions
 
 
@@ -349,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getAllPermissions
-public com.google.common.collect.ListMultimap<String,T> getAllPermissions()
+public org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap<String,T> getAllPermissions()
 Returns a combined map of user and group permissions, with 
group names
  distinguished according to AuthUtil.isGroupPrincipal(String).
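
The only change in these hunks is package relocation: the Guava ListMultimap
behind userCache and groupCache moves under
org.apache.hadoop.hbase.shaded.com.google.common, HBase's bundled copy that
cannot clash with a user's own Guava. A sketch of the unchanged multimap
usage, assuming plain Guava on the classpath (inside HBase only the import
line differs):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;

public class ShadedGuavaSketch {
  public static void main(String[] args) {
    // Same shape as the caches above: one key (user or group name)
    // mapping to many permissions.
    ListMultimap<String, String> groupCache = ArrayListMultimap.create();
    groupCache.put("admins", "READ");
    groupCache.put("admins", "WRITE");
    System.out.println(groupCache.get("admins")); // [READ, WRITE]
  }
}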
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.html
index 54a9426..566da9c 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/TableAuthManager.html
@@ -463,14 +463,14 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
 
 
 private void
updateGlobalCache(com.google.common.collect.ListMultimap

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void 
prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  List<Cell> cells = 
e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Map<byte[], Integer> kvCount 
= new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i < listSize; 
i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == 
HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934List<Cell> result = get(get, 
false);
-2935
-2936if (result.size() < count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size() > count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 
progress of a batch operation,
-2968   * accumulating st

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
index feb42ea..4bd98f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. 
It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as 
defined in
-214 * 
https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards 
source / binary compatibility and methods or class can
-216 * change or go away without 
deprecation.
-217 * Use {@link Connection#getAdmin()} to 
obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * 

Connection should be an unmanaged connection obtained via
-221 * {@link ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin {
-230  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-";
+180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import org.apache.hadoop.hba
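
The class javadoc above states that HBaseAdmin is no longer a client API and
that callers should go through Connection#getAdmin(). A sketch of that
recommended path, assuming an HBase client on the classpath and a reachable
cluster configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml
    // An unmanaged connection, exactly as the javadoc prescribes.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      for (TableName name : admin.listTableNames()) {
        System.out.println(name);
      }
    }
  }
}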


[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-archetypes/hbase-client-project/project-reports.html
--
diff --git a/hbase-archetypes/hbase-client-project/project-reports.html 
b/hbase-archetypes/hbase-client-project/project-reports.html
index 4441ed0..a9fc929 100644
--- a/hbase-archetypes/hbase-client-project/project-reports.html
+++ b/hbase-archetypes/hbase-client-project/project-reports.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-11
+Last Published: 2017-07-12
    | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-archetypes/hbase-client-project/project-summary.html
--
diff --git a/hbase-archetypes/hbase-client-project/project-summary.html 
b/hbase-archetypes/hbase-client-project/project-summary.html
index 9be0626..9048836 100644
--- a/hbase-archetypes/hbase-client-project/project-summary.html
+++ b/hbase-archetypes/hbase-client-project/project-summary.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-11
+Last Published: 2017-07-12
    | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-archetypes/hbase-client-project/source-repository.html
--
diff --git a/hbase-archetypes/hbase-client-project/source-repository.html 
b/hbase-archetypes/hbase-client-project/source-repository.html
index 7ff8082..2e7feb1 100644
--- a/hbase-archetypes/hbase-client-project/source-repository.html
+++ b/hbase-archetypes/hbase-client-project/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-11
+Last Published: 2017-07-12
    | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-archetypes/hbase-client-project/team-list.html
--
diff --git a/hbase-archetypes/hbase-client-project/team-list.html 
b/hbase-archetypes/hbase-client-project/team-list.html
index 0d974de..0046907 100644
--- a/hbase-archetypes/hbase-client-project/team-list.html
+++ b/hbase-archetypes/hbase-client-project/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-11
+Last Published: 2017-07-12
    | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html 
b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
index 47c062c..47261ae 100644
--- a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 201

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.Mast


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060        getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052        getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor

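The refactor above moves protobuf request construction out of HBaseAdmin and into RequestConverter helpers. The RequestConverter bodies are not part of this diff; as a hedged sketch inferred only from the builder calls visible in the removed lines (the real method may differ), the drain helper presumably looks something like:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;

    // Hypothetical reconstruction of RequestConverter.buildDrainRegionServersRequest,
    // based on the protobuf calls in the removed lines above.
    public static DrainRegionServersRequest buildDrainRegionServersRequest(List<ServerName> servers) {
      List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
      for (ServerName server : servers) {
        // Convert each client-side ServerName into its protobuf counterpart.
        pbServers.add(ProtobufUtil.toServerName(server));
      }
      return DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
    }

Centralizing this in RequestConverter lets HBaseAdmin and RawAsyncHBaseAdmin share one request-building path instead of duplicating the conversion loop.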

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 85f763f..850c918 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -5059,16 +5059,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 static HDFSBlocksDistribution
-HRegion.computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
- HTableDescriptor tableDescriptor,
+HRegion.computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
+ TableDescriptor tableDescriptor,
  HRegionInfo regionInfo)
 This is a helper function to compute HDFS block distribution on demand


 static HDFSBlocksDistribution
-HRegion.computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
- HTableDescriptor tableDescriptor,
+HRegion.computeHDFSBlocksDistribution(org.apache.hadoop.conf.Configuration conf,
+ TableDescriptor tableDescriptor,
  HRegionInfo regionInfo,
  org.apache.hadoop.fs.Path tablePath)
 This is a helper function to compute HDFS block distribution on demand
@@ -5090,18 +5090,18 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 static HRegion
-HRegion.createHRegion(HRegionInfo info,
+HRegion.createHRegion(HRegionInfo info,
  org.apache.hadoop.fs.Path rootDir,
  org.apache.hadoop.conf.Configuration conf,
- HTableDescriptor hTableDescriptor,
+ TableDescriptor hTableDescriptor,
  WAL wal)


 static HRegion
-HRegion.createHRegion(HRegionInfo info,
+HRegion.createHRegion(HRegionInfo info,
  org.apache.hadoop.fs.Path rootDir,
  org.apache.hadoop.conf.Configuration conf,
- HTableDescriptor hTableDescriptor,
+ TableDescriptor hTableDescriptor,
  WAL wal,
  boolean initialize)
 Convenience method creating new HRegions.
@@ -5200,12 +5200,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 (package private) static HRegion
-HRegion.newHRegion(org.apache.hadoop.fs.Path tableDir,
+HRegion.newHRegion(org.apache.hadoop.fs.Path tableDir,
   WAL wal,
   org.apache.hadoop.fs.FileSystem fs,
   org.apache.hadoop.conf.Configuration conf,
   HRegionInfo regionInfo,
-  HTableDescriptor htd,
+  TableDescriptor htd,
   RegionServerServices rsServices)
 A utility method to create new instances of HRegion based on the HConstants.REGION_IMPL configuration property.
@@ -5213,22 +5213,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 static HRegion
-HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
+HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
    org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path rootDir,
    HRegionInfo info,
-   HTableDescriptor htd,
+   TableDescriptor htd,
    WAL wal)
 Open a Region.


 static HRegion
-HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
+HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
    org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path rootDir,
    HRegionInfo info,
-   HTableDescriptor htd,
+   TableDescriptor htd,
    WAL wal,
    RegionServerServices rsServices,
    CancelableProgressable reporter)
 Open a Region.
@@ -5237,12 +5237,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 static HRegion
-HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
+HRegion.openHRegion(org.apache.hadoop.conf.Configuration conf,
    org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path rootDir,
    org.apache.hadoop.fs.Path tableDir,
    HRegionInfo info,
-   HTableDescriptor htd,
+   TableDescriptor htd,
    WAL wal,
    RegionServerServices rsServices,
    CancelableProgressable reporter)
 Open a Region.
@@ -5251,8 +5251,8 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 static HRegion
-HRegion.openHRegion(HRegionInfo info,
-   HTableDescriptor htd,
+HRegion.openHRegion(HRegionInfo info,
+   TableDescriptor htd,
    WAL wal,
    org.apache.hadoop.conf.Configuration conf)
 Open a Region.
@@ -5260,8 +5260,8 @@ Input/OutputFormats, a table indexing MapReduce job, an

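Every signature in this table swaps HTableDescriptor for the TableDescriptor interface. A hedged usage sketch of one of the static helpers, assuming (as the migration implies) that HTableDescriptor still implements TableDescriptor during the deprecation window:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HDFSBlocksDistribution;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("example");
    // A legacy descriptor widens to the new interface, so old callers keep compiling.
    TableDescriptor htd = new HTableDescriptor(tn);
    HRegionInfo info = new HRegionInfo(tn);
    // Static helper from the table above; throws IOException in a real caller.
    HDFSBlocksDistribution dist = HRegion.computeHDFSBlocksDistribution(conf, htd, info);
    float locality = dist.getBlockLocalityIndex("rs1.example.com"); // 0.0 - 1.0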

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
index 43db01d..79dc4e0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
 228      int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229      throws FileNotFoundException, IOException {
-230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
+230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231    this.writerThreads = new WriterThread[writerThreadNum];
 232    long blockNumCapacity = capacity / blockSize;
 233    if (blockNumCapacity >= Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine getIOEngineFromName(String ioEngineName, long capacity)
-316      throws IOException {
-317    if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
-318      // In order to make the usage simple, we only need the prefix 'files:' in
-319      // document whether one or multiple file(s), but also support 'file:' for
-320      // the compatibility
-321      String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322          .split(FileIOEngine.FILE_DELIMITER);
-323      return new FileIOEngine(capacity, filePaths);
-324    } else if (ioEngineName.startsWith("offheap")) {
-325      return new ByteBufferIOEngine(capacity, true);
-326    } else if (ioEngineName.startsWith("heap")) {
-327      return new ByteBufferIOEngine(capacity, false);
-328    } else if (ioEngineName.startsWith("mmap:")) {
-329      return new FileMmapEngine(ioEngineName.substring(5), capacity);
-330    } else {
-331      throw new IllegalArgumentException(
-332          "Don't understand io engine name for cache - prefix with file:, heap or offheap");
-333    }
-334  }
-335
-336  /**
-337   * Cache the block with the specified name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-343    cacheBlock(cacheKey, buf, false, false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-355      final boolean cacheDataInL1) {
-356    cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is in-memory
-364   * @param wait if true, blocking wait when queue is full
-365   */
-366  public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-367      boolean wait) {
-368    if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369    if (!cacheEnabled) {
-370      return;
-371    }
-372
-373    if (backingMap.containsKey(cacheKey)) {
-374      return;
-375    }
-376
-377    /*
-378     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-379     */
-380    RAMQueueEntry re =
-381        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-382    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-383      return;
-384    }
-385    int queueNum = (cacheKey.hashCode() & 0x7FFF) % writerQueues.size();
-386    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-387    boolean successfulAddition = false;
-388    if (wait) {
-389      try {
-390        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391      } catch (InterruptedException e) {
-392        Thread.currentThread().interrupt();
-393      }
-394    } else {
-395      successfulAddition = bq.offer(re);
-396    }
-397    if (!successfulAddition) {
-398      ramCache.remove(cacheKey);
-399      cacheStats.failInsert();
-400    } else {
-401      this.blockNumber.incre

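The deleted getIOEngineFromName shows how the IO engine name is dispatched on its prefix (the replacement adds a persistencePath argument, whose body is not shown in full here). A standalone mirror of that dispatch, for illustration only:

    // Illustrative mirror of the prefix dispatch in getIOEngineFromName; the
    // returned strings stand in for the engine classes the real method constructs.
    static String classifyIoEngine(String ioEngineName) {
      if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
        return "FileIOEngine";            // one or more files, paths split on a delimiter
      } else if (ioEngineName.startsWith("offheap")) {
        return "ByteBufferIOEngine (direct buffers)";
      } else if (ioEngineName.startsWith("heap")) {
        return "ByteBufferIOEngine (on-heap buffers)";
      } else if (ioEngineName.startsWith("mmap:")) {
        return "FileMmapEngine";          // path is everything after "mmap:"
      }
      throw new IllegalArgumentException(
          "Don't understand io engine name for cache - prefix with file:, heap or offheap");
    }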

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
new file mode 100644
index 000..7959dac
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -0,0 +1,339 @@
+<generated Javadoc page boilerplate for "RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)": doctype, method-index script, and navigation links omitted>
+org.apache.hadoop.hbase.client
+Class RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+    org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+      org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
+
+All Implemented Interfaces:
+BiConsumer<Void,Throwable>
+
+Enclosing class:
+RawAsyncHBaseAdmin
+
+private class RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
+extends RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+
+Field Summary
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+namespaceName
+
+Fields inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+admin
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+ModifyNamespaceProcedureBiConsumer(AsyncAdmin admin, String namespaceName)
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods
+
+Modifier and Type    Method and Description
+(package private) String    getOperationType()
+
+Methods inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
+getDescription, onError, onFinished
+
+Methods inherited from class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer
+accept
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, 

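The class above is a BiConsumer<Void, Throwable>, i.e. a completion callback for a namespace procedure. A minimal JDK-only sketch of how such a callback is consumed; the future here is a stand-in, not the RawAsyncHBaseAdmin internals:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    CompletableFuture<Void> procedureDone = new CompletableFuture<>();
    BiConsumer<Void, Throwable> callback = (ignored, error) -> {
      // accept(Void, Throwable): exactly one of the two arguments is meaningful.
      if (error != null) {
        System.err.println("modify namespace failed: " + error);
      } else {
        System.out.println("modify namespace finished");
      }
    };
    procedureDone.whenComplete(callback); // how a ProcedureBiConsumer would be attached
    procedureDone.complete(null);         // simulate the master reporting success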

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-104import org.apache.hadoop.hbase.shaded.protobuf.


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index e65748d..91a0ffa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
-368    throws IOException {
-369    byte[][] fields = HRegionInfo.parseRegionName(regionName);
-370    long regionId = Long.parseLong(Bytes.toString(fields[2]));
-371    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372    return new HRegionInfo(
-373      TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for the specified region.
-378   * @param connection connection we're using
-379   * @param regionName region we're looking for
-380   * @return result of the specified region
-381   * @throws IOException
-382   */
-383  public static Result getRegionResult(Connection connection,
-384      byte[] regionName) throws IOException {
-385    Get get = new Get(regionName);
-386    get.addFamily(HConstants.CATALOG_FAMILY);
-387    return get(getMetaHTable(connection), get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier of the specified merged region
-392   * @return null if it doesn't contain merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-397      Connection connection, byte[] regionName) throws IOException {
-398    Result result = getRegionResult(connection, regionName);
-399    HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400    HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401    if (mergeA == null && mergeB == null) {
-402      return null;
-403    }
-404    return new Pair<>(mergeA, mergeB);
-405 }
-406
-407  /**
-408   * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're using
-411   * @param tableName table to check
-412   * @return true if the table exists in meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean tableExists(Connection connection,
-416      final TableName tableName)
-417  throws IOException {
-418    // Catalog tables always exist.
-419    return tableName.equals(TableName.META_TABLE_NAME)
-420        || getTableState(connection, tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions,
-428   *                                    true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> getAllRegions(Connection connection,
-434      boolean excludeOfflinedSplitParents)
-435  throws IOException {
-436    List<Pair<HRegionInfo, ServerName>> result;
-437
-438    result = getTableRegionsAndLocations(connection, null,
-439        excludeOfflinedSplitParents);
-440
-441    return getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the specified table. Do not use this method
-447   * to get meta table regions, use methods in MetaTableLocator instead.
-448   * @param connection connection we're using
-449   * @param tableName table we're looking for
-450   * @return Ordered list of {@link HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
-454  throws IOException {
-455    return getTableRegions(connection, tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the specified table. Do not use this method
-460   * to get meta table regions, use methods in MetaTableLocator instead.
-461   * @param connection connection we're using
-462   * @param tableName table we're looking for
-463   * @param excludeOfflinedSplitParents If true, do not include offlined split
-464   * parents in the return.
-465   * @return Ordered list of {

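A hedged usage sketch for the static helpers above, assuming a reachable cluster; per the comment in tableExists, catalog tables always report as existing:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      TableName tn = TableName.valueOf("example");
      if (MetaTableAccessor.tableExists(connection, tn)) {
        // Ordered list of the table's regions, read from hbase:meta.
        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(connection, tn);
        System.out.println(tn + " has " + regions.size() + " regions in meta");
      }
    }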

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityCostFunction.html
deleted file mode 100644
index 6de986f..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityCostFunction.html
+++ /dev/null
@@ -1,1678 +0,0 @@
-<deleted Javadoc source-page boilerplate: doctype, "Source code" title, stylesheet links>
-001/**
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018package org.apache.hadoop.hbase.master.balancer;
-019
-020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import org.apache.commons.logging.LogFactory;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.ClusterStatus;
-035import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.HRegionInfo;
-038import org.apache.hadoop.hbase.RegionLoad;
-039import org.apache.hadoop.hbase.ServerLoad;
-040import org.apache.hadoop.hbase.ServerName;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import org.apache.hadoop.hbase.master.MasterServices;
-044import org.apache.hadoop.hbase.master.RegionPlan;
-045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import org.apache.hadoop.hbase.util.Bytes;
-051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
-059 * <ul>
-060 * <li>Region Load</li>
-061 * <li>Table Load</li>
-062 * <li>Data Locality</li>
-063 * <li>Memstore Sizes</li>
-064 * <li>Storefile Sizes</li>
-065 * </ul>
-066 *
-067 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-068 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are
-069 * scaled by their respective multipliers:</p>
-070 *
-071 * <ul>
-072 * <li>hbase.master.balancer.stochastic.regionLoadCost</li>
-073 * <li>hbase.master.balancer.stochastic.moveCost</li>
-074 * <li>hbase.master.balancer.stochastic.tableLoadCost</li>
-075 * <li>hbase.master.balancer.stochastic.localityCost</li>
-076 * <li>hbase.master.balancer.stochastic.memstoreSizeCost</li>
-077 * <li>hbase.master.balancer.stochastic.storefileSizeCost</li>
-078 * </ul>
-079 *
-080 * <p>In addition to the above configurations, the balancer can be tuned by the following
-081 * configuration values:</p>
-082 * <ul>
-083 * <li>hbase.master.balancer.stochastic.maxMoveRegions which
-084 * controls what the max number of regions that can be moved in a single invocation of this
-085 * balancer.</li>
-086 * <li>hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of
-087 * regions is multiplied to try and

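The multipliers listed in the deleted javadoc are plain Configuration keys. A hedged tuning example; the keys are verbatim from the comment above, but the numeric values are illustrative only (the defaults are not shown in this diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Weight data locality more heavily relative to the other cost functions.
    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 30f);
    // Penalize region moves less, allowing more aggressive rebalancing.
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 5f);
    // More candidate steps per region means a longer but more thorough search.
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);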

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
index 509b93c..3c6f9b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.QuotasVisitor.html
@@ -53,606 +53,717 @@
 045import org.apache.hadoop.hbase.client.ResultScanner;
 046import org.apache.hadoop.hbase.client.Scan;
 047import org.apache.hadoop.hbase.client.Table;
-048import org.apache.hadoop.hbase.filter.CompareFilter;
-049import org.apache.hadoop.hbase.filter.Filter;
-050import org.apache.hadoop.hbase.filter.FilterList;
-051import org.apache.hadoop.hbase.filter.QualifierFilter;
-052import org.apache.hadoop.hbase.filter.RegexStringComparator;
-053import org.apache.hadoop.hbase.filter.RowFilter;
-054import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-055import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-056import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-057import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-058import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-059import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-068import org.apache.hadoop.hbase.util.Bytes;
-069import org.apache.hadoop.hbase.util.Strings;
-070
-071/**
-072 * Helper class to interact with the quota table.
-073 * <pre>
-074 *     ROW-KEY       FAM/QUAL        DATA
-075 *   n.<namespace> q:s         <global-quotas>
-076 *   t.<namespace> u:p         <namespace-quota policy>
-077 *   t.<table>     q:s         <global-quotas>
-078 *   t.<table>     u:p         <table-quota policy>
-079 *   u.<user>      q:s         <global-quotas>
-080 *   u.<user>      q:s.<table> <table-quotas>
-081 *   u.<user>      q:s.<ns>:   <namespace-quotas>
-082 * </pre>
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName QUOTA_TABLE_NAME =
-091      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String QUOTA_POLICY_COLUMN =
-099      Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* =
-105   * Quota "settings" helpers
-106   */
-107  public static Quotas getTableQuota(final Connection connection, final TableName table)
-108      throws IOException {
-109    return getQuotas(connection, getTableRowKey(table));
-110  }
-111
-112  public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
-113      throws IOException {
-114    return getQuotas(connection, getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final Connection connection, final String user)
-118      throws IOException {
-119    return

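The row-key layout in the class comment is simple prefix concatenation. An illustrative helper; this is a local stand-in, since QuotaTableUtil's own getTableRowKey/getNamespaceRowKey are not shown in this hunk:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    // Builds a quota-table row key: one-letter prefix plus the entity name,
    // mirroring the "n.", "t." and "u." prefixes documented above.
    static byte[] quotaRowKey(byte[] prefix, String entity) {
      return Bytes.add(prefix, Bytes.toBytes(entity));
    }

    byte[] tableRow = quotaRowKey(Bytes.toBytes("t."), TableName.valueOf("example").getNameAsString());
    byte[] nsRow    = quotaRowKey(Bytes.toBytes("n."), "default");
    byte[] userRow  = quotaRowKey(Bytes.toBytes("u."), "alice");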

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
new file mode 100644
index 000..3f64215
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -0,0 +1,2038 @@
+<generated Javadoc page boilerplate for "ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor (Apache HBase 3.0.0-SNAPSHOT API)": doctype, method-index script, and navigation links omitted>
+org.apache.hadoop.hbase.client
+Class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+
+All Implemented Interfaces:
+Comparable<ColumnFamilyDescriptor>, ColumnFamilyDescriptor
+
+Enclosing class:
+ColumnFamilyDescriptorBuilder
+
+@InterfaceAudience.Private
+public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+extends Object
+implements ColumnFamilyDescriptor, Comparable<ColumnFamilyDescriptor>
+
+A ModifyableFamilyDescriptor contains information about a column family such as the
+ number of versions, compression settings, etc.
+
+ It is used as input when creating a table or adding a column.
+ TODO: make this package-private after removing the HColumnDescriptor
+
+Field Summary
+
+Fields
+
+Modifier and Type    Field and Description
+private Map<String,String>    configuration
+A map which holds the configuration specific to the column family.
+
+private byte[]    name
+
+private Map<Bytes,Bytes>    values
+
+Fields inherited from interface org.apache.hadoop.hbase.client.ColumnFamilyDescriptor
+COMPARATOR
+
+Constructor Summary
+
+

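ModifyableColumnFamilyDescriptor is the mutable implementation behind ColumnFamilyDescriptorBuilder. A hedged sketch of the intended usage; setter names follow the 2.0-era builder API and may differ slightly in this exact snapshot:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // The builder mutates a ModifyableColumnFamilyDescriptor internally and
    // hands back an immutable view, used when creating a table or adding a column.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)           // keep three versions of each cell
        .setBlocksize(64 * 1024)     // 64 KB HFile blocks
        .build();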

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/AuthUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/AuthUtil.html b/devapidocs/org/apache/hadoop/hbase/AuthUtil.html
index befdaa9..ab66289 100644
--- a/devapidocs/org/apache/hadoop/hbase/AuthUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/AuthUtil.html
@@ -4,7 +4,7 @@
 
 
 
-AuthUtil (Apache HBase 2.0.0-SNAPSHOT API)
+AuthUtil (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-BaseConfigurable (Apache HBase 2.0.0-SNAPSHOT API)
+BaseConfigurable (Apache HBase 3.0.0-SNAPSHOT API)