[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 2b1a8b7..50028a7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends java.lang.Object
 implements RegionScanner, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
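
For readers skimming this diff: the combining described here is a k-way merge over per-Store scanners. A minimal plain-Java sketch of that heap-merge idea (illustrative only, not HBase's actual KeyValueHeap; the class name HeapMergeSketch is invented for this example):

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

public class HeapMergeSketch {
    public static void main(String[] args) {
        // One sorted iterator per "store" (column family).
        List<Iterator<String>> stores = Arrays.asList(
            Arrays.asList("a", "d", "g").iterator(),
            Arrays.asList("b", "e").iterator(),
            Arrays.asList("c", "f").iterator());

        // Heap entries pair each iterator's current value with the iterator.
        PriorityQueue<Map.Entry<String, Iterator<String>>> heap =
            new PriorityQueue<>(Map.Entry.comparingByKey());
        for (Iterator<String> it : stores) {
            if (it.hasNext()) {
                heap.add(new AbstractMap.SimpleEntry<>(it.next(), it));
            }
        }
        // Emit the globally smallest head, then refill from the same source.
        while (!heap.isEmpty()) {
            Map.Entry<String, Iterator<String>> top = heap.poll();
            System.out.print(top.getKey() + " "); // prints: a b c d e f g
            Iterator<String> src = top.getValue();
            if (src.hasNext()) {
                heap.add(new AbstractMap.SimpleEntry<>(src.next(), src));
            }
        }
    }
}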
@@ -429,7 +429,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -438,7 +438,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided filters and are thus read on demand, if on-demand column family loading is enabled.
 
@@ -449,7 +449,7 @@ implements 
 
 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to scan limits, this will contain the row for which we are populating the values.
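
Taken together, storeHeap/joinedHeap and joinedContinuationRow implement lazy loading of non-essential column families: essential columns are scanned first, and the joined columns are fetched only for rows the filter accepts, with the continuation row recording where to resume after a limit. A small illustrative sketch of that control flow (plain Java over strings, not HBase code; all names here are invented):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

public class JoinedHeapSketch {
    static String joinedContinuationRow = null; // row being populated when a limit hit

    // Copy the non-essential ("joined") values for an accepted row into out,
    // stopping early if the scan limit is reached mid-row.
    static boolean populateJoined(String row, Map<String, List<String>> joinedData,
                                  List<String> out, int limit) {
        for (String v : joinedData.getOrDefault(row, List.of())) {
            if (out.size() >= limit) {
                joinedContinuationRow = row; // remember where to resume
                return false;
            }
            out.add(v);
        }
        joinedContinuationRow = null;
        return true;
    }

    public static void main(String[] args) {
        Map<String, List<String>> joined = Map.of("r1", List.of("big1", "big2"));
        Predicate<String> filter = row -> row.equals("r1"); // essential-column filter
        List<String> results = new ArrayList<>();
        for (String row : List.of("r0", "r1")) {
            if (filter.test(row)) {              // only accepted rows pay for joined data
                populateJoined(row, joined, results, 10);
            }
        }
        System.out.println(results + " resume=" + joinedContinuationRow);
    }
}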
 
@@ -460,7 +460,7 @@ implements 
 
 filterClosed
-private boolean filterClosed
+private boolean filterClosed
 
 
 
@@ -469,7 +469,7 @@ implements 
 
 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow
 
 
 
@@ -478,7 +478,7 @@ implements 
 
 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow
 
 
 
@@ -487,7 +487,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -496,7 +496,7 @@ implements 
 
 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator
 
 
 
@@ -505,7 +505,7 @@ implements 
 
 readPt
-private final long readPt
+private final long readPt
 
 
 
@@ -514,7 +514,7 @@ implements 
 
 maxResultSize
-private final long maxResultSize
+private final long maxResultSize
 
 
 
@@ -523,7 +523,7 @@ implements 
 
 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext
 
 
 
@@ -532,7 +532,7 @@ implements 
 
 filter
-private final FilterWrapper filter
+private final FilterWrapper filter
 
 
 
@@ -549,7 +549,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   java.util.List additionalScanners,
   HRegion region)
throws java.io.IOException
@@ -565,7 +565,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   java.util.List additionalScanners,
   HRegion region,
   long nonceGroup,
@@ -591,7 +591,7 @@ implements 
 
 getRegionInfo
-public HRegionInfo getRegionInfo()
+public HRegionInfo getRegionInfo()
 
 Specified by:
 getRegionInfo in
 interface RegionScanner
@@ -606,7 +606,7 @@ implements 
 
 initializeScanners
-protected void initializeScanners(Scan scan,
+protected void initializeScanners(Scan scan,
   java.util.List additionalScanners)
throws java.io.IOException
 
@@ -621,7 +621,7 @@ implements 
 
 initializeKVHeap
-protected void initializeKVHeap(java.util.List scanners,
+protected void initializeKVHeap(java.util.List scanners,
 java.util.List joinedScanners,
 HRegion region)
  throws http://docs.orac

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
index c91f40a..e65c773 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
@@ -772,30 +772,25 @@
 
 
 void
-FavoredStochasticBalancer.initialize() 
+BaseLoadBalancer.initialize() 
 
 
 void
-BaseLoadBalancer.initialize() 
+FavoredStochasticBalancer.initialize() 
 
 
 ServerName
-FavoredStochasticBalancer.randomAssignment(HRegionInfo regionInfo,
-java.util.List<ServerName> servers) 
-
-
-ServerName
 BaseLoadBalancer.randomAssignment(HRegionInfo regionInfo,
 java.util.List<ServerName> servers)
 Used to assign a single region to a random server.
 
 
-
-java.util.Map<ServerName,List<HRegionInfo>>
-FavoredStochasticBalancer.retainAssignment(java.util.Map<HRegionInfo,ServerName> regions,
+
+ServerName
+FavoredStochasticBalancer.randomAssignment(HRegionInfo regionInfo,
 java.util.List<ServerName> servers) 
 
-
+
 java.util.Map<ServerName,List<HRegionInfo>>
 BaseLoadBalancer.retainAssignment(java.util.Map<HRegionInfo,ServerName> regions,
 java.util.List<ServerName> servers)
@@ -804,12 +799,12 @@
  available/online servers available for assignment.
 
 
-
+
 java.util.Map<ServerName,List<HRegionInfo>>
-FavoredStochasticBalancer.roundRobinAssignment(java.util.List<HRegionInfo> regions,
-java.util.List<ServerName> servers) 
+FavoredStochasticBalancer.retainAssignment(java.util.Map<HRegionInfo,ServerName> regions,
+java.util.List<ServerName> servers) 
 
-
+
 java.util.Map<ServerName,List<HRegionInfo>>
 BaseLoadBalancer.roundRobinAssignment(java.util.List<HRegionInfo> regions,
 java.util.List<ServerName> servers)
@@ -817,6 +812,11 @@
  simple round-robin assignment.
 
 
+
+java.util.Map<ServerName,List<HRegionInfo>>
+FavoredStochasticBalancer.roundRobinAssignment(java.util.List<HRegionInfo> regions,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class o

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescri

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final 
ConcurrentHashMap scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For each store, we calculate
-341  // the maxSeqId up to which the store 
wa
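
The comments in this hunk describe how HRegion bounds the time spent waiting for the region lock. A simplified sketch of that bound (the field names mirror the comments above, but the arithmetic is an illustration, not the exact HRegion code):

public class BusyWaitSketch {
    static final long BUSY_WAIT_DURATION = 30_000L;     // "hbase.busy.wait.duration", ms
    static final int MAX_BUSY_WAIT_MULTIPLIER = 2;      // cap on the per-row multiplier
    static final long MAX_BUSY_WAIT_DURATION = 60_000L; // bounded by the RPC purge timeout

    // Multi-row batches may wait roughly one busyWaitDuration per row, but the
    // multiplier and the total are both capped so the IPC handler is released
    // soon enough to keep the region server available.
    static long effectiveWait(int rowsInBatch) {
        long multiplier = Math.min(rowsInBatch, MAX_BUSY_WAIT_MULTIPLIER);
        return Math.min(MAX_BUSY_WAIT_DURATION, BUSY_WAIT_DURATION * multiplier);
    }

    public static void main(String[] args) {
        System.out.println(effectiveWait(1)); // 30000
        System.out.println(effectiveWait(5)); // 60000 (capped)
    }
}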

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html
new file mode 100644
index 000..0b6b941
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.TagInfo.html
@@ -0,0 +1,306 @@
+VisibilityNewVersionBehaivorTracker.TagInfo (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.security.visibility
+Class VisibilityNewVersionBehaivorTracker.TagInfo
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.security.visibility.VisibilityNewVersionBehaivorTracker.TagInfo
+
+
+
+
+
+
+
+Enclosing class:
+VisibilityNewVersionBehaivorTracker
+
+
+
+private static class VisibilityNewVersionBehaivorTracker.TagInfo
+extends java.lang.Object
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+(package private) java.lang.Byte
+format 
+
+
+(package private) java.util.List
+tags 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Modifier
+Constructor and Description
+
+
+private 
+TagInfo() 
+
+
+private 
+TagInfo(Cell c) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Field Detail
+
+
+
+
+
+tags
+java.util.List tags
+
+
+
+
+
+
+
+format
+java.lang.Byte format
+
+
+
+
+
+
+
+
+
+Constructor Detail
+
+
+
+
+
+TagInfo
+private TagInfo(Cell c)

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 49ef112..b3d1843 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -141,3316 +141,3314 @@
 133import 
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 134import 
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
 135import 
org.apache.hadoop.hbase.procedure2.LockInfo;
-136import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-137import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-138import 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-139import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-140import 
org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-141import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-142import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-143import 
org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-144import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146import 
org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-148import 
org.apache.hadoop.hbase.regionserver.HStore;
-149import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151import 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152import 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153import 
org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154import 
org.apache.hadoop.hbase.replication.ReplicationException;
-155import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-156import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-161import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-162import 
org.apache.hadoop.hbase.security.UserProvider;
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170import 
org.apache.hadoop.hbase.util.Addressing;
-171import 
org.apache.hadoop.hbase.util.Bytes;
-172import 
org.apache.hadoop.hbase.util.CompressionTest;
-173import 
org.apache.hadoop.hbase.util.EncryptionTest;
-174import 
org.apache.hadoop.hbase.util.FSUtils;
-175import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176import 
org.apache.hadoop.hbase.util.HasThread;
-177import 
org.apache.hadoop.hbase.util.IdLock;
-178import 
org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179import 
org.apache.hadoop.hbase.util.Pair;
-180import 
org.apache.hadoop.hbase.util.Threads;
-181import 
org.apache.hadoop.hbase.util.VersionInfo;
-182import 
org.apache.hadoop.hbase.util.ZKDataMigrator;
-183import 
org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184import 
org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186import 
org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187import 
org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188import 
org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189import 
org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190import 
org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193import 
org.apache.zookeeper.KeeperException;
-194import org.eclipse.jetty.server.Server;
-195import 
org.eclipse.jetty.server.ServerConnector;
-196import 
org.eclipse.jetty.servlet.ServletHolder;
-197import 
org.eclipse.jetty.webapp.WebAppContext;
-198
-199import 
org.apache.ha

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
index 9677c93..54e0624 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.html
@@ -513,13 +513,7 @@
 505  throws ServiceException {
 506return stub.splitRegion(controller, 
request);
 507  }
-508
-509  @Override
-510  public DispatchMergingRegionsResponse 
dispatchMergingRegions(RpcController controller,
-511  DispatchMergingRegionsRequest 
request) throws ServiceException {
-512return 
stub.dispatchMergingRegions(controller, request);
-513  }
-514}
+508}
 
 
 



[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
index bdad4fe..8477715 100644
--- a/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/HColumnDescriptor.html
@@ -500,9 +500,9 @@ implements 
  
 HColumnDescriptor(byte[] familyName)
-Deprecated. 
-Construct a column descriptor specifying only the family name
- The other attributes are defaulted.
+Deprecated. 
+use ColumnFamilyDescriptorBuilder.of(byte[])
+
 
 
 
@@ -514,8 +514,9 @@ implements 
  
 HColumnDescriptor(HColumnDescriptor desc)
-Deprecated. 
-Constructor.
+Deprecated. 
+use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
+
 
 
 
@@ -528,9 +529,9 @@ implements 
  
 HColumnDescriptor(java.lang.String familyName)
-Deprecated. 
-Construct a column descriptor specifying only the family name
- The other attributes are defaulted.
+Deprecated. 
+use ColumnFamilyDescriptorBuilder.of(String)
+
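
The builder API these deprecation notes point to can be used like this (a sketch assuming the hbase-client 2.x+ ColumnFamilyDescriptorBuilder API on the classpath):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BuilderMigration {
    public static void main(String[] args) {
        // Replaces: new HColumnDescriptor("cf")
        ColumnFamilyDescriptor plain = ColumnFamilyDescriptorBuilder.of("cf");

        // Builder form for anything beyond the defaults.
        ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(3)
            .build();

        // Replaces: new HColumnDescriptor(desc), i.e. the deep-copy constructor.
        ColumnFamilyDescriptor copy = ColumnFamilyDescriptorBuilder.copy(tuned);
        System.out.println(plain.getNameAsString() + " " + copy.getMaxVersions());
    }
}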
 
 
 
@@ -1840,8 +1841,8 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(java.lang.String familyName)
-Deprecated. 
+public HColumnDescriptor(java.lang.String familyName)
+Deprecated. use ColumnFamilyDescriptorBuilder.of(String)
 Construct a column descriptor specifying only the family name
  The other attributes are defaulted.
 
@@ -1857,8 +1858,8 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(byte[] familyName)
-Deprecated. 
+public HColumnDescriptor(byte[] familyName)
+Deprecated. use ColumnFamilyDescriptorBuilder.of(byte[])
 Construct a column descriptor specifying only the family name
  The other attributes are defaulted.
 
@@ -1874,8 +1875,8 @@ implements 
 
 HColumnDescriptor
-public HColumnDescriptor(HColumnDescriptor desc)
-Deprecated. 
+public HColumnDescriptor(HColumnDescriptor desc)
+Deprecated. use ColumnFamilyDescriptorBuilder.copy(ColumnFamilyDescriptor)
 Constructor.
  Makes a deep copy of the supplied descriptor.
  Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
@@ -1891,7 +1892,7 @@ implements 
 
 HColumnDescriptor
-protected HColumnDescriptor(HColumnDescriptor desc,
+protected HColumnDescriptor(HColumnDescriptor desc,
 boolean deepClone)
 Deprecated. 
 
@@ -1902,7 +1903,7 @@ implements 
 
 HColumnDescriptor
-protected HColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
+protected HColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor delegate)
 Deprecated. 
 
 
@@ -1921,7 +1922,7 @@ implements 
 isLegalFamilyName
 @Deprecated
-public static byte[] isLegalFamilyName(byte[] b)
+public static byte[] isLegalFamilyName(byte[] b)
 Deprecated. Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
 
 Parameters:
@@ -1942,7 +1943,7 @@ public static byte[] 
 
 getName
-public byte[] getName()
+public byte[] getName()
 Deprecated. 
 
 Specified by:
@@ -1958,7 +1959,7 @@ public static byte[] 
 
 getNameAsString
-public java.lang.String getNameAsString()
+public java.lang.String getNameAsString()
 Deprecated. 
 
 Specified by:
@@ -1974,7 +1975,7 @@ public static byte[] 
 
 getValue
-public byte[] getValue(byte[] key)
+public byte[] getValue(byte[] key)
 Deprecated. 
 
 Specified by:
@@ -1992,7 +1993,7 @@ public static byte[] 
 
 getValue
-public java.lang.String getValue(java.lang.String key)
+public java.lang.String getValue(java.lang.String key)
 Deprecated. 
 
 Parameters:
@@ -2008,7 +2009,7 @@ public static byte[] 
 
 getValues
-public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 073a715..7c48389 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -474,7 +474,7 @@ implements clearCompactedfiles(java.util.List filesToRemove) 
 
 
-com.google.common.collect.ImmutableCollection
+org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection
 close()
 Close all the readers. We don't need to worry about subsequent requests because the Region holds a write lock that will prevent any more reads or writes.
@@ -2413,8 +2413,8 @@ public static org.apache.hadoop.fs.Path 
 
 close
-public com.google.common.collect.ImmutableCollection close()
-   throws java.io.IOException
+public org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection close()
+   throws java.io.IOException
 Description copied from interface: Store
 Close all the readers. We don't need to worry about subsequent requests because the Region holds a write lock that will prevent any more reads or writes.
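
The close() contract described here is a standard reader/writer-lock pattern. A generic sketch (plain java.util.concurrent, not HStore code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CloseUnderWriteLock {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private boolean closed;

    // Reads take the shared read lock.
    String read() {
        lock.readLock().lock();
        try {
            if (closed) throw new IllegalStateException("already closed");
            return "cell"; // stand-in for reading from a store file
        } finally {
            lock.readLock().unlock();
        }
    }

    // close() takes the exclusive write lock, so it waits for in-flight reads
    // to drain and no new read can start until it finishes.
    void close() {
        lock.writeLock().lock();
        try {
            closed = true;
        } finally {
            lock.writeLock().unlock();
        }
    }
}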

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index 8b0bc7e..c99c156 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -207,7 +207,7 @@ implements clearCompactionQueues 
 
 
-private com.google.common.cache.Cache<String, String>
+private org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache<String, String>
 closedScanners 
 
 
@@ -1093,7 +1093,7 @@ implements 
 
 closedScanners
-private final com.google.common.cache.Cache<String, String> closedScanners
+private final org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache<String, String> closedScanners
 
 
 
@@ -1170,7 +1170,7 @@ implements 
 SCANNER_ALREADY_CLOSED
 @Deprecated
-private static final java.io.IOException SCANNER_ALREADY_CLOSED
+private static final java.io.IOException SCANNER_ALREADY_CLOSED
 Deprecated. 
 
 
@@ -1647,7 +1647,7 @@ private static final http://docs.oracle.com/javase/8/docs/api/java
 
 
 getRegion
-public Region getRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier)
+public Region getRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Find the HRegion based on a region

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880  Durability durability) throws IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889  byte[] byteNow) throws IOException {
-2890    for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  List<Cell> cells = e.getValue();
-2894  assert cells instanceof RandomAccess;
-2895
-2896  Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i < listSize; 
i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == 
HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934List result = get(get, 
false);
-2935
-2936if (result.size() < count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size() > count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like clas
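
The prepareDeleteTimestamps hunk above pins LATEST_TIMESTAMP delete markers to the newest existing version so a delete cannot mask cells it was not meant to cover. A simplified sketch of that substitution (illustrative; DeleteTimestampSketch is not HBase code):

import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class DeleteTimestampSketch {
    static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

    // A delete issued at LATEST_TIMESTAMP is pinned to the newest existing
    // version; otherwise it would also mask writes that land later with a
    // higher timestamp than intended.
    static long resolveDeleteTimestamp(long requested, SortedSet<Long> existingVersions,
                                       long now) {
        if (requested != LATEST_TIMESTAMP) {
            return requested; // caller pinned the timestamp explicitly
        }
        // No prior version: fall back to the current server time.
        return existingVersions.isEmpty() ? now : existingVersions.last();
    }

    public static void main(String[] args) {
        SortedSet<Long> versions = new TreeSet<>(List.of(100L, 250L));
        System.out.println(resolveDeleteTimestamp(LATEST_TIMESTAMP, versions, 999L)); // 250
    }
}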

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index feb42ea..4bd98f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -185,4189 +185,4266 @@
 177import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-181import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-182import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-183import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-184import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-185import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
-186import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-188import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-189import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-190import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-191import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-192import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-193import 
org.apache.hadoop.hbase.util.Addressing;
-194import 
org.apache.hadoop.hbase.util.Bytes;
-195import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-196import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-197import 
org.apache.hadoop.hbase.util.Pair;
-198import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-199import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-200import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-201import 
org.apache.hadoop.ipc.RemoteException;
-202import 
org.apache.hadoop.util.StringUtils;
-203import 
org.apache.zookeeper.KeeperException;
-204
-205import 
com.google.common.annotations.VisibleForTesting;
-206import com.google.protobuf.Descriptors;
-207import com.google.protobuf.Message;
-208import 
com.google.protobuf.RpcController;
-209import java.util.stream.Collectors;
-210
-211/**
-212 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
-213 * this is an HBase-internal class as defined in
-214 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
-215 * There are no guarantees for backwards source / binary compatibility and methods or class can
-216 * change or go away without deprecation.
-217 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
-218 * an HBaseAdmin directly.
-219 *
-220 * Connection should be an unmanaged connection obtained via
-221 * {@link ConnectionFactory#createConnection(Configuration)}
-222 *
-223 * @see ConnectionFactory
-224 * @see Connection
-225 * @see Admin
-226 */
-227@InterfaceAudience.Private
-228@InterfaceStability.Evolving
-229public class HBaseAdmin implements Admin {
-230  private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
-231
-232  private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-";
+180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+186import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+187import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+188import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+189import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+190import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+191import org.ap
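
The javadoc in this hunk prescribes obtaining Admin from a Connection rather than constructing HBaseAdmin. A minimal usage sketch against the public client API (assumes hbase-client on the classpath and a reachable cluster; the table name t1 is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminUsage {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Obtain Admin from an unmanaged Connection instead of constructing
        // HBaseAdmin directly, as the javadoc above prescribes.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            System.out.println(admin.tableExists(TableName.valueOf("t1")));
        }
    }
}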


[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
index 30d5dd8..beacb44 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.RpcChannelImplementation.html
@@ -109,497 +109,499 @@
 101  private static final 
ScheduledExecutorService IDLE_CONN_SWEEPER = Executors
 102  .newScheduledThreadPool(1, 
Threads.newDaemonThreadFactory("Idle-Rpc-Conn-Sweeper"));
 103
-104  protected final static Map> TOKEN_HANDLERS = new 
HashMap<>();
-105
-106  static {
-107
TOKEN_HANDLERS.put(Kind.HBASE_AUTH_TOKEN, new AuthenticationTokenSelector());
-108  }
-109
-110  protected boolean running = true; // if 
client runs
+104  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_MUTABLE_COLLECTION_PKGPROTECT",
+105  justification="the rest of the 
system which live in the different package can use")
+106  protected final static Map> TOKEN_HANDLERS = new 
HashMap<>();
+107
+108  static {
+109
TOKEN_HANDLERS.put(Kind.HBASE_AUTH_TOKEN, new AuthenticationTokenSelector());
+110  }
 111
-112  protected final Configuration conf;
-113  protected final String clusterId;
-114  protected final SocketAddress 
localAddr;
-115  protected final MetricsConnection 
metrics;
-116
-117  protected final UserProvider 
userProvider;
-118  protected final CellBlockBuilder 
cellBlockBuilder;
-119
-120  protected final int 
minIdleTimeBeforeClose; // if the connection is idle for more than this
-121  // time (in ms), it will be closed at 
any moment.
-122  protected final int maxRetries; // the 
max. no. of retries for socket connections
-123  protected final long failureSleep; // 
Time to sleep before retry on failure.
-124  protected final boolean tcpNoDelay; // 
if T then disable Nagle's Algorithm
-125  protected final boolean tcpKeepAlive; 
// if T then use keepalives
-126  protected final Codec codec;
-127  protected final CompressionCodec 
compressor;
-128  protected final boolean 
fallbackAllowed;
-129
-130  protected final FailedServers 
failedServers;
+112  protected boolean running = true; // if 
client runs
+113
+114  protected final Configuration conf;
+115  protected final String clusterId;
+116  protected final SocketAddress 
localAddr;
+117  protected final MetricsConnection 
metrics;
+118
+119  protected final UserProvider 
userProvider;
+120  protected final CellBlockBuilder 
cellBlockBuilder;
+121
+122  protected final int 
minIdleTimeBeforeClose; // if the connection is idle for more than this
+123  // time (in ms), it will be closed at 
any moment.
+124  protected final int maxRetries; // the 
max. no. of retries for socket connections
+125  protected final long failureSleep; // 
Time to sleep before retry on failure.
+126  protected final boolean tcpNoDelay; // 
if T then disable Nagle's Algorithm
+127  protected final boolean tcpKeepAlive; 
// if T then use keepalives
+128  protected final Codec codec;
+129  protected final CompressionCodec 
compressor;
+130  protected final boolean 
fallbackAllowed;
 131
-132  protected final int connectTO;
-133  protected final int readTO;
-134  protected final int writeTO;
-135
-136  protected final 
PoolMap connections;
+132  protected final FailedServers 
failedServers;
+133
+134  protected final int connectTO;
+135  protected final int readTO;
+136  protected final int writeTO;
 137
-138  private final AtomicInteger callIdCnt = 
new AtomicInteger(0);
+138  protected final 
PoolMap connections;
 139
-140  private final ScheduledFuture 
cleanupIdleConnectionTask;
+140  private final AtomicInteger callIdCnt = 
new AtomicInteger(0);
 141
-142  private int 
maxConcurrentCallsPerServer;
+142  private final ScheduledFuture 
cleanupIdleConnectionTask;
 143
-144  private static final 
LoadingCache concurrentCounterCache =
-145  
CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS).
-146  build(new 
CacheLoader() {
-147@Override public 
AtomicInteger load(InetSocketAddress key) throws Exception {
-148  return new 
AtomicInteger(0);
-149}
-150  });
-151
-152  /**
-153   * Construct an IPC client for the 
cluster clusterId
-154   * @param conf configuration
-155   * @param clusterId the cluster id
-156   * @param localAddr client socket bind 
address.
-157   * @param metrics the connection 
met

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -37,2710 +37,2816 @@
 029import java.util.List;
 030import java.util.Map;
 031import java.util.Optional;
-032import java.util.concurrent.CompletableFuture;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicReference;
-035import java.util.function.BiConsumer;
-036import java.util.regex.Pattern;
-037import java.util.stream.Collectors;
-038
-039import com.google.common.annotations.VisibleForTesting;
-040
-041import io.netty.util.Timeout;
-042import io.netty.util.TimerTask;
-043
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.io.IOUtils;
-047import org.apache.commons.logging.Log;
-048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLoad;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.ServerName;
-059import org.apache.hadoop.hbase.NamespaceDescriptor;
-060import org.apache.hadoop.hbase.HConstants;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-064import org.apache.hadoop.hbase.TableNotDisabledException;
-065import org.apache.hadoop.hbase.TableNotEnabledException;
-066import org.apache.hadoop.hbase.TableNotFoundException;
-067import org.apache.hadoop.hbase.UnknownRegionException;
-068import org.apache.hadoop.hbase.classification.InterfaceAudience;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProt

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 71844ce..75db22d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDes

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayLi
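
For context, a minimal client-side sketch of the drain API this hunk refactors (illustrative only: the host, port and start code passed to ServerName.valueOf are placeholders, and a running cluster plus a matching HBase client on the classpath are assumed):

  import java.util.Collections;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DrainSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Hypothetical region server to exclude from new region assignments.
        ServerName rs = ServerName.valueOf("rs1.example.org", 16020, 1L);
        admin.drainRegionServers(Collections.singletonList(rs));
        System.out.println("Draining: " + admin.listDrainingRegionServers());
        // Undo the drain once maintenance is done.
        admin.removeDrainFromRegionServers(Collections.singletonList(rs));
      }
    }
  }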

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/book.html
--
diff --git a/book.html b/book.html
index 9eeedb5..3cf6127 100644
--- a/book.html
+++ b/book.html
@@ -1381,11 +1381,10 @@ To check for well-formedness and only print output if errors exist, use the comm
 
Keep Configuration In Sync Across the Cluster
 
-When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the content of the conf/ directory to all nodes of the cluster.
+When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the contents of the conf/ directory to all nodes of the cluster.
 HBase will not do this for you.
 Use rsync, scp, or another secure mechanism for copying the configuration files to your nodes.
-For most configuration, a restart is needed for servers to pick up changes An exception is dynamic configuration.
-to be described later below.
+For most configurations, a restart is needed for servers to pick up changes. Dynamic configuration is an exception to this, to be described later below.
 
 
 
@@ -1473,12 +1472,12 @@ You must set JAVA_HOME on each node of your cluster. hbase-env.
 
Loopback IP
 
-Prior to hbase-0.96.0, HBase only used the IP address 127.0.0.1 to refer to localhost, and this could not be configured.
+Prior to hbase-0.96.0, HBase only used the IP address 127.0.0.1 to refer to localhost, and this was not configurable.
 See Loopback IP for more details.
 
NTP
 
-The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism, on your cluster, and that all nodes look to the same service for time synchronization. See the Basic NTP Configuration (http://www.tldp.org/LDP/sag/html/basic-ntp-config.html) at The Linux Documentation Project (TLDP) to set up NTP.
+The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the Basic NTP Configuration (http://www.tldp.org/LDP/sag/html/basic-ntp-config.html) at The Linux Documentation Project (TLDP) to set up NTP.
 
 
 
@@ -1540,8 +1539,8 @@ hadoop  -   nproc   32000
 
Windows
 
-Prior to HBase 0.96, testing for running HBase on Microsoft Windows was limited.
-Running a on Windows nodes is not recommended for production systems.
+Prior to HBase 0.96, running HBase on Microsoft Windows was limited only for testing purposes.
+Running production systems on Windows machines is not recommended.
 
 
 
@@ -1774,8 +1773,8 @@ data loss. This patch is present in Apache Hadoop releases 2.6.1+.
 The bundled jar is ONLY for use in standalone mode.
 In distributed mode, it is critical that the version of Hadoop that is out on your cluster match what is under HBase.
 Replace the hadoop jar found in the HBase lib directory with the hadoop jar you are running on your cluster to avoid version mismatch issues.
-Make sure you replace the jar in HBase everywhere on your cluster.
-Hadoop version mismatch issues have various manifestations but often all looks like its hung up.
+Make sure you replace the jar in HBase across your whole cluster.
+Hadoop version mismatch issues have various manifestations but often all look like it's hung.
 
 
 
@@ -1860,7 +1859,7 @@ HDFS where data is replicated ensures the latter.
 
 
 To configure this standalone variant, edit your hbase-site.xml
-setting the hbase.rootdir to point at a directory in your
+setting hbase.rootdir to point at a directory in your
 HDFS instance but then set hbase.cluster.distributed
 to false. For example:
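
The hbase-site.xml example itself falls outside this hunk; as a minimal sketch under the same assumptions (the NameNode URI is a placeholder), the two properties can also be set programmatically:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class StandaloneOverHdfs {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Keep the standalone (single-JVM) deployment model...
      conf.setBoolean("hbase.cluster.distributed", false);
      // ...but point the root directory at HDFS instead of the local filesystem.
      conf.set("hbase.rootdir", "hdfs://namenode.example.org:8020/hbase");
      System.out.println(conf.get("hbase.rootdir"));
    }
  }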
 
@@ -1912,8 +1911,8 @@ Some of the information that was originally in this section has been moved there
 
 
 A pseudo-distributed mode is simply a fully-distributed mode run on a single host.
-Use this configuration testing and prototyping on HBase.
-Do not use this configuration for production nor for evaluating HBase performance.
+Use this HBase configuration for testing and prototyping purposes only.
+Do not use this configuration for production or for performance evaluation.
 
 
 
@@ -1922,11 +1921,11 @@ Do not use this configuration for production nor for evaluating HBase performanc
 
 By default, HBase runs in standalone mode.
 Both standalone mode and pseudo-distributed mode are provid

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 249d4a0..7369fdf 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -65,12 +65,12 @@
 057import com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * HFileBlock: Version 2
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * Be careful reading from this buf. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * TODO: Make this read-only once made.
-152   *
-153   * We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock.
-161   */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;
-19
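
For orientation, the fixed-size version 2 block header that the fields above mirror, in on-disk order (a reading of the javadoc above; sizes in bytes, assuming HBase checksums are enabled, in which case the total matches HConstants.HFILEBLOCK_HEADER_SIZE):

  // magic record (block type)          8 bytes  -> blockType, header field 0
  // onDiskSizeWithoutHeader            4 bytes  -> header field 1 (includes checksum)
  // uncompressedSizeWithoutHeader      4 bytes  -> header field 2
  // prevBlockOffset                    8 bytes  -> header field 3
  // checksumType                       1 byte
  // bytesPerChecksum                   4 bytes
  // onDiskDataSizeWithHeader           4 bytes  -> header field 6 (excludes checksum)
  // total                             33 bytes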


[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
deleted file mode 100644
index 8f0eb1f..000
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
+++ /dev/null
@@ -1,339 +0,0 @@
-AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
-
-org.apache.hadoop.hbase.client
-Class AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer
-
-java.lang.Object
-  org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer
-    org.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer
-      org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer
-
-All Implemented Interfaces:
-BiConsumer<Void, Throwable>
-
-Enclosing class:
-AsyncHBaseAdmin
-
-private class AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer
-extends AsyncHBaseAdmin.TableProcedureBiConsumer
-
-Field Summary
-
-Fields inherited from class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer
-tableName
-
-Fields inherited from class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer
-admin
-
-Constructor Summary
-
-Constructors
-Constructor and Description
-ModifyColumnFamilyProcedureBiConsumer(AsyncAdmin admin, TableName tableName)
-
-Method Summary
-
-All Methods Instance Methods Concrete Methods
-Modifier and Type / Method and Description
-(package private) String getOperationType()
-
-Methods inherited from class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.TableProcedureBiConsumer
-getDescription, onError, onFinished
-
-Methods inherited from class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer
-accept
-
-Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, 

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRespons

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html
index baee8c1..a0256ee 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/AsyncMetaTableAccessor.html
@@ -30,551 +30,607 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.NavigableMap;
-028import java.util.Optional;
-029import java.util.SortedMap;
-030import java.util.concurrent.CompletableFuture;
-031import java.util.regex.Matcher;
-032import java.util.regex.Pattern;
-033
-034import org.apache.commons.logging.Log;
-035import org.apache.commons.logging.LogFactory;
-036import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor;
-037import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-038import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
-039import org.apache.hadoop.hbase.classification.InterfaceAudience;
-040import org.apache.hadoop.hbase.client.Consistency;
-041import org.apache.hadoop.hbase.client.Get;
-042import org.apache.hadoop.hbase.client.RawAsyncTable;
-043import org.apache.hadoop.hbase.client.RawScanResultConsumer;
-044import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-045import org.apache.hadoop.hbase.client.Result;
-046import org.apache.hadoop.hbase.client.Scan;
-047import org.apache.hadoop.hbase.client.TableState;
-048import org.apache.hadoop.hbase.exceptions.DeserializationException;
-049import org.apache.hadoop.hbase.util.Bytes;
-050import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051import org.apache.hadoop.hbase.util.Pair;
-052
-053/**
-054 * The asynchronous meta table accessor. Used to read/write region and assignment information store
-055 * in hbase:meta.
-056 */
-057@InterfaceAudience.Private
-058public class AsyncMetaTableAccessor {
-059
-060  private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class);
-061
-062
-063  /** The delimiter for meta columns for replicaIds > 0 */
-064  private static final char META_REPLICA_ID_DELIMITER = '_';
+025import java.util.Collections;
+026import java.util.List;
+027import java.util.Map;
+028import java.util.NavigableMap;
+029import java.util.Optional;
+030import java.util.SortedMap;
+031import java.util.concurrent.CompletableFuture;
+032import java.util.regex.Matcher;
+033import java.util.regex.Pattern;
+034import java.util.stream.Collectors;
+035
+036import org.apache.commons.logging.Log;
+037import org.apache.commons.logging.LogFactory;
+038import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor;
+039import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
+040import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
+041import org.apache.hadoop.hbase.classification.InterfaceAudience;
+042import org.apache.hadoop.hbase.client.Connection;
+043import org.apache.hadoop.hbase.client.Consistency;
+044import org.apache.hadoop.hbase.client.Get;
+045import org.apache.hadoop.hbase.client.RawAsyncTable;
+046import org.apache.hadoop.hbase.client.RawScanResultConsumer;
+047import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+048import org.apache.hadoop.hbase.client.Result;
+049import org.apache.hadoop.hbase.client.Scan;
+050import org.apache.hadoop.hbase.client.TableState;
+051import org.apache.hadoop.hbase.client.Scan.ReadType;
+052import org.apache.hadoop.hbase.exceptions.DeserializationException;
+053import org.apache.hadoop.hbase.util.Bytes;
+054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+055import org.apache.hadoop.hbase.util.Pair;
+056
+057/**
+058 * The asynchronous meta table accessor. Used to read/write region and assignment information store
+059 * in hbase:meta.
+060 */
+061@InterfaceAudience.Private
+062public class AsyncMetaTableAccessor {
+063
+064  private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class);
 065
-066  /** A regex for parsing server columns from meta. See above javadoc for meta layout */
-067  private static final Pattern SERVER_COLUMN_PATTERN = Pattern
-068      .compile("^server(_[0-9a-fA-F]{4})?$");
+066
+067  /** The delimiter for meta columns for replicaIds > 0 */
+068  private static final char META_REPLICA_ID_DELIMITER = '_';
 069
-070  public static CompletableFuture<Boolean> tableExists(RawAsyncTable metaTable, TableName tableName) {
-071    if (tableName.equals(META_TABLE_NAME)) {
-072      return CompletableFuture.completedFuture(true);
-073    }
-074    return getTableState(metaTable, tableName).thenApply(Optional::isPresent);
-075  }
-076
-077  public s
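
A small usage sketch composing the CompletableFuture returned by the tableExists helper above (illustrative; the RawAsyncTable for hbase:meta is assumed to come from an AsyncConnection elsewhere):

  static void printTableExists(RawAsyncTable metaTable, TableName tn) {
    AsyncMetaTableAccessor.tableExists(metaTable, tn)
        .thenAccept(exists -> System.out.println(tn + " exists: " + exists));
  }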

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
index 6de986f..c895448 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
@@ -26,1592 +26,1693 @@
 018package org.apache.hadoop.hbase.master.balancer;
 019
 020import java.util.ArrayDeque;
-021import java.util.Arrays;
-022import java.util.Collection;
-023import java.util.Deque;
-024import java.util.HashMap;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.Map.Entry;
-029import java.util.Random;
-030
-031import org.apache.commons.logging.Log;
-032import org.apache.commons.logging.LogFactory;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.ClusterStatus;
-035import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.HRegionInfo;
-038import org.apache.hadoop.hbase.RegionLoad;
-039import org.apache.hadoop.hbase.ServerLoad;
-040import org.apache.hadoop.hbase.ServerName;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.classification.InterfaceAudience;
-043import org.apache.hadoop.hbase.master.MasterServices;
-044import org.apache.hadoop.hbase.master.RegionPlan;
-045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
-046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
-048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
-049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-050import org.apache.hadoop.hbase.util.Bytes;
-051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052
-053import com.google.common.collect.Lists;
-054
-055/**
-056 * This is a best effort load balancer. Given a Cost function F(C) => x It will
-057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
-058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:
-059 *
-060 *   Region Load
-061 *   Table Load
-062 *   Data Locality
-063 *   Memstore Sizes
-064 *   Storefile Sizes
-065 *
-066 *
-067 *
-068 * Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
-069 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are
-070 * scaled by their respective multipliers:
+021import java.util.ArrayList;
+022import java.util.Arrays;
+023import java.util.Collection;
+024import java.util.Collections;
+025import java.util.Deque;
+026import java.util.HashMap;
+027import java.util.LinkedList;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Map.Entry;
+031import java.util.Random;
+032
+033import org.apache.commons.logging.Log;
+034import org.apache.commons.logging.LogFactory;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.hbase.ClusterStatus;
+037import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+038import org.apache.hadoop.hbase.HConstants;
+039import org.apache.hadoop.hbase.HRegionInfo;
+040import org.apache.hadoop.hbase.RegionLoad;
+041import org.apache.hadoop.hbase.ServerLoad;
+042import org.apache.hadoop.hbase.ServerName;
+043import org.apache.hadoop.hbase.TableName;
+044import org.apache.hadoop.hbase.classification.InterfaceAudience;
+045import org.apache.hadoop.hbase.master.MasterServices;
+046import org.apache.hadoop.hbase.master.RegionPlan;
+047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
+048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
+049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
+050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
+051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
+052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
+053import org.apache.hadoop.hbase.util.Bytes;
+054import org.apache.hadoop.hbase.util.Environment
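
A sketch of the weighted-cost combination the javadoc above describes (names and the normalization step are illustrative, not the balancer's exact code):

  // Each cost function yields a value in [0, 1]; the balancer compares the
  // combined cost of the current cluster state against a mutated candidate.
  static double weightedCost(double[] costs, double[] multipliers) {
    double total = 0, weight = 0;
    for (int i = 0; i < costs.length; i++) {
      if (multipliers[i] <= 0) continue; // a non-positive multiplier disables a function
      total += multipliers[i] * costs[i];
      weight += multipliers[i];
    }
    return weight == 0 ? 0 : total / weight; // lower is better
  }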

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 0c3fe3b..d262744 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -149,3339 +149,3348 @@
 141import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
 142import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 143import org.apache.hadoop.hbase.quotas.QuotaUtil;
-144import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-145import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-146import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-147import org.apache.hadoop.hbase.regionserver.HRegionServer;
-148import org.apache.hadoop.hbase.regionserver.HStore;
-149import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-150import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-151import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-152import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-153import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-154import org.apache.hadoop.hbase.replication.ReplicationException;
-155import org.apache.hadoop.hbase.replication.ReplicationFactory;
-156import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-157import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-158import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-159import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
-160import org.apache.hadoop.hbase.replication.regionserver.Replication;
-161import org.apache.hadoop.hbase.security.AccessDeniedException;
-162import org.apache.hadoop.hbase.security.UserProvider;
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-165import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-166import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-167import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-168import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-169import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-170import org.apache.hadoop.hbase.util.Addressing;
-171import org.apache.hadoop.hbase.util.Bytes;
-172import org.apache.hadoop.hbase.util.CompressionTest;
-173import org.apache.hadoop.hbase.util.EncryptionTest;
-174import org.apache.hadoop.hbase.util.FSUtils;
-175import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-176import org.apache.hadoop.hbase.util.HasThread;
-177import org.apache.hadoop.hbase.util.IdLock;
-178import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-179import org.apache.hadoop.hbase.util.Pair;
-180import org.apache.hadoop.hbase.util.Threads;
-181import org.apache.hadoop.hbase.util.VersionInfo;
-182import org.apache.hadoop.hbase.util.ZKDataMigrator;
-183import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
-184import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-185import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-186import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-187import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-188import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
-189import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
-190import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-191import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-192import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-193import org.apache.zookeeper.KeeperException;
-194import org.eclipse.jetty.server.Server;
-195import org.eclipse.jetty.server.ServerConnector;
-196import org.eclipse.jetty.servlet.ServletHolder;
-197import org.eclipse.jetty.webapp.WebAppContext;
-198
-199import com.google.common.annotations.VisibleForTesting;
-200import com.google.common.collect.Lists;
-201import com.google.common.collect.Maps;
-202import com.google.protobuf.Descriptors;
-203import com.google.protobuf.Service;
-204
-205/**
-206 * HMaster is the "master server" for HBase. An HBase cluster has one active
-207 * master.  If many masters are started, all compete.  Whichever wins goes on to
-208 * run the cluster.  All others park themselves in their constructor until
-209 * master or cluster shutdown or until the active master loses its lease in
-210 * zookeeper.  Thereafter, all running master jostle to take over master role.
-211 *
-212 * The Master can be as


[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
index 8cfa92f..451f15f 100644
--- a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
+++ b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
@@ -262,7 +262,7 @@ the order they are declared.
 
 
 values
-public static KeepDeletedCells[] values()
+public static KeepDeletedCells[] values()
Returns an array containing the constants of this enum type, in
the order they are declared.  This method may be used to iterate
over the constants as follows:
@@ -282,7 +282,7 @@ for (KeepDeletedCells c : KeepDeletedCells.values())


valueOf
-public static KeepDeletedCells valueOf(String name)
+public static KeepDeletedCells valueOf(String name)
Returns the enum constant of this type with the specified name.
The string must match exactly an identifier used to declare an
enum constant in this type.  (Extraneous whitespace characters are 
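
A tiny sketch of the two generated enum helpers documented above (KeepDeletedCells declares the constants FALSE, TRUE and TTL):

  for (KeepDeletedCells c : KeepDeletedCells.values()) {
    System.out.println(c); // constants in declaration order
  }
  // valueOf requires an exact constant-name match, else IllegalArgumentException:
  KeepDeletedCells k = KeepDeletedCells.valueOf("TTL");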

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
index faa94f1..992a314 100644
--- a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
+++ b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
@@ -263,7 +263,7 @@ the order they are declared.


values
-public static MemoryCompactionPolicy[] values()
+public static MemoryCompactionPolicy[] values()
Returns an array containing the constants of this enum type, in
the order they are declared.  This method may be used to iterate
over the constants as follows:
@@ -283,7 +283,7 @@ for (MemoryCompactionPolicy c : MemoryCompactionPolicy.values())


valueOf
-public static MemoryCompactionPolicy valueOf(String name)
+public static MemoryCompactionPolicy valueOf(String name)
Returns the enum constant of this type with the specified name.
The string must match exactly an identifier used to declare an
enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index 6562834..746a472 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -166,10 +166,10 @@
 
java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
-org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 org.apache.hadoop.hbase.backup.BackupType
 org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
+org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
+org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
index ac70770..7c23eaf 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
@@ -99,65 +99,58 @@
 
 
 
-org.apache.hadoop.hbase.io.hfile
-
-Provides implementations of HFile and HFile BlockCache.
-
-
-
 org.apache.hadoop.hbase.mapreduce
 
 Provides HBase http://wiki.apache.org/hadoop/HadoopMapRe

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
index 038e1d3..0ed7e8b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
@@ -35,530 +35,610 @@
 027import org.apache.hadoop.hbase.Cell;
 028import org.apache.hadoop.hbase.CellComparator;
 029import org.apache.hadoop.hbase.CellUtil;
-030import org.apache.hadoop.hbase.classification.InterfaceAudience;
-031import org.apache.hadoop.hbase.exceptions.DeserializationException;
-032import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-033import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-034import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-035
-036/**
-037 * Implementation of {@link Filter} that represents an ordered List of Filters
-038 * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL}
-039 * (AND) or {@link Operator#MUST_PASS_ONE} (OR).
-040 * Since you can use Filter Lists as children of Filter Lists, you can create a
-041 * hierarchy of filters to be evaluated.
-042 *
-043 *
-044 * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does
-045 * not include the KeyValue.
-046 *
-047 *
-048 * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated.
-049 *
-050 *
-051 * Defaults to {@link Operator#MUST_PASS_ALL}.
-052 */
-053@InterfaceAudience.Public
-054final public class FilterList extends FilterBase {
-055  /** set operator */
-056  @InterfaceAudience.Public
-057  public static enum Operator {
-058    /** !AND */
-059    MUST_PASS_ALL,
-060    /** !OR */
-061    MUST_PASS_ONE
-062  }
-063
-064  private static final int MAX_LOG_FILTERS = 5;
-065  private Operator operator = Operator.MUST_PASS_ALL;
-066  private final List<Filter> filters;
-067  private Filter seekHintFilter = null;
-068
-069  /** Reference Cell used by {@link #transformCell(Cell)} for validation purpose. */
-070  private Cell referenceCell = null;
-071
-072  /**
-073   * When filtering a given Cell in {@link #filterKeyValue(Cell)},
-074   * this stores the transformed Cell to be returned by {@link #transformCell(Cell)}.
-075   *
-076   * Individual filters transformation are applied only when the filter includes the Cell.
-077   * Transformations are composed in the order specified by {@link #filters}.
-078   */
-079  private Cell transformedCell = null;
+030import org.apache.hadoop.hbase.KeyValueUtil;
+031import org.apache.hadoop.hbase.classification.InterfaceAudience;
+032import org.apache.hadoop.hbase.exceptions.DeserializationException;
+033import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+034import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+035import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+036
+037/**
+038 * Implementation of {@link Filter} that represents an ordered List of Filters
+039 * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL}
+040 * (AND) or {@link Operator#MUST_PASS_ONE} (OR).
+041 * Since you can use Filter Lists as children of Filter Lists, you can create a
+042 * hierarchy of filters to be evaluated.
+043 *
+044 *
+045 * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does
+046 * not include the KeyValue.
+047 *
+048 *
+049 * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated.
+050 *
+051 *
+052 * Defaults to {@link Operator#MUST_PASS_ALL}.
+053 */
+054@InterfaceAudience.Public
+055final public class FilterList extends FilterBase {
+056  /** set operator */
+057  @InterfaceAudience.Public
+058  public static enum Operator {
+059    /** !AND */
+060    MUST_PASS_ALL,
+061    /** !OR */
+062    MUST_PASS_ONE
+063  }
+064
+065  private static final int MAX_LOG_FILTERS = 5;
+066  private Operator operator = Operator.MUST_PASS_ALL;
+067  private final List<Filter> filters;
+068  private Filter seekHintFilter = null;
+069
+070  /**
+071   * Save previous return code and previous cell for every filter in filter list. For MUST_PASS_ONE,
+072   * we use the previous return code to decide whether we should pass current cell encountered to
+073   * the filter. For MUST_PASS_ALL, the two list are meaningless.
+074   */
+075  private List<ReturnCode> prevFilterRCList = null;
+076  private List<Cell> prevCellList = null;
+077
+078  /** Reference Cell used by {@link #tran
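
A short usage sketch of the operator semantics described in the javadoc above (the row prefixes are illustrative):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.FilterList;
  import org.apache.hadoop.hbase.filter.PrefixFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  // MUST_PASS_ONE ORs the child filters; MUST_PASS_ALL (the default) ANDs them.
  FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ONE);
  list.addFilter(new PrefixFilter(Bytes.toBytes("row-a")));
  list.addFilter(new PrefixFilter(Bytes.toBytes("row-b")));
  Scan scan = new Scan().setFilter(list);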

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
index ca56dfc..ca03f9b 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
@@ -408,7 +408,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
index addfeeb..e029a4f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
@@ -409,7 +409,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcId, getProcIdHashCode, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwne

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
index b6c2fe3..1765903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
@@ -60,892 +60,917 @@
 052import org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import org.apache.hadoop.hbase.fs.HFileSystem;
 054import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import org.apache.hadoop.hbase.io.compress.Compression;
-056import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import org.apache.hadoop.hbase.regionserver.CellSink;
-059import org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import com.google.common.annotations.VisibleForTesting;
-071import com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both keys and values are byte arrays.
-076 *
-077 * The memory footprint of a HFile includes the following (below is taken from the
-078 * TFile documentation
-080 * but applies also to HFile):
-081 *
-082 * • Some constant overhead of reading or writing a compressed block.
-083 *
-084 *   • Each compressed block requires one compression/decompression codec for
-085 *     I/O.
-086 *   • Temporary space to buffer the key.
-087 *   • Temporary space to buffer the value.
-088 *
-089 * • HFile index, which is proportional to the total number of Data Blocks.
-090 *   The total amount of memory needed to hold the index can be estimated as
-091 *   (56+AvgKeySize)*NumBlocks.
-092 *
-093 * Suggestions on performance optimization.
-094 *
-095 * • Minimum block size. We recommend a setting of minimum block size between
-096 *   8KB to 1MB for general usage. Larger block size is preferred if files are
-097 *   primarily for sequential access. However, it would lead to inefficient random
-098 *   access (because there are more data to decompress). Smaller blocks are good
-099 *   for random access, but require more memory to hold the block index, and may
-100 *   be slower to create (because we must flush the compressor stream at the
-101 *   conclusion of each data block, which leads to an FS I/O flush). Further, due
-102 *   to the internal caching in Compression codec, the smallest possible block
-103 *   size would be around 20KB-30KB.
-104 * • The current implement
+055import org.apache.hadoop.hbase.io.MetricsIO;
+056import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import org.apache.hadoop.hbase.io.compress.Compression;
+058import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import org.apache.hadoop.hbase.regionserver.CellSink;
+061import org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import org.apache.hadoop.hbase.util.Bytes;
+069import org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import com.google.common.annotations.VisibleForTesting;
+073import com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 *
+079 * The memory footprint of a HFile includes the following (below is taken from the
+080 * TFile documentation
+082 * but applies also to HFile):
+083 *
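
As a rough worked example of the (56+AvgKeySize)*NumBlocks index estimate quoted above; every input number below is an assumption chosen for illustration, not taken from the patch:

public class HFileIndexEstimate {
  public static void main(String[] args) {
    long fileSize   = 10L * 1024 * 1024 * 1024; // 10 GiB of data blocks (assumed)
    long blockSize  = 64L * 1024;               // 64 KiB minimum block size (assumed)
    long avgKeySize = 50;                       // 50-byte average key (assumed)

    long numBlocks  = fileSize / blockSize;          // 163,840 blocks
    long indexBytes = (56 + avgKeySize) * numBlocks; // ~17.4 MB to hold the index
    System.out.printf("blocks=%d, index=%.1f MB%n", numBlocks, indexBytes / 1e6);
  }
}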

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
index c2e9c5e..dc98b76 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
@@ -2498,14 +2498,16 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
boolean treatFailureAsError) 
 
 
-RegionMonitor(Connection connection,
+RegionMonitor(Connection connection,
  String[] monitorTargets,
  boolean useRegExp,
- Canary.Sink sink,
+ Canary.RegionStdOutSink sink,
  ExecutorService executor,
  boolean writeSniffing,
  TableName writeTableName,
- boolean treatFailureAsError)
+ boolean treatFailureAsError,
+ HashMap<String, Long> configuredReadTableTimeouts,
+ long configuredWriteTableTimeout)
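
The two parameters added to RegionMonitor are plain caller-supplied values: a map of per-table read timeouts and a single write-table timeout, both in milliseconds. A hedged sketch of assembling them; the table names and budgets are invented:

import java.util.HashMap;

public class CanaryTimeouts {
  public static void main(String[] args) {
    // Per-table read timeouts in milliseconds, keyed by table name.
    HashMap<String, Long> configuredReadTableTimeouts = new HashMap<>();
    configuredReadTableTimeouts.put("usertable", 2000L); // 2 s read budget
    configuredReadTableTimeouts.put("metrics", 500L);    // tighter budget

    // Single timeout for the canary's write table, also in milliseconds.
    long configuredWriteTableTimeout = 3000L;

    System.out.println(configuredReadTableTimeouts
        + " write=" + configuredWriteTableTimeout);
  }
}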
 
 
 RegionServerMonitor(Connection connection,
@@ -2524,12 +2526,13 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 AtomicLong successes)
 
 
-RegionTask(Connection connection,
+RegionTask(Connection connection,
   HRegionInfo region,
   ServerName serverName,
-  Canary.Sink sink,
+  Canary.RegionStdOutSink sink,
   Canary.RegionTask.TaskType taskType,
-  boolean rawScanEnabled) 
+  boolean rawScanEnabled,
+  AtomicLong rwLatency)
 
 
 ZookeeperMonitor(Connection connection,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html
index d986927..8928861 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/MasterSwitchType.html
@@ -236,11 +236,15 @@ the order they are declared.
 
 
 
+boolean
+MasterServices.isSplitOrMergeEnabled(MasterSwitchType switchType) 
+
+
 void
 MasterCoprocessorHost.postSetSplitOrMergeEnabled(boolean newValue,
   MasterSwitchType switchType) 
 
-
+
 boolean
 MasterCoprocessorHost.preSetSplitOrMergeEnabled(boolean newValue,
  MasterSwitchType switchType) 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
index 275c9a6..acec8b0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
@@ -452,7 +452,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 RegionObserver.preSplitBeforePONR(ObserverContext ctx,
   byte[] splitKey,
   List<Mutation> metaEntries)
-Deprecated.
+Deprecated.
+No longer called in hbase2/AMv2 given the master runs splits now;
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
--
diff --git a/devapidocs/org/apac

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html
index b53ee9d..061614e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.html
@@ -148,7 +148,7 @@ extends 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterSer
 
 
 Methods inherited from 
interface org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface
-abortProcedure, addColumn, addReplicationPeer, assignRegion, balance, createNamespace, createTable, deleteColumn, deleteNamespace, deleteSnapshot, deleteTable, disableReplicationPeer, disableTable, drainRegionServers, enableCatalogJanitor, enableReplicationPeer, enableTable, execMasterService, execProcedure, execProcedureWithRet, getClusterStatus, getCompletedSnapshots, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getNamespaceDescriptor, getProcedureResult, getReplicationPeerConfig, getSchemaAlterStatus, getSecurityCapabilities, getTableDescriptors, getTableNames, getTableState, isBalancerEnabled, isCatalogJanitorEnabled, isCleanerChoreEnabled, isMasterInMaintenanceMode, isMasterRunning, isNormalizerEnabled, isProcedureDone, isSnapshotDone, isSplitOrMergeEnabled, listDrainingRegionServers, listLocks, listNamespaceDescriptors, listProcedures, listReplicationPeers, listTableDescriptorsByNamespace, listTableNamesByNamespace, mergeTableRegions, modifyColumn, modifyNamespace, modifyTable, moveRegion, normalize, offlineRegion, removeDrainFromRegionServers, removeReplicationPeer, restoreSnapshot, runCatalogScan, runCleanerChore, setBalancerRunning, setCleanerChoreRunning, setNormalizerRunning, setQuota, setSplitOrMergeEnabled, shutdown, snapshot, stopMaster, truncateTable, unassignRegion, updateReplicationPeerConfig
+abortProcedure, addColumn, addReplicationPeer, assignRegion, balance, createNamespace, createTable, deleteColumn, deleteNamespace, deleteSnapshot, deleteTable, disableReplicationPeer, disableTable, drainRegionServers, enableCatalogJanitor, enableReplicationPeer, enableTable, execMasterService, execProcedure, execProcedureWithRet, getClusterStatus, getCompletedSnapshots, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getNamespaceDescriptor, getProcedureResult, getQuotaStates, getReplicationPeerConfig, getSchemaAlterStatus, getSecurityCapabilities, getSpaceQuotaRegionSizes, getTableDescriptors, getTableNames, getTableState, isBalancerEnabled, isCatalogJanitorEnabled, isCleanerChoreEnabled, isMasterInMaintenanceMode, isMasterRunning, isNormalizerEnabled, isProcedureDone, isSnapshotDone, isSplitOrMergeEnabled, listDrainingRegionServers, listLocks, listNamespaceDescriptors, listProcedures, listReplicationPeers, listTableDescriptorsByNamespace, listTableNamesByNamespace, mergeTableRegions, modifyColumn, modifyNamespace, modifyTable, moveRegion, normalize, offlineRegion, removeDrainFromRegionServers, removeReplicationPeer, restoreSnapshot, runCatalogScan, runCleanerChore, setBalancerRunning, setCleanerChoreRunning, setNormalizerRunning, setQuota, setSplitOrMergeEnabled, shutdown, snapshot, stopMaster, truncateTable, unassignRegion, updateReplicationPeerConfig
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html 
b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
new file mode 100644
index 000..d0a444e
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/client/QuotaStatusCalls.html
@@ -0,0 +1,415 @@
+QuotaStatusCalls (Apache HBase 2.0.0-SNAPSHOT API)
+
@@ -113,7 +113,8 @@ var activeTableTab = "activeTableTab";
 
 
 
-public final class Canary
+@InterfaceAudience.Private
+public final class Canary
 extends java.lang.Object
 implements org.apache.hadoop.util.Tool
 HBase Canary Tool, that can be used to do
@@ -406,39 +407,6 @@ implements org.apache.hadoop.util.Tool
 Canary entry point for specified table.
 
 
-
-static void
-sniff(Admin admin,
- TableName tableName)
-Canary entry point for specified table.
-
-
-
-static void
-sniff(Admin admin,
- TableName tableName,
- boolean rawScanEnabled)
-Canary entry point for specified table.
-
-
-
-static void
-sniff(Admin admin,
- TableName tableName,
- Canary.RegionTask.TaskType taskType)
-Canary entry point for specified table with task type (read/write).
- Keeping this method backward compatible.
-
-
-
-static void
-sniff(Admin admin,
- TableName tableName,
- Canary.RegionTask.TaskType taskType,
- boolean rawScanEnabled)
-Canary entry point for specified table with task type (read/write).
-
-
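
Although the static sniff entry points above were removed, Canary still implements org.apache.hadoop.util.Tool, so it can be driven through ToolRunner. A hedged launcher sketch; the no-arg constructor and the org.apache.hadoop.hbase.tool package are assumptions, not confirmed by this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class CanaryLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ToolRunner parses generic Hadoop options; the remaining args
    // (e.g. names of tables to sniff) are handed to Canary.run(String[]).
    // new Canary() assumes the tool exposes a no-arg constructor.
    int exitCode = ToolRunner.run(conf,
        new org.apache.hadoop.hbase.tool.Canary(), args);
    System.exit(exitCode);
  }
}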
 
 
 
@@ -467,7 +435,7 @@ implements org.apache.hadoop.util.Tool
 
 
 USAGE_EXIT_CODE
-private static final int USAGE_EXIT_CODE
+private static final int USAGE_EXIT_CODE
 
 See Also:
 Constant
 Field Values
@@ -480,7 +448,7 @@ implements org.apache.hadoop.util.Tool
 
 
 INIT_ERROR_EXIT_CODE
-private static final int INIT_ERROR_EXIT_CODE
+private static final int INIT_ERROR_EXIT_CODE
 
 See Also:
 Constant
 Field Values
@@ -493,7 +461,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TIMEOUT_ERROR_EXIT_CODE
-private static final int TIMEOUT_ERROR_EXIT_CODE
+private static final int TIMEOUT_ERROR_EXIT_CODE
 
 See Also:
 Constant
 Field Values
@@ -506,7 +474,7 @@ implements org.apache.hadoop.util.Tool
 
 
 ERROR_EXIT_CODE
-private static final int ERROR_EXIT_CODE
+private static final int ERROR_EXIT_CODE
 
 See Also:
 Constant
 Field Values
@@ -519,7 +487,7 @@ implements org.apache.hadoop.util.Tool
 
 
 FAILURE_EXIT_CODE
-private static final int FAILURE_EXIT_CODE
+private static final int FAILURE_EXIT_CODE
 
 See Also:
 Constant
 Field Values
@@ -532,7 +500,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DEFAULT_INTERVAL
-private static final long DEFAULT_INTERVAL
+private static final long DEFAULT_INTERVAL
 
 See Also:
 Constant
 Field Values
@@ -545,7 +513,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DEFAULT_TIMEOUT
-private static final long DEFAULT_TIMEOUT
+private static final long DEFAULT_TIMEOUT
 
 See Also:
 Constant
 Field Values
@@ -558,7 +526,7 @@ implements org.apache.hadoop.util.Tool
 
 
 MAX_THREADS_NUM
-private static final int MAX_THREADS_NUM
+private static final int MAX_THREADS_NUM
 
 See Also:
 Constant
 Field Values
@@ -571,7 +539,7 @@ implements org.apache.hadoop.util.Tool
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -580,7 +548,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DEFAULT_WRITE_TABLE_NAME
-public static final TableName DEFAULT_WRITE_TABLE_NAME
+public static final TableName DEFAULT_WRITE_TABLE_NAME
 
 
 
@@ -589,7 +557,7 @@ implements org.apache.hadoop.util.Tool
 
 
 CANARY_TABLE_FAMILY_NAME
-private static final String CANARY_TABLE_FAMILY_NAME
+private static final String CANARY_TABLE_FAMILY_NAME
 
 See Also:
 Constant
 Field Values
@@ -602,7 +570,7 @@ implements org.apache.hadoop.util.Tool
 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -611,7 +579,7 @@ implements org.apache.hadoop.util.Tool
 
 
 interval
-private long interval
+private long interval
 
 
 
@@ -620,7 +588,7 @@ implements org.apache.hadoop.util.Tool
 
 
 sink
-private Can

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
new file mode 100644
index 000..32185d0
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ChunkCreator.MemStoreChunkPool.html
@@ -0,0 +1,508 @@
+ChunkCreator.MemStoreChunkPool (Apache HBase 2.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.regionserver
+Class ChunkCreator.MemStoreChunkPool
+
+java.lang.Object
+  org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool
+
+
+
+
+
+
+
+All Implemented Interfaces:
+HeapMemoryManager.HeapMemoryTuneObserver
+
+
+Enclosing class:
+ChunkCreator
+
+
+
+private class ChunkCreator.MemStoreChunkPool
+extends java.lang.Object
+implements HeapMemoryManager.HeapMemoryTuneObserver
+A pool of Chunk instances.
+
+ MemStoreChunkPool caches a number of retired chunks for reuse; this can
+ reduce byte allocation when writing and thereby ease garbage
+ collection pressure on the JVM.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes 
+
+Modifier and Type
+Class and Description
+
+
+private class 
+ChunkCreator.MemStoreChunkPool.StatisticsThread 
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+private AtomicLong
+chunkCount
+
+private int
+maxCount
+
+private float
+poolSizePercentage
+
+private BlockingQueue<Chunk>
+reclaimedChunks
+
+private AtomicLong
+reusedChunkCount
+
+private ScheduledExecutorService
+scheduleThreadPool
+Statistics thread schedule pool
+
+private static int
+statThreadPeriod
+Statistics thread
+
+
+
+private static int
+statThreadPeriod
+Statistics thread
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+MemStoreChunkPool(int maxCount,
+ int initialCount,
+ float poolSizePercentage) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+(package private) Chunk
+getChunk()
+Poll a chunk from the pool; reset it if not null, else create a new chunk
+ to return if we have not yet created the max allowed chunk count
+ (see the sketch after this summary).
+
+
+
+private int
+getMaxCount() 
+
+
+void
+onHeapMemoryTune(long newMemstoreSize,
+long newBlockCacheSize)
+This method is called by HeapMemoryManager when a heap memory tune action
+ takes place.
+
+
+
+private void
+putbackChunks(Set<Integer> chunks)
+Add the chunks to the pool; when the pool reaches its max size, it will
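
A minimal sketch of the pooling pattern summarized by getChunk() and putbackChunks() above. PooledChunk and all names below are simplified stand-ins, not the actual ChunkCreator code:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

final class SimpleChunkPool {
  // Stand-in for the real Chunk: a fixed buffer that can be rewound for reuse.
  static final class PooledChunk {
    final byte[] data = new byte[2 * 1024 * 1024]; // 2 MiB, an assumed chunk size
    void reset() { /* rewind the write offset so the buffer can be reused */ }
  }

  private final BlockingQueue<PooledChunk> reclaimedChunks;
  private final AtomicLong chunkCount = new AtomicLong();
  private final int maxCount;

  SimpleChunkPool(int maxCount) {
    this.maxCount = maxCount;
    this.reclaimedChunks = new LinkedBlockingQueue<>(maxCount);
  }

  // Mirrors getChunk(): prefer a reclaimed chunk; otherwise allocate a new
  // one while under maxCount. (The check-then-increment is not atomic; the
  // real pool is more careful. Returning null tells the caller to allocate
  // outside the pool.)
  PooledChunk getChunk() {
    PooledChunk chunk = reclaimedChunks.poll();
    if (chunk != null) {
      chunk.reset();
      return chunk;
    }
    if (chunkCount.get() < maxCount) {
      chunkCount.incrementAndGet();
      return new PooledChunk();
    }
    return null;
  }

  // Mirrors putbackChunks(): retired chunks are queued for reuse; once the
  // queue is at capacity, offer() fails and the surplus chunk is left to GC.
  void putbackChunk(PooledChunk chunk) {
    reclaimedChunks.offer(chunk);
  }
}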

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html 
b/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html
deleted file mode 100644
index 645a51b..000
--- a/apidocs/org/apache/hadoop/hbase/TableNotEnabledException.html
+++ /dev/null
@@ -1,334 +0,0 @@
-TableNotEnabledException (Apache HBase 2.0.0-SNAPSHOT API)
-
-org.apache.hadoop.hbase
-Class TableNotEnabledException
-
-java.lang.Object
-  java.lang.Throwable
-    java.lang.Exception
-      java.io.IOException
-        org.apache.hadoop.hbase.HBaseIOException
-          org.apache.hadoop.hbase.DoNotRetryIOException
-            org.apache.hadoop.hbase.TableNotEnabledException
-
-All Implemented Interfaces:
-java.io.Serializable
-
-
-
-@InterfaceAudience.Public
-public class TableNotEnabledException
-extends DoNotRetryIOException
-Thrown if a table should be enabled but is not.
-
-See Also:
-Serialized Form
-
-
-
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors 
-
-Constructor and Description
-
-
-TableNotEnabledException()
-default constructor
-
-
-
-TableNotEnabledException(byte[] tableName) 
-
-
-TableNotEnabledException(String s)
-Constructor
-
-
-
-TableNotEnabledException(TableName tableName) 
-
-
-
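
For context, a common client-side pattern around this exception; disableQuietly is a hypothetical helper, while Admin.disableTable throwing TableNotEnabledException for an already-disabled table is standard client behavior:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;

public class DisableQuietly {
  // Hypothetical helper: make "disable" idempotent by tolerating the case
  // where the table is already disabled.
  static void disableQuietly(Admin admin, TableName table) throws IOException {
    try {
      admin.disableTable(table);
    } catch (TableNotEnabledException e) {
      // already disabled; nothing to do
    }
  }
}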
-
-
-
-
-
-
-Method Summary
-
-
-
-
-Methods inherited from class java.lang.Throwable
-addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace