[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Table.html 
b/devapidocs/org/apache/hadoop/hbase/client/Table.html
index 201b071..35cba89 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Table.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Table.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":38,"i39":38,"i40":38,"i41":38,"i42":38};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":38,"i38":38,"i39":38,"i40":38};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -388,26 +388,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-long
-getWriteBufferSize()
-Deprecated.
-as of 1.0.1 (should not have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-
-
 int
 getWriteRpcTimeout()
 Get the timeout (in milliseconds) of each RPC write request in this Table instance.
 
 
-
+
 Result
 increment(Increment increment)
 Increments one or more columns within a single row.
 
 
-
+
 long
 incrementColumnValue(byte[] row,
 byte[] family,
@@ -416,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 See incrementColumnValue(byte[],
 byte[], byte[], long, Durability)
 
 
-
+
 long
 incrementColumnValue(byte[] row,
 byte[] family,
@@ -426,25 +418,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 Atomically increments a column value.
 
 
-
+
 void
 mutateRow(RowMutations rm)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 put(List<Put> puts)
 Puts some data in the table, in batch.
 
 
-
+
 void
 put(Put put)
 Puts some data in the table.
 
 
-
+
 void
 setOperationTimeout(int operationTimeout)
 Deprecated.
@@ -452,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setReadRpcTimeout(int readRpcTimeout)
 Deprecated.
@@ -460,7 +452,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setRpcTimeout(int rpcTimeout)
 Deprecated.
@@ -468,16 +460,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
-void
-setWriteBufferSize(long writeBufferSize)
-Deprecated.
-as of 1.0.1 (should not have been in 1.0.0). Replaced by BufferedMutator and
- BufferedMutatorParams.writeBufferSize(long)
-
-
-
-
+
 void
 setWriteRpcTimeout(int writeRpcTimeout)
 Deprecated.
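
The deprecation notes above steer client code from the Table write buffer to BufferedMutator. A minimal sketch of the replacement pattern, assuming a reachable cluster and an existing table; the table name "demo", family "cf", and the 4 MB buffer size are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // The buffer size now lives on BufferedMutatorParams instead of Table.setWriteBufferSize().
      BufferedMutatorParams params =
          new BufferedMutatorParams(TableName.valueOf("demo")).writeBufferSize(4 * 1024 * 1024);
      try (BufferedMutator mutator = conn.getBufferedMutator(params)) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        mutator.mutate(put); // buffered on the client
        mutator.flush();     // explicit flush; close() flushes as well
      }
    }
  }
}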
@@ -800,19 +783,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 put
-void put(List<Put> puts)
+void put(List<Put> puts)
          throws IOException
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined batches.
 
 Parameters:
-puts - The list of mutations to apply. The batch put is done by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 IOException - if a remote or network exception occurs.
 Since:
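
For the put(List<Put>) contract documented above, a minimal client-side sketch; it assumes a reachable cluster and an existing table, and the table name "demo", family "cf", and row keys are illustrative:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      List<Put> puts = new ArrayList<>();
      for (int i = 0; i < 100; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v-" + i));
        puts.add(put);
      }
      table.put(puts); // one batched call; IOException signals a remote or network failure
    }
  }
}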
@@ -826,7 +804,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 checkAndPut
-boolean checkAndPut(byte[] row,
+boolean checkAndPut(byte[] row,
 byte[] family,
 byte[] qualifier,
 byte[] value,
@@ 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/coc.html
--
diff --git a/coc.html b/coc.html
index edaf4b9..d22d6e0 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-02
+  Last Published: 
2017-08-09
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 054cbd9..b7a52f1 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-02
+  Last Published: 
2017-08-09
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 094a434..6379409 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -527,7 +527,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-02
+  Last Published: 
2017-08-09
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 00e612d..f07ef52 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -290,10 +290,10 @@
 34
 
 Number of dependencies (NOD):
-289
+291
 
 Number of unique artifacts (NOA):
-311
+313
 
 Number of version-conflicting artifacts (NOC):
 15
@@ -345,13 +345,13 @@
 
 
 org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test - omitted for 
duplicate)\-org.apache.hadoop:hadoop-hdfs:test-jar:tests:2.7.1:test\-(com.google.guava:guava:jar:11.0.2:test - omitted 
for duplicate)
+org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-client:jar:2.7.1:compile|+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile||\-(com.google.guava:guava:jar:11.0.2:compile 
- omitted for conflict with 14.0.1)|+-org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.1:compile||\-org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.1:compile||+-org.apache.hadoop:hadoop-yarn-client:jar:2.7.1:compile|||\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for conflict with 14.0.1)||\-org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.1:compile||\-(com.google.guava:guava:jar:11.0.2:comp
 ile - omitted for conflict with 14.0.1)|+-org.apache.hadoop:hadoop-yarn-api:jar:2.7.1:compile||\-(com.google.guava:guava:jar:11.0.2:compile 
- omitted for conflict with 14.0.1)|\-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for conflict with 14.0.1)+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test - omitted for 
duplicate)\-org.apache.hadoop:hadoop-hdfs:test-jar:tests:2.7.1:test\-(com.google.guava:guava:jar:11.0.2:te
 st - omitted for duplicate)
 
 14.0.1
 
 
 org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.spark:spark-core_2.10:jar:1.6.0:provided\-org.tachyonproject:tachyon-client:jar:0.8.2:provided+-(com.google.guava:guava:jar:14.0.1:provided
 - omitted for conflict with 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 35d5549..7f42873 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -115,2816 +115,2814 @@
 107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
 109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-150import 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 5c95397..860416b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -293,7944 +293,7962 @@
 285  final AtomicLong compactionsFailed = 
new AtomicLong(0L);
 286  final AtomicLong 
compactionNumFilesCompacted = new AtomicLong(0L);
 287  final AtomicLong 
compactionNumBytesCompacted = new AtomicLong(0L);
-288
-289  private final WAL wal;
-290  private final HRegionFileSystem fs;
-291  protected final Configuration conf;
-292  private final Configuration baseConf;
-293  private final int 
rowLockWaitDuration;
-294  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
-295
-296  // The internal wait duration to 
acquire a lock before read/update
-297  // from the region. It is not per row. 
The purpose of this wait time
-298  // is to avoid waiting a long time 
while the region is busy, so that
-299  // we can release the IPC handler soon 
enough to improve the
-300  // availability of the region server. 
It can be adjusted by
-301  // tuning configuration 
"hbase.busy.wait.duration".
-302  final long busyWaitDuration;
-303  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-304
-305  // If updating multiple rows in one 
call, wait longer,
-306  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-307  // we can limit the max multiplier.
-308  final int maxBusyWaitMultiplier;
-309
-310  // Max busy wait duration. There is no 
point to wait longer than the RPC
-311  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-312  final long maxBusyWaitDuration;
-313
-314  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-315  // in bytes
-316  final long maxCellSize;
-317
-318  // negative number indicates infinite 
timeout
-319  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-320  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-321
-322  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+288  final AtomicLong compactionsQueued = 
new AtomicLong(0L);
+289  final AtomicLong flushesQueued = new 
AtomicLong(0L);
+290
+291  private final WAL wal;
+292  private final HRegionFileSystem fs;
+293  protected final Configuration conf;
+294  private final Configuration baseConf;
+295  private final int 
rowLockWaitDuration;
+296  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
+297
+298  // The internal wait duration to 
acquire a lock before read/update
+299  // from the region. It is not per row. 
The purpose of this wait time
+300  // is to avoid waiting a long time 
while the region is busy, so that
+301  // we can release the IPC handler soon 
enough to improve the
+302  // availability of the region server. 
It can be adjusted by
+303  // tuning configuration 
"hbase.busy.wait.duration".
+304  final long busyWaitDuration;
+305  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+306
+307  // If updating multiple rows in one 
call, wait longer,
+308  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+309  // we can limit the max multiplier.
+310  final int maxBusyWaitMultiplier;
+311
+312  // Max busy wait duration. There is no 
point to wait longer than the RPC
+313  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+314  final long maxBusyWaitDuration;
+315
+316  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+317  // in bytes
+318  final long maxCellSize;
+319
+320  // negative number indicates infinite 
timeout
+321  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+322  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
 323
-324  /**
-325   * The sequence ID that was 
enLongAddered when this region was opened.
-326   */
-327  private long openSeqNum = 
HConstants.NO_SEQNUM;
-328
-329  /**
-330   * The default setting for whether to 
enable on-demand CF loading for
-331   * scan requests to this region. 
Requests can override it.
-332   */
-333  private boolean 
isLoadingCfsOnDemandDefault = false;
-334
-335  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-336  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
-337
-338  //
-339  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-340  // have to be conservative in how we 
replay wals. For 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
new file mode 100644
index 000..a7d4211
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/IncludeAllCompactionQueryMatcher.html
@@ -0,0 +1,384 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+IncludeAllCompactionQueryMatcher (Apache HBase 3.0.0-SNAPSHOT 
API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher
+Class 
IncludeAllCompactionQueryMatcher
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.MinorCompactionScanQueryMatcher
+
+
+org.apache.hadoop.hbase.regionserver.querymatcher.IncludeAllCompactionQueryMatcher
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+ShipperListener
+
+
+
+@InterfaceAudience.Private
+public class IncludeAllCompactionQueryMatcher
+extends MinorCompactionScanQueryMatcher
+A compaction query matcher that always returns INCLUDE and drops nothing.
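
The contract stated above (always return INCLUDE, drop nothing) amounts to a match(Cell) override that never skips, seeks, or deletes. A standalone, simplified sketch of that shape; it does not reproduce the real class, and the enum below only mirrors the MatchCode names listed in the match(Cell) summary further down:

// Simplified sketch only; the real matcher extends MinorCompactionScanQueryMatcher.
public class IncludeAllMatcherSketch {
  // Mirrors the MatchCode values named in the match(Cell) summary.
  enum MatchCode { INCLUDE, SKIP, SEEK_NEXT_ROW, SEEK_NEXT_COL, DONE }

  // Always include the current cell; nothing is skipped or dropped during compaction.
  public MatchCode match(Object cell) {
    return MatchCode.INCLUDE;
  }

  public static void main(String[] args) {
    System.out.println(new IncludeAllMatcherSketch().match("any-cell")); // prints INCLUDE
  }
}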
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher
+ScanQueryMatcher.MatchCode
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher
+deletes,
 keepDeletedCells,
 maxReadPointToTrackVersions
+
+
+
+
+
+Fields inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher
+columns,
 currentRow,
 now,
 oldestUnexpiredTS,
 rowComparator,
 startKey
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+IncludeAllCompactionQueryMatcher(ScanInfo scanInfo,
+DeleteTracker deletes,
+ColumnTracker columns,
+long readPointToUse,
+long oldestUnexpiredTS,
+long now)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+ScanQueryMatcher.MatchCode
+match(Cell cell)
+Determines if the caller should do one of several things:
+ 
+ seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
+ seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
+ include the current KeyValue (MatchCode.INCLUDE)
+ ignore the current KeyValue (MatchCode.SKIP)
+ go to the next row (MatchCode.DONE)
+ 
+
+
+
+
+
+
+
+Methods inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher
+beforeShipped,
 create,
 getFilter,
 getNextKeyHint,
 hasNullColumnInQuery,
 isUserScan, 
moreRowsMayExistAfter,
 reset,
 trackDelete
+
+
+
+
+
+Methods inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher
+checkColumn,
 checkDeleted,
 clearCurrentRow,
 compareKeyForNextColumn,
 compareKeyForNextRow, createStartKeyFromRow,
 currentRow,
 getKeyForNextColumn,
 getStartKey,
 getTrackers,
 preCheck,
 setToNewRow
+
+
+
+
+
+Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index eb9ae22..b0b5636 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class ProcedureExecutor<TEnvironment>
+public class ProcedureExecutor<TEnvironment>
 extends Object
 Thread pool that executes the submitted procedures.
 The executor has an associated ProcedureStore.
@@ -150,8 +150,16 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private static class
+ProcedureExecutor.CompletedProcedureRetainer
+
+
+private static class
 ProcedureExecutor.DelayedProcedure
 
+
+private static class
+ProcedureExecutor.FailedProcedure<TEnvironment>
+
 
 private static class
 ProcedureExecutor.InlineChore
@@ -210,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 checkOwnerSet
 
 
-private ConcurrentHashMap<Long,ProcedureInfo>
+private ConcurrentHashMap<Long,ProcedureExecutor.CompletedProcedureRetainer>
 completed
 Map the procId returned by submitProcedure(), the Root-ProcID, to the ProcedureInfo.
 
@@ -447,11 +455,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 getProcedure(longprocId)
 
 
-ProcedureInfo
+Procedure
 getResult(longprocId)
 
 
-Pair<ProcedureInfo,Procedure>
+Procedure
 getResultOrProcedure(longprocId)
 
 
@@ -509,7 +517,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 join()
 
 
-List<ProcedureInfo>
+List<Procedure>
 listProcedures()
 List procedures.
 
@@ -675,7 +683,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -684,7 +692,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CHECK_OWNER_SET_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CHECK_OWNER_SET_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CHECK_OWNER_SET_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -697,7 +705,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DEFAULT_CHECK_OWNER_SET
-private static finalboolean DEFAULT_CHECK_OWNER_SET
+private static finalboolean DEFAULT_CHECK_OWNER_SET
 
 See Also:
 Constant
 Field Values
@@ -710,7 +718,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WORKER_KEEP_ALIVE_TIME_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WORKER_KEEP_ALIVE_TIME_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WORKER_KEEP_ALIVE_TIME_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -723,7 +731,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DEFAULT_WORKER_KEEP_ALIVE_TIME
-private static finallong DEFAULT_WORKER_KEEP_ALIVE_TIME
+private static finallong DEFAULT_WORKER_KEEP_ALIVE_TIME
 
 See Also:
 Constant
 Field Values
@@ -736,7 +744,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testing
-ProcedureExecutor.Testing testing
+ProcedureExecutor.Testing testing
 
 
 
@@ -745,7 +753,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 completed
-private 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index 6c200a1..e6f8c2e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -1350,726 +1350,719 @@
 1342  }
 1343
 1344  @Override
-1345  public 
MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
-1346  RpcController controller, 
MasterProtos.DispatchMergingRegionsRequest request)
-1347  throws ServiceException {
-1348return 
stub.dispatchMergingRegions(controller, request);
-1349  }
-1350
-1351  @Override
-1352  public 
MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
-1353  
MasterProtos.AssignRegionRequest request) throws ServiceException {
-1354return 
stub.assignRegion(controller, request);
-1355  }
-1356
-1357  @Override
-1358  public 
MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
-1359  
MasterProtos.UnassignRegionRequest request) throws ServiceException {
-1360return 
stub.unassignRegion(controller, request);
-1361  }
-1362
-1363  @Override
-1364  public 
MasterProtos.OfflineRegionResponse offlineRegion(RpcController controller,
-1365  
MasterProtos.OfflineRegionRequest request) throws ServiceException {
-1366return 
stub.offlineRegion(controller, request);
-1367  }
-1368
-1369  @Override
-1370  public 
MasterProtos.SplitTableRegionResponse splitRegion(RpcController controller,
-1371  
MasterProtos.SplitTableRegionRequest request) throws ServiceException {
-1372return 
stub.splitRegion(controller, request);
-1373  }
-1374
-1375  @Override
-1376  public 
MasterProtos.DeleteTableResponse deleteTable(RpcController controller,
-1377  
MasterProtos.DeleteTableRequest request) throws ServiceException {
-1378return 
stub.deleteTable(controller, request);
-1379  }
-1380
-1381  @Override
-1382  public 
MasterProtos.TruncateTableResponse truncateTable(RpcController controller,
-1383  
MasterProtos.TruncateTableRequest request) throws ServiceException {
-1384return 
stub.truncateTable(controller, request);
-1385  }
-1386
-1387  @Override
-1388  public 
MasterProtos.EnableTableResponse enableTable(RpcController controller,
-1389  
MasterProtos.EnableTableRequest request) throws ServiceException {
-1390return 
stub.enableTable(controller, request);
-1391  }
-1392
-1393  @Override
-1394  public 
MasterProtos.DisableTableResponse disableTable(RpcController controller,
-1395  
MasterProtos.DisableTableRequest request) throws ServiceException {
-1396return 
stub.disableTable(controller, request);
-1397  }
-1398
-1399  @Override
-1400  public 
MasterProtos.ModifyTableResponse modifyTable(RpcController controller,
-1401  
MasterProtos.ModifyTableRequest request) throws ServiceException {
-1402return 
stub.modifyTable(controller, request);
-1403  }
-1404
-1405  @Override
-1406  public 
MasterProtos.CreateTableResponse createTable(RpcController controller,
-1407  
MasterProtos.CreateTableRequest request) throws ServiceException {
-1408return 
stub.createTable(controller, request);
-1409  }
-1410
-1411  @Override
-1412  public 
MasterProtos.ShutdownResponse shutdown(RpcController controller,
-1413  MasterProtos.ShutdownRequest 
request) throws ServiceException {
-1414return stub.shutdown(controller, 
request);
-1415  }
-1416
-1417  @Override
-1418  public 
MasterProtos.StopMasterResponse stopMaster(RpcController controller,
-1419  MasterProtos.StopMasterRequest 
request) throws ServiceException {
-1420return 
stub.stopMaster(controller, request);
+1345  public 
MasterProtos.AssignRegionResponse assignRegion(RpcController controller,
+1346  
MasterProtos.AssignRegionRequest request) throws ServiceException {
+1347return 
stub.assignRegion(controller, request);
+1348  }
+1349
+1350  @Override
+1351  public 
MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller,
+1352  
MasterProtos.UnassignRegionRequest request) throws ServiceException {
+1353return 
stub.unassignRegion(controller, request);
+1354  }
+1355
+1356  @Override
+1357  public 
MasterProtos.OfflineRegionResponse 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
index f26f8aa..8f0943d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
@@ -284,944 +284,951 @@
 276this.mvccReadPoint = 
scan.getMvccReadPoint();
 277this.limit = scan.getLimit();
 278this.needCursorResult = 
scan.isNeedCursorResult();
-279  }
-280
-281  /**
-282   * Builds a scan object with the same 
specs as get.
-283   * @param get get to model scan after
-284   */
-285  public Scan(Get get) {
-286this.startRow = get.getRow();
-287this.includeStartRow = true;
-288this.stopRow = get.getRow();
-289this.includeStopRow = true;
-290this.filter = get.getFilter();
-291this.cacheBlocks = 
get.getCacheBlocks();
-292this.maxVersions = 
get.getMaxVersions();
-293this.storeLimit = 
get.getMaxResultsPerColumnFamily();
-294this.storeOffset = 
get.getRowOffsetPerColumnFamily();
-295this.tr = get.getTimeRange();
-296this.familyMap = 
get.getFamilyMap();
-297this.asyncPrefetch = false;
-298this.consistency = 
get.getConsistency();
-299
this.setIsolationLevel(get.getIsolationLevel());
-300this.loadColumnFamiliesOnDemand = 
get.getLoadColumnFamiliesOnDemandValue();
-301for (Map.EntryString, byte[] 
attr : get.getAttributesMap().entrySet()) {
-302  setAttribute(attr.getKey(), 
attr.getValue());
-303}
-304for (Map.Entrybyte[], 
TimeRange entry : get.getColumnFamilyTimeRange().entrySet()) {
-305  TimeRange tr = entry.getValue();
-306  
setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
-307}
-308this.mvccReadPoint = -1L;
-309  }
-310
-311  public boolean isGetScan() {
-312    return includeStartRow && includeStopRow
-313        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
-314  }
-315
-316  /**
-317   * Get all columns from the specified 
family.
-318   * p
-319   * Overrides previous calls to 
addColumn for this family.
-320   * @param family family name
-321   * @return this
-322   */
-323  public Scan addFamily(byte [] family) 
{
-324familyMap.remove(family);
-325familyMap.put(family, null);
-326return this;
-327  }
-328
-329  /**
-330   * Get the column from the specified 
family with the specified qualifier.
-331   * p
-332   * Overrides previous calls to 
addFamily for this family.
-333   * @param family family name
-334   * @param qualifier column qualifier
-335   * @return this
-336   */
-337  public Scan addColumn(byte [] family, 
byte [] qualifier) {
-338    NavigableSet<byte []> set = familyMap.get(family);
-339    if (set == null) {
-340      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
-341}
-342if (qualifier == null) {
-343  qualifier = 
HConstants.EMPTY_BYTE_ARRAY;
-344}
-345set.add(qualifier);
-346familyMap.put(family, set);
-347return this;
-348  }
-349
-350  /**
-351   * Set versions of columns only within 
the specified timestamp range,
-352   * [minStamp, maxStamp).  Note, default 
maximum versions to return is 1.  If
-353   * your time range spans more than one 
version and you want all versions
-354   * returned, up the number of versions 
beyond the default.
-355   * @param minStamp minimum timestamp 
value, inclusive
-356   * @param maxStamp maximum timestamp 
value, exclusive
-357   * @see #setMaxVersions()
-358   * @see #setMaxVersions(int)
-359   * @return this
-360   */
-361  @Override
-362  public Scan setTimeRange(long minStamp, 
long maxStamp) throws IOException {
-363return (Scan) 
super.setTimeRange(minStamp, maxStamp);
-364  }
-365
-366  /**
-367   * Set versions of columns only within 
the specified timestamp range,
-368   * @param tr Input TimeRange
-369   * @return this for invocation 
chaining
-370   */
-371  public Scan setTimeRange(TimeRange tr) 
{
-372return (Scan) 
super.setTimeRange(tr);
-373  }
-374
-375  /**
-376   * Get versions of columns with the 
specified timestamp. Note, default maximum
-377   * versions to return is 1.  If your 
time range spans more than one version
-378   * and you want all versions returned, 
up the number of versions beyond the
-379   * default.
-380   * @param timestamp version timestamp
-381   * @see #setMaxVersions()
-382   * @see #setMaxVersions(int)
-383   * @return this
-384   */
-385  public Scan setTimeStamp(long 
timestamp)
-386  throws IOException {
-387try {
-388  super.setTimeRange(timestamp, 
timestamp + 1);
-389} catch(Exception e) {
-390  // This should never happen, unless 
integer overflow or something extremely wrong...
-391  LOG.error("TimeRange failed, likely 
caused by integer overflow. ", e);
-392  throw e;
-393  
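
The constructors and setters shown above (Scan(Get), addFamily, addColumn, setTimeRange, setTimeStamp) are public client API; a small self-contained sketch of building a Scan with them, with the family, qualifier, and row names purely illustrative:

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanBuilderSketch {
  public static void main(String[] args) throws Exception {
    // Narrow scan: a single column, bounded to [minStamp, maxStamp).
    Scan scan = new Scan()
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) // overrides an earlier addFamily("cf")
        .setTimeRange(0L, System.currentTimeMillis());

    // Model a scan after a Get, as the Scan(Get) constructor above does.
    Get get = new Get(Bytes.toBytes("row1"));
    Scan getScan = new Scan(get);
    System.out.println(scan + " isGetScan=" + getScan.isGetScan());
  }
}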

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 0d66ce8..04c172d 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -273,12 +273,12 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
-org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
 org.apache.hadoop.hbase.io.hfile.BlockType
-org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
+org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
+org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
 org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 org.apache.hadoop.hbase.io.hfile.BlockPriority
+org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
index 82b1548..9db3972 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
@@ -213,7 +213,7 @@ implements compressor
 
 
-private static 
com.google.common.cache.LoadingCachehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddress,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger
+private static 
org.apache.hadoop.hbase.shaded.com.google.common.cache.LoadingCachehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddress,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger
 concurrentCounterCache
 
 
@@ -746,7 +746,7 @@ implements 
 
 concurrentCounterCache
-private static finalcom.google.common.cache.LoadingCachehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddress,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger concurrentCounterCache
+private static 
finalorg.apache.hadoop.hbase.shaded.com.google.common.cache.LoadingCachehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddress,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger concurrentCounterCache
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/ipc/RpcClientFactory.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/RpcClientFactory.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcClientFactory.html
index 5bbb857..4d7285f 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/RpcClientFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/RpcClientFactory.html
@@ -136,7 +136,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 CUSTOM_RPC_CLIENT_IMPL_CONF_KEY
 
 
-private static 
com.google.common.collect.ImmutableMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+private static 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
index 53cae9a..64d0880 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
@@ -1034,289 +1034,283 @@
 1026  protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
 1027      final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
 1028      throws IOException {
-1029    final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
-1030for (LoadQueueItem lqi : lqis) {
-1031  if 
(!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-1032
famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-1033  }
-1034}
-1035try {
-1036  ListLoadQueueItem toRetry 
= new ArrayList();
-1037  Configuration conf = getConf();
-1038  byte[] region = 
RpcRetryingCallerFactory.instantiate(conf,
-1039  null).byte[] 
newCaller()
-1040  
.callWithRetries(serviceCallable, Integer.MAX_VALUE);
-1041  if (region == null) {
-1042LOG.warn("Attempt to bulk load 
region containing "
-1043+ 
Bytes.toStringBinary(first) + " into table "
-1044+ tableName  + " with files 
" + lqis
-1045+ " failed.  This is 
recoverable and they will be retried.");
-1046toRetry.addAll(lqis); // return 
lqi's to retry
-1047  }
-1048  // success
-1049  return toRetry;
-1050} catch (IOException e) {
-1051  LOG.error("Encountered 
unrecoverable error from region server, additional details: "
-1052  + 
serviceCallable.getExceptionMessageAdditionalDetail(), e);
-1053  throw e;
-1054}
-1055  }
-1056
-1057  private final String 
toString(ListPairbyte[], String list) {
-1058StringBuffer sb = new 
StringBuffer();
-1059sb.append("[");
-1060if(list != null){
-1061  for(Pairbyte[], String 
pair: list) {
-1062sb.append("{");
-1063
sb.append(Bytes.toStringBinary(pair.getFirst()));
-1064sb.append(",");
-1065sb.append(pair.getSecond());
-1066sb.append("}");
-1067  }
-1068}
-1069sb.append("]");
-1070return sb.toString();
-1071  }
-1072  private boolean 
isSecureBulkLoadEndpointAvailable() {
-1073String classes = 
getConf().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-1074return 
classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
-1075  }
-1076
-1077  /**
-1078   * Split a storefile into a top and 
bottom half, maintaining
-1079   * the metadata, recreating bloom 
filters, etc.
-1080   */
-1081  static void splitStoreFile(
-1082  Configuration conf, Path inFile,
-1083  HColumnDescriptor familyDesc, 
byte[] splitKey,
-1084  Path bottomOut, Path topOut) 
throws IOException {
-1085// Open reader with no block cache, 
and not in-memory
-1086Reference topReference = 
Reference.createTopReference(splitKey);
-1087Reference bottomReference = 
Reference.createBottomReference(splitKey);
-1088
-1089copyHFileHalf(conf, inFile, topOut, 
topReference, familyDesc);
-1090copyHFileHalf(conf, inFile, 
bottomOut, bottomReference, familyDesc);
-1091  }
-1092
-1093  /**
-1094   * Copy half of an HFile into a new 
HFile.
-1095   */
-1096  private static void copyHFileHalf(
-1097  Configuration conf, Path inFile, 
Path outFile, Reference reference,
-1098  HColumnDescriptor 
familyDescriptor)
-1099  throws IOException {
-1100FileSystem fs = 
inFile.getFileSystem(conf);
-1101CacheConfig cacheConf = new 
CacheConfig(conf);
-1102HalfStoreFileReader halfReader = 
null;
-1103StoreFileWriter halfWriter = null;
-1104try {
-1105  halfReader = new 
HalfStoreFileReader(fs, inFile, cacheConf, reference, true,
-1106  new AtomicInteger(0), true, 
conf);
-1107  Mapbyte[], byte[] fileInfo 
= halfReader.loadFileInfo();
-1108
-1109  int blocksize = 
familyDescriptor.getBlocksize();
-1110  Algorithm compression = 
familyDescriptor.getCompressionType();
-  BloomType bloomFilterType = 
familyDescriptor.getBloomFilterType();
-1112  HFileContext hFileContext = new 
HFileContextBuilder()
-1113  
.withCompression(compression)
-1114  
.withChecksumType(HStore.getChecksumType(conf))
-1115  
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
-1116  

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 09be381..e1de213 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MergeTableRegionsProcedure
+public class MergeTableRegionsProcedure
 extends AbstractStateMachineTableProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState>
 The procedure to merge regions in a table.
 This procedure takes an exclusive table lock since it is working over multiple regions.
@@ -556,7 +556,7 @@ extends 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -565,7 +565,7 @@ extends 
 
 traceEnabled
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean traceEnabled
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean traceEnabled
 
 
 
@@ -574,7 +574,7 @@ extends 
 
 lock
-private volatileboolean lock
+private volatileboolean lock
 
 
 
@@ -583,7 +583,7 @@ extends 
 
 regionLocation
-privateServerName regionLocation
+privateServerName regionLocation
 
 
 
@@ -592,7 +592,7 @@ extends 
 
 regionsToMerge
-privateHRegionInfo[] regionsToMerge
+privateHRegionInfo[] regionsToMerge
 
 
 
@@ -601,7 +601,7 @@ extends 
 
 mergedRegion
-privateHRegionInfo mergedRegion
+privateHRegionInfo mergedRegion
 
 
 
@@ -610,7 +610,7 @@ extends 
 
 forcible
-privateboolean forcible
+privateboolean forcible
 
 
 
@@ -627,7 +627,7 @@ extends 
 
 MergeTableRegionsProcedure
-publicMergeTableRegionsProcedure()
+publicMergeTableRegionsProcedure()
 
 
 
@@ -636,7 +636,7 @@ extends 
 
 MergeTableRegionsProcedure
-publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
+publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
   HRegionInforegionToMergeA,
   HRegionInforegionToMergeB)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -652,7 +652,7 @@ extends 
 
 MergeTableRegionsProcedure
-publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
+publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
   HRegionInforegionToMergeA,
   HRegionInforegionToMergeB,
   booleanforcible)
@@ -669,7 +669,7 @@ extends 
 
 MergeTableRegionsProcedure
-publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
+publicMergeTableRegionsProcedure(MasterProcedureEnvenv,
   HRegionInfo[]regionsToMerge,
   booleanforcible)
throws MergeRegionException
@@ -693,7 +693,7 @@ extends 
 
 checkRegionsToMerge
-private staticvoidcheckRegionsToMerge(HRegionInfo[]regionsToMerge,
+private staticvoidcheckRegionsToMerge(HRegionInfo[]regionsToMerge,
 booleanforcible)
  throws MergeRegionException
 
@@ -708,7 +708,7 @@ extends 
 
 checkRegionsToMerge
-private staticvoidcheckRegionsToMerge(HRegionInforegionToMergeA,
+private staticvoidcheckRegionsToMerge(HRegionInforegionToMergeA,
 HRegionInforegionToMergeB,
 booleanforcible)
  throws MergeRegionException
@@ -724,7 +724,7 @@ extends 
 
 createMergedRegionInfo
-private staticHRegionInfocreateMergedRegionInfo(HRegionInfo[]regionsToMerge)
+private staticHRegionInfocreateMergedRegionInfo(HRegionInfo[]regionsToMerge)
 
 
 
@@ -733,7 +733,7 @@ extends 
 
 createMergedRegionInfo
-private staticHRegionInfocreateMergedRegionInfo(HRegionInforegionToMergeA,
+private staticHRegionInfocreateMergedRegionInfo(HRegionInforegionToMergeA,
   HRegionInforegionToMergeB)
 Create merged region info through the specified two 
regions
 
@@ -744,7 +744,7 @@ extends 
 
 getMergedRegionIdTimestamp
-private staticlonggetMergedRegionIdTimestamp(HRegionInforegionToMergeA,
+private staticlonggetMergedRegionIdTimestamp(HRegionInforegionToMergeA,
HRegionInforegionToMergeB)
 
 
@@ -754,7 +754,7 @@ 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
index 0a32350..cf44d69 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
@@ -75,735 +75,796 @@
 067import 
org.apache.hadoop.conf.Configuration;
 068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
 069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.FileSystem;
-071import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-072import org.apache.hadoop.fs.Path;
-073import 
org.apache.hadoop.fs.UnresolvedLinkException;
-074import 
org.apache.hadoop.fs.permission.FsPermission;
-075import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-076import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-077import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-078import 
org.apache.hadoop.hbase.util.FSUtils;
-079import 
org.apache.hadoop.hdfs.DFSClient;
-080import 
org.apache.hadoop.hdfs.DFSOutputStream;
-081import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-082import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-083import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-084import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-085import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-086import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-088import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-092import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-093import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-100import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-102import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-103import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-104import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-105import 
org.apache.hadoop.io.EnumSetWritable;
-106import 
org.apache.hadoop.ipc.RemoteException;
-107import org.apache.hadoop.net.NetUtils;
-108import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-109import 
org.apache.hadoop.security.token.Token;
-110import 
org.apache.hadoop.util.DataChecksum;
-111
-112/**
-113 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-114 */
-115@InterfaceAudience.Private
-116public final class 
FanOutOneBlockAsyncDFSOutputHelper {
-117
-118  private static final Log LOG = 
LogFactory.getLog(FanOutOneBlockAsyncDFSOutputHelper.class);
-119
-120  private 
FanOutOneBlockAsyncDFSOutputHelper() {
-121  }
-122
-123  // use pooled allocator for 
performance.
-124  private static final ByteBufAllocator 
ALLOC = PooledByteBufAllocator.DEFAULT;
-125
-126  // copied from DFSPacket since it is 
package private.
-127  public static final long 
HEART_BEAT_SEQNO = -1L;
-128
-129  // Timeouts for communicating with 
DataNode for streaming writes/reads
-130  public static final int READ_TIMEOUT = 
60 * 1000;
-131  public static final int 
READ_TIMEOUT_EXTENSION = 5 * 1000;
-132  public static final int WRITE_TIMEOUT = 
8 * 60 * 1000;
-133
-134  // helper class for getting Status from 
PipelineAckProto. In hadoop 2.6 or before, there is a
-135  // getStatus method, and for hadoop 2.7 
or after, the status is retrieved from flag. The flag may
-136  // get from proto directly, or combined 
by the reply field of the proto and a ECN object. See
-137  // createPipelineAckStatusGetter for 
more details.
-138  private interface 
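The truncated hunk above ends just as the helper interface is declared; the preceding comment explains why it exists: the way a DataNode ack's Status is exposed changed between hadoop 2.6 and 2.7, so the class hides the difference behind a small interface chosen once up front. A minimal sketch of that pattern follows; the reflective targets (getFlag on PipelineAckProto, getStatusFromHeader on PipelineAck) and the factory shape are assumptions for illustration, not the helper's actual code.

  // Sketch only; reflective method names below are assumptions, not the shipped implementation.
  private interface PipelineAckStatusGetter {
    Status get(PipelineAckProto ack);
  }

  private static PipelineAckStatusGetter createPipelineAckStatusGetter() {
    java.lang.reflect.Method getFlag;
    try {
      // hadoop 2.7+: the ack carries a packed flag; the status is decoded from it.
      getFlag = PipelineAckProto.class.getMethod("getFlag", int.class);
    } catch (NoSuchMethodException e) {
      // hadoop 2.6 and earlier: the status is read straight off the reply list.
      return ack -> ack.getReply(0);
    }
    return ack -> {
      try {
        int header = (Integer) getFlag.invoke(ack, 0);
        return PipelineAck.getStatusFromHeader(header);
      } catch (ReflectiveOperationException e) {
        throw new IllegalStateException(e);
      }
    };
  }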

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.html
index 4262b8f..0a3a353 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.html
@@ -53,81 +53,83 @@
 045    this.registry = registry;
 046  }
 047
-048  CompletableFuture<HRegionLocation> getRegionLocation() {
+048  CompletableFuture<HRegionLocation> getRegionLocation(boolean reload) {
 049    for (;;) {
-050      HRegionLocation metaRegionLocation = this.metaRegionLocation.get();
-051      if (metaRegionLocation != null) {
-052        return CompletableFuture.completedFuture(metaRegionLocation);
-053      }
-054      if (LOG.isTraceEnabled()) {
-055        LOG.trace("Meta region location cache is null, try fetching from registry.");
-056      }
-057      if (metaRelocateFuture.compareAndSet(null, new CompletableFuture<>())) {
-058        if (LOG.isDebugEnabled()) {
-059          LOG.debug("Start fetching meta region location from registry.");
-060        }
-061        CompletableFuture<HRegionLocation> future = metaRelocateFuture.get();
-062        registry.getMetaRegionLocation().whenComplete((locs, error) -> {
-063          if (error != null) {
-064            if (LOG.isDebugEnabled()) {
-065              LOG.debug("Failed to fetch meta region location from registry", error);
-066            }
-067            metaRelocateFuture.getAndSet(null).completeExceptionally(error);
-068            return;
-069          }
-070          HRegionLocation loc = locs.getDefaultRegionLocation();
-071          if (LOG.isDebugEnabled()) {
-072            LOG.debug("The fetched meta region location is " + loc);
-073          }
-074          // Here we update cache before reset future, so it is possible that someone can get a
-075          // stale value. Consider this:
-076          // 1. update cache
-077          // 2. someone clear the cache and relocate again
-078          // 3. the metaRelocateFuture is not null so the old future is used.
-079          // 4. we clear metaRelocateFuture and complete the future in it with the value being
-080          //    cleared in step 2.
-081          // But we do not think it is a big deal as it rarely happens, and even if it happens, the
-082          // caller will retry again later, no correctness problems.
-083          this.metaRegionLocation.set(loc);
-084          metaRelocateFuture.set(null);
-085          future.complete(loc);
-086        });
-087      } else {
-088        CompletableFuture<HRegionLocation> future = metaRelocateFuture.get();
-089        if (future != null) {
-090          return future;
-091        }
-092      }
-093    }
-094  }
-095
-096  void updateCachedLocation(HRegionLocation loc, Throwable exception) {
-097    AsyncRegionLocator.updateCachedLocation(loc, exception, l -> metaRegionLocation.get(),
-098      newLoc -> {
-099        for (;;) {
-100          HRegionLocation oldLoc = metaRegionLocation.get();
-101          if (oldLoc != null && (oldLoc.getSeqNum() > newLoc.getSeqNum() ||
-102              oldLoc.getServerName().equals(newLoc.getServerName()))) {
-103            return;
-104          }
-105          if (metaRegionLocation.compareAndSet(oldLoc, newLoc)) {
-106            return;
-107          }
-108        }
-109      }, l -> {
-110        for (;;) {
-111          HRegionLocation oldLoc = metaRegionLocation.get();
-112          if (!canUpdate(l, oldLoc) || metaRegionLocation.compareAndSet(oldLoc, null)) {
-113            return;
-114          }
-115        }
-116      });
-117  }
-118
-119  void clearCache() {
-120    metaRegionLocation.set(null);
-121  }
-122}
+050      if (!reload) {
+051        HRegionLocation metaRegionLocation = this.metaRegionLocation.get();
+052        if (metaRegionLocation != null) {
+053          return CompletableFuture.completedFuture(metaRegionLocation);
+054        }
+055      }
+056      if (LOG.isTraceEnabled()) {
+057        LOG.trace("Meta region location cache is null, try fetching from registry.");
+058      }
+059      if (metaRelocateFuture.compareAndSet(null, new CompletableFuture<>())) {
+060        if (LOG.isDebugEnabled()) {
+061          LOG.debug("Start fetching meta region location from registry.");
+062        }
+063        CompletableFuture<HRegionLocation> future = metaRelocateFuture.get();
+064        registry.getMetaRegionLocation().whenComplete((locs, error) -> {
+065          if (error != null) {
+066            if (LOG.isDebugEnabled()) {
+067              LOG.debug("Failed to fetch meta region location from registry", error);
+068            }
+069
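The removed comment block above (source lines 074-082) documents a benign race: the cache is updated before the in-flight future slot is cleared, so a reader can briefly observe a stale value, which only costs it a retry. A self-contained sketch of the same locate-once pattern, using an AtomicReference cache plus a CAS-guarded in-flight CompletableFuture; the names are illustrative and a String stands in for HRegionLocation, so this is not the HBase class itself.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Sketch of the locate-once pattern described in the removed comment.
class CachedAsyncLookup {
  private final AtomicReference<String> cache = new AtomicReference<>();
  private final AtomicReference<CompletableFuture<String>> inFlight = new AtomicReference<>();
  private final Supplier<CompletableFuture<String>> fetcher;

  CachedAsyncLookup(Supplier<CompletableFuture<String>> fetcher) {
    this.fetcher = fetcher;
  }

  CompletableFuture<String> get(boolean reload) {
    for (;;) {
      if (!reload) {
        String cached = cache.get();
        if (cached != null) {
          return CompletableFuture.completedFuture(cached);
        }
      }
      if (inFlight.compareAndSet(null, new CompletableFuture<>())) {
        CompletableFuture<String> future = inFlight.get();
        fetcher.get().whenComplete((value, error) -> {
          if (error != null) {
            inFlight.getAndSet(null).completeExceptionally(error);
            return;
          }
          // Update the cache before clearing the in-flight slot; a reader racing in
          // between may see a stale value, which only costs it a retry.
          cache.set(value);
          inFlight.set(null);
          future.complete(value);
        });
      } else {
        CompletableFuture<String> future = inFlight.get();
        if (future != null) {
          return future;
        }
        // the slot was cleared between our CAS attempt and the read; loop and retry
      }
    }
  }
}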

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 71844ce..75db22d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -105,2564 +105,2642 @@
 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-140import 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
index f5bc73a..feb42ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060      getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043          RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052      getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082      HColumnDescriptor[] 
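The new hunk above replaces the hand-rolled protobuf assembly with calls into RequestConverter. A sketch of the shape such a static builder takes, reusing the conversion loop from the removed code; this is illustrative only, since the actual RequestConverter body is not part of this diff.

  // Sketch of a RequestConverter-style builder; mirrors the removed inline code, not the shipped source.
  public static DrainRegionServersRequest buildDrainRegionServersRequest(List<ServerName> servers) {
    List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
    for (ServerName server : servers) {
      pbServers.add(ProtobufUtil.toServerName(server));
    }
    return DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
  }

Centralizing the proto construction this way keeps the admin methods down to a single RPC call and lets every caller share the same validation and conversion logic.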

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
index 290ffe3..986726c 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
@@ -238,7 +238,7 @@
 230      int reduceNum = context.getNumReduceTasks();
 231      Configuration conf = context.getConfiguration();
 232      TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-233      try (Connection conn = ConnectionFactory.createConnection(conf); 
+233      try (Connection conn = ConnectionFactory.createConnection(conf);
 234          RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
 235        byte[][] startKeys = regionLocator.getStartKeys();
 236        if (startKeys.length != reduceNum) {
@@ -630,10 +630,10 @@
 622
 623    if (hfileOutPath != null && conf.getBoolean(HAS_LARGE_RESULT, false)) {
 624      LOG.info("Use Large Result!!");
-625      try (Connection conn = ConnectionFactory.createConnection(conf); 
+625      try (Connection conn = ConnectionFactory.createConnection(conf);
 626          Table table = conn.getTable(tableName);
 627          RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-628        HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+628        HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
 629        job.setMapperClass(KeyValueSortImporter.class);
 630        job.setReducerClass(KeyValueReducer.class);
 631        Path outputDir = new Path(hfileOutPath);
@@ -663,7 +663,7 @@
 655        FileOutputFormat.setOutputPath(job, outputDir);
 656        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
 657        job.setMapOutputValueClass(KeyValue.class);
-658        HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+658        HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
 659        TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
 660            com.google.common.base.Preconditions.class);
 661      }
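The functional change in these hunks is Table.getTableDescriptor() giving way to Table.getDescriptor(); the trailing-whitespace edits are cosmetic. A condensed usage sketch of the surrounding incremental-load setup, assuming an existing MapReduce Job job, a Configuration conf and a TableName tableName:

  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(tableName);
      RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
    // getDescriptor() is the TableDescriptor-returning accessor used by the new code.
    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
  }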



[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 249d4a0..7369fdf 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -65,12 +65,12 @@
 057import com.google.common.base.Preconditions;
 058
 059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
+060 * Cacheable Blocks of an {@link HFile} version 2 file.
+061 * Version 2 was introduced in hbase-0.92.0.
+062 *
+063 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
+064 * format to support multi-level block indexes and compound bloom filters (HBASE-3857). Support
+065 * for Version 1 was removed in hbase-1.3.0.
 066 *
 067 * <h3>HFileBlock: Version 2</h3>
 068 * In version 2, a block is structured as follows:
@@ -120,582 +120,582 @@
 112public class HFileBlock implements Cacheable {
 113  private static final Log LOG = LogFactory.getLog(HFileBlock.class);
 114
-115  /** Type of block. Header field 0. */
-116  private BlockType blockType;
-117
-118  /**
-119   * Size on disk excluding header, including checksum. Header field 1.
-120   * @see Writer#putHeader(byte[], int, int, int, int)
-121   */
-122  private int onDiskSizeWithoutHeader;
-123
-124  /**
-125   * Size of pure data. Does not include header or checksums. Header field 2.
-126   * @see Writer#putHeader(byte[], int, int, int, int)
-127   */
-128  private int uncompressedSizeWithoutHeader;
-129
-130  /**
-131   * The offset of the previous block on disk. Header field 3.
-132   * @see Writer#putHeader(byte[], int, int, int, int)
-133   */
-134  private long prevBlockOffset;
-135
-136  /**
-137   * Size on disk of header + data. Excludes checksum. Header field 6,
-138   * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum.
-139   * @see Writer#putHeader(byte[], int, int, int, int)
-140   */
-141  private int onDiskDataSizeWithHeader;
-142
-143
-144  /**
-145   * The in-memory representation of the hfile block. Can be on or offheap. Can be backed by
-146   * a single ByteBuffer or by many. Make no assumptions.
-147   *
-148   * <p>Be careful reading from this <code>buf</code>. Duplicate and work on the duplicate or if
-149   * not, be sure to reset position and limit else trouble down the road.
-150   *
-151   * <p>TODO: Make this read-only once made.
-152   *
-153   * <p>We are using the ByteBuff type. ByteBuffer is not extensible yet we need to be able to have
-154   * a ByteBuffer-like API across multiple ByteBuffers reading from a cache such as BucketCache.
-155   * So, we have this ByteBuff type. Unfortunately, it is spread all about HFileBlock. Would be
-156   * good if could be confined to cache-use only but hard-to-do.
-157   */
-158  private ByteBuff buf;
-159
-160  /** Meta data that holds meta information on the hfileblock.
-161   */
-162  private HFileContext fileContext;
-163
-164  /**
-165   * The offset of this block in the file. Populated by the reader for
-166   * convenience of access. This offset is not part of the block header.
-167   */
-168  private long offset = UNSET;
-169
-170  private MemoryType memType = MemoryType.EXCLUSIVE;
-171
-172  /**
-173   * The on-disk size of the next block, including the header and checksums if present, obtained by
-174   * peeking into the first {@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes of the next block's
-175   * header, or UNSET if unknown.
-176   *
-177   * Blocks try to carry the size of the next block to read in this data member. They will even have
-178   * this value when served from cache. Could save a seek in the case where we are iterating through
-179   * a file and some of the blocks come from cache. If from cache, then having this info to hand
-180   * will save us doing a seek to read the header so we can read the body of a block.
-181   * TODO: see how effective this is at saving seeks.
-182   */
-183  private int nextBlockOnDiskSize = UNSET;
-184
-185  /**
-186   * On a checksum failure, do these many succeeding read requests using hdfs checksums before
-187   * auto-reenabling hbase checksum verification.
-188   */
-189  static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3;
-190
-191  
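The field comments above enumerate the version-2 header fields: the block type magic is field 0, the two sizes are fields 1 and 2, the previous block offset is field 3, and the on-disk data size is field 6. A small decoding sketch that follows that ordering; the checksum-related fields 4 and 5 and the exact offsets are assumptions here, since they are not spelled out in this excerpt.

  // Sketch: walking the v2 header fields described above from a ByteBuffer positioned at a block start.
  static void dumpHeader(java.nio.ByteBuffer header) {
    byte[] magic = new byte[8];
    header.get(magic);                                    // field 0: block type magic
    int onDiskSizeWithoutHeader = header.getInt();        // field 1: on-disk size excl. header, incl. checksum
    int uncompressedSizeWithoutHeader = header.getInt();  // field 2: pure data size
    long prevBlockOffset = header.getLong();              // field 3: offset of previous block on disk
    byte checksumType = header.get();                     // field 4 (assumed): checksum type
    int bytesPerChecksum = header.getInt();               // field 5 (assumed): bytes per checksum chunk
    int onDiskDataSizeWithHeader = header.getInt();       // field 6: header + data size, excl. checksum
    System.out.printf("type=%s onDisk=%d uncompressed=%d prev=%d dataWithHeader=%d%n",
        new String(magic, java.nio.charset.StandardCharsets.US_ASCII), onDiskSizeWithoutHeader,
        uncompressedSizeWithoutHeader, prevBlockOffset, onDiskDataSizeWithHeader);
  }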

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index c37b4f3..d271132 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":18,"i53":6,"i54":6,"i55":6,"i56":18,"i57":6,"i58":18,"i59":6,"i60":6,"i61":18,"i62":6,"i63":18,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":18,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":18,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":18,"i52":6,"i53":18,"i54":6,"i55":18,"i56":6,"i57":18,"i58":6,"i59":6,"i60":18,"i61":6,"i62":18,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":18,"i78":18,"i79":6,"i80":6,"i81":18,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-AsyncHBaseAdmin
+AsyncHBaseAdmin, RawAsyncHBaseAdmin
 
 
 
@@ -152,7 +152,7 @@ public interface 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-appendReplicationPeerTableCFs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid,
+appendReplicationPeerTableCFs(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapTableName,? extends http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collectionhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringtableCfs)
 Append the replicable table-cf config of the specified 
peer
 
@@ -215,7 +215,7 @@ public interface 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compactRegionServer(ServerNamesn)
+compactRegionServer(ServerNameserverName)
 Compact all regions on the region server.
 
 
@@ -226,20 +226,13 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 createTable(TableDescriptordesc)
 Creates a new table.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
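Most of the signature changes in this summary are parameter renames (id becomes peerId, sn becomes serverName) and createTable gaining a default implementation. The appendReplicationPeerTableCFs signature shown above is worth a usage note; a hedged sketch, assuming an AsyncAdmin instance named admin and an already configured peer "peer_1":

  Map<TableName, List<String>> tableCfs = new HashMap<>();
  tableCfs.put(TableName.valueOf("t1"), Arrays.asList("cf1", "cf2"));
  admin.appendReplicationPeerTableCFs("peer_1", tableCfs)
      .whenComplete((v, err) -> {
        if (err != null) {
          // the future completes exceptionally if the peer is missing or the update fails
        }
      });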

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index dc12c09..82506d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -54,2261 +54,2259 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.directory.api.util.OptionalComponentsMonitor;
-050import org.apache.hadoop.hbase.HRegionInfo;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NotServingRegionException;
-055import org.apache.hadoop.hbase.ProcedureInfo;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.NamespaceDescriptor;
-059import org.apache.hadoop.hbase.HConstants;
-060import org.apache.hadoop.hbase.TableExistsException;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.classification.InterfaceAudience;
-068import org.apache.hadoop.hbase.classification.InterfaceStability;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-071import org.apache.hadoop.hbase.client.Scan.ReadType;
-072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-073import org.apache.hadoop.hbase.client.replication.TableCFs;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 708cf4c..d93c615 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,8 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
- @InterfaceStability.Evolving
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -342,23 +341,17 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
-balancer()
+balance(booleanforcible)
 Invoke the balancer.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
-balancer(booleanforce)
-Invoke the balancer.
-
-
-
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor[]
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptor
 batchTableOperations(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in java.util.regex">Patternpattern,
 AsyncHBaseAdmin.TableOperatoroperator,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringoperationType)
 
-
+
 private 
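Besides dropping @InterfaceStability.Evolving, this hunk collapses the two balancer() overloads into a single balance(boolean forcible) whose CompletableFuture<Boolean> reports whether the balancer actually ran. A small usage sketch, assuming an AsyncAdmin handle named admin:

  admin.balance(false).thenAccept(ran -> {
    if (!ran) {
      // balancer was switched off or there was nothing to move
    }
  });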

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
index 49714a2..d0f1508 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.SwapRegionsAction.html
@@ -172,1438 +172,1562 @@
 164    Map<ServerName, List<HRegionInfo>> clusterState;
 165
 166    protected final RackManager rackManager;
-167
-168    protected Cluster(
-169        Map<ServerName, List<HRegionInfo>> clusterState,
-170        Map<String, Deque<BalancerRegionLoad>> loads,
-171        RegionLocationFinder regionFinder,
-172        RackManager rackManager) {
-173      this(null, clusterState, loads, regionFinder, rackManager);
-174    }
-175
-176    @SuppressWarnings("unchecked")
-177    protected Cluster(
-178        Collection<HRegionInfo> unassignedRegions,
-179        Map<ServerName, List<HRegionInfo>> clusterState,
-180        Map<String, Deque<BalancerRegionLoad>> loads,
-181        RegionLocationFinder regionFinder,
-182        RackManager rackManager) {
-183
-184      if (unassignedRegions == null) {
-185        unassignedRegions = EMPTY_REGION_LIST;
-186      }
+167    // Maps region -> rackIndex -> locality of region on rack
+168    private float[][] rackLocalities;
+169    // Maps localityType -> region -> [server|rack]Index with highest locality
+170    private int[][] regionsToMostLocalEntities;
+171
+172    protected Cluster(
+173        Map<ServerName, List<HRegionInfo>> clusterState,
+174        Map<String, Deque<BalancerRegionLoad>> loads,
+175        RegionLocationFinder regionFinder,
+176        RackManager rackManager) {
+177      this(null, clusterState, loads, regionFinder, rackManager);
+178    }
+179
+180    @SuppressWarnings("unchecked")
+181    protected Cluster(
+182        Collection<HRegionInfo> unassignedRegions,
+183        Map<ServerName, List<HRegionInfo>> clusterState,
+184        Map<String, Deque<BalancerRegionLoad>> loads,
+185        RegionLocationFinder regionFinder,
+186        RackManager rackManager) {
 187
-188      serversToIndex = new HashMap<>();
-189      hostsToIndex = new HashMap<>();
-190      racksToIndex = new HashMap<>();
-191      tablesToIndex = new HashMap<>();
-192
-193      //TODO: We should get the list of tables from master
-194      tables = new ArrayList<>();
-195      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+188      if (unassignedRegions == null) {
+189        unassignedRegions = EMPTY_REGION_LIST;
+190      }
+191
+192      serversToIndex = new HashMap<>();
+193      hostsToIndex = new HashMap<>();
+194      racksToIndex = new HashMap<>();
+195      tablesToIndex = new HashMap<>();
 196
-197      numRegions = 0;
-198
-199      List<List<Integer>> serversPerHostList = new ArrayList<>();
-200      List<List<Integer>> serversPerRackList = new ArrayList<>();
-201      this.clusterState = clusterState;
-202      this.regionFinder = regionFinder;
-203
-204      // Use servername and port as there can be dead servers in this list. We want everything with
-205      // a matching hostname and port to have the same index.
-206      for (ServerName sn : clusterState.keySet()) {
-207        if (sn == null) {
-208          LOG.warn("TODO: Enable TRACE on BaseLoadBalancer. Empty servername); " +
-209              "skipping; unassigned regions?");
-210          if (LOG.isTraceEnabled()) {
-211            LOG.trace("EMPTY SERVERNAME " + clusterState.toString());
-212          }
-213          continue;
-214        }
-215        if (serversToIndex.get(sn.getAddress().toString()) == null) {
-216          serversToIndex.put(sn.getHostAndPort(), numServers++);
-217        }
-218        if (!hostsToIndex.containsKey(sn.getHostname())) {
-219          hostsToIndex.put(sn.getHostname(), numHosts++);
-220          serversPerHostList.add(new ArrayList<>(1));
+197      //TODO: We should get the list of tables from master
+198      tables = new ArrayList<>();
+199      this.rackManager = rackManager != null ? rackManager : new DefaultRackManager();
+200
+201      numRegions = 0;
+202
+203      List<List<Integer>> serversPerHostList = new ArrayList<>();
+204      List<List<Integer>> serversPerRackList = new ArrayList<>();
+205      this.clusterState = clusterState;
+206      this.regionFinder = regionFinder;
+207
+208      // Use servername and port as there can be dead servers in this list. We want everything with
+209      // a matching hostname and port to have the same index.
+210      for (ServerName sn : 
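The two new fields introduced above cache, for every region, how local its data is on each rack (rackLocalities) and which server or rack holds the most local copy (regionsToMostLocalEntities), so the cost functions can look these up in constant time. A sketch of the second mapping being derived from the first; the array names and shapes are illustrative only, not the balancer's actual fields.

  // Sketch: derive "most local rack per region" from a per-region, per-rack locality table.
  static int[] mostLocalRackPerRegion(float[][] rackLocalities) {
    int[] best = new int[rackLocalities.length];
    for (int region = 0; region < rackLocalities.length; region++) {
      int bestRack = 0;
      for (int rack = 1; rack < rackLocalities[region].length; rack++) {
        if (rackLocalities[region][rack] > rackLocalities[region][bestRack]) {
          bestRack = rack;
        }
      }
      best[region] = bestRack;
    }
    return best;
  }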

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 95efd69..111aa1f 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 2228,
- Errors: 14494,
+  File: 2231,
+ Errors: 14590,
  Warnings: 0,
  Infos: 0
   
@@ -3779,7 +3779,7 @@ under the License.
   0
 
 
-  13
+  12
 
   
   
@@ -7690,6 +7690,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ImmutableHColumnDescriptor.java;>org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
+
+
+  0
+
+
+  0
+
+
+  1
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.wal.FSWALEntry.java;>org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
 
 
@@ -11064,6 +11078,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.java;>org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+
+
+  0
+
+
+  0
+
+
+  52
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.crypto.DefaultCipherProvider.java;>org/apache/hadoop/hbase/io/crypto/DefaultCipherProvider.java
 
 
@@ -13318,6 +13346,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.ColumnFamilyDescriptor.java;>org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+
+
+  0
+
+
+  0
+
+
+  12
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.RegionException.java;>org/apache/hadoop/hbase/RegionException.java
 
 
@@ -14531,7 +14573,7 @@ under the License.
   0
 
 
-  27
+  39
 
   
   
@@ -17835,7 +17877,7 @@ under the License.
   0
 
 
-  135
+  137
 
   
   
@@ -19137,7 +19179,7 @@ under the License.
   0
 
 
-  58
+  59
 
   
   
@@ -21937,7 +21979,7 @@ under the License.
   0
 
 
-  235
+  237
 
   
   
@@ -22175,7 +22217,7 @@ under the License.
   0
 
 
-  27
+  31
 
   
   
@@ -24135,7 +24177,7 @@ under the License.
   0
 
 
-  31
+  34
 
   
   
@@ -24751,7 +24793,7 @@ under the License.
   0
 
 
-  21
+  22
 
   
   
@@ -26795,7 +26837,7 @@ under the License.
   0
 
 
-  70
+  72
 
   
   
@@ -27299,7 +27341,7 @@ under the License.
   0
 
 
-  37
+  43
 
   
   
@@ -30099,7 +30141,7 @@ under the License.
   0
 
 
-  81
+  80
 
   

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/apidocs/org/apache/hadoop/hbase/util/JsonMapper.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/util/JsonMapper.html 
b/apidocs/org/apache/hadoop/hbase/util/JsonMapper.html
index 5813baf..82b31f7 100644
--- a/apidocs/org/apache/hadoop/hbase/util/JsonMapper.html
+++ b/apidocs/org/apache/hadoop/hbase/util/JsonMapper.html
@@ -4,7 +4,7 @@
 
 
 
-JsonMapper (Apache HBase 2.0.0-SNAPSHOT API)
+JsonMapper (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
-LeaseNotRecoveredException (Apache HBase 2.0.0-SNAPSHOT API)
+LeaseNotRecoveredException (Apache HBase 3.0.0-SNAPSHOT API)