[29/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html 
b/devapidocs/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
new file mode 100644
index 000..4ed87c3
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
@@ -0,0 +1,1261 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+ThriftTable (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.thrift2.client
+Class ThriftTable
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.thrift2.client.ThriftTable
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true";
 title="class or interface in java.io">Closeable, https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true";
 title="class or interface in java.lang">AutoCloseable, Table
+
+
+
+@InterfaceAudience.Private
+public class ThriftTable
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+implements Table
+
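For orientation, a minimal usage sketch (not part of this generated page): since ThriftTable is @InterfaceAudience.Private, client code is expected to reach it through the thrift2 ThriftConnection registered as the connection implementation. The configuration key names and the gateway host below are assumptions, not taken from this diff.

  // Hedged sketch: obtain a Table backed by the HBase Thrift2 gateway.
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.client.connection.impl",                        // assumed key
      "org.apache.hadoop.hbase.thrift2.client.ThriftConnection");
  conf.set("hbase.thrift.server.name", "thrift-gw.example.com");  // hypothetical host
  conf.setInt("hbase.thrift.server.port", 9090);
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf("t1"))) {
    Result result = table.get(new Get(Bytes.toBytes("row1")));    // served via the thrift server
  }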
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes 
+
+Modifier and Type
+Class and Description
+
+
+private class 
+ThriftTable.CheckAndMutateBuilderImpl 
+
+
+private class 
+ThriftTable.Scanner
+A scanner that performs scans against the thrift server;
+ getScannerResults is used by this scanner
+
+
+
+
+
+
+
+Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.client.Table
+Table.CheckAndMutateBuilder
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client
+client 
+
+
+private 
org.apache.hadoop.conf.Configuration
+conf 
+
+
+private int
+operationTimeout 
+
+
+private int
+scannerCaching 
+
+
+private TableName
+tableName 
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer
+tableNameInBytes 
+
+
+private 
org.apache.thrift.transport.TTransport
+tTransport 
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+ThriftTable(TableName tableName,
+   
org.apache.hadoop.hbase.thrift2.generated.THBaseService.Client client,
+   org.apache.thrift.transport.TTransport tTransport,
+   org.apache.hadoop.conf.Configuration conf) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+Result
+append(Append append)
+Appends values to one or more columns within a single 
row.
+
+
+
+void
+batch(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results)
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends, RowMutations.
+
+
+
+ void
+batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
+ Batch.Callback

[29/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
index 38edbe5..568e74dc 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":50,"i47":18,"i48":50,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
 
109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":50,"i121":18,"i122":50,"i123":18,"i124":50,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18,"i155":18};
+var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":18,"i47":50,"i48":18,"i49":50,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
 
109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":18,"i121":18,"i122":18,"i123":50,"i124":18,"i125":50,"i126":18,"i127":50,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18,"i155":18,"i156":18,"i157":18,"i158":18,"i159":18};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -435,51 +435,58 @@ public interface 
 default void
+postIsRpcThrottleEnabled(ObserverContext ctx,
+boolean rpcThrottleEnabled)
+Called after checking whether RPC throttle is enabled (see the observer sketch following this method summary).
+
+
+
+default void
 postListDecommissionedRegionServers(ObserverContext ctx)
Called after listing decommissioned region servers.
 
 
-
+
 default void
 postListNamespaceDescriptors(ObserverContext ctx,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List descriptors)
 Called after a listNamespaceDescriptors request has been 
processed.
 
 
-
+
 default void
 postListReplicationPeers(ObserverContext ctx,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String regex)
Called after listing replication peers.
 
 
-
+
 default void
 postListRSGroups(ObserverContext ctx)
 Called after listing region server group information.
 
 
-
+
 default void
 postListSnapshot(ObserverC
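To make the new postIsRpcThrottleEnabled hook concrete, a minimal observer sketch follows; it is an illustration built from the method summary above, not code from this commit, and the logging body is invented.

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.MasterObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;

  public class ThrottleAuditObserver implements MasterCoprocessor, MasterObserver {
    @Override
    public Optional<MasterObserver> getMasterObserver() {
      return Optional.of(this); // route observer callbacks to this instance
    }

    @Override
    public void postIsRpcThrottleEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
        boolean rpcThrottleEnabled) throws IOException {
      // Runs after the master has answered an isRpcThrottleEnabled request.
      System.out.println("RPC throttle enabled: " + rpcThrottleEnabled);
    }
  }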

[29/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
index cd0ff28..4f9947f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
@@ -31,161 +31,161 @@
 023import static 
org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
 024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 025import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-026
-027import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-028
-029import java.io.IOException;
-030import java.util.ArrayList;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.IdentityHashMap;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037import 
java.util.concurrent.CompletableFuture;
-038import 
java.util.concurrent.ConcurrentHashMap;
-039import 
java.util.concurrent.ConcurrentLinkedQueue;
-040import 
java.util.concurrent.ConcurrentMap;
-041import 
java.util.concurrent.ConcurrentSkipListMap;
-042import java.util.concurrent.TimeUnit;
-043import java.util.function.Supplier;
-044import java.util.stream.Collectors;
-045import java.util.stream.Stream;
-046
-047import 
org.apache.hadoop.hbase.CellScannable;
-048import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-056import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-057import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-062import 
org.apache.hadoop.hbase.util.Bytes;
-063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-064
-065/**
-066 * Retry caller for batch.
-067 * 

-068 * Notice that, the {@link #operationTimeoutNs} is the total time limit now which is the same with
-069 * other single operations
-070 *
-071 * And the {@link #maxAttempts} is a limit for each single operation in the batch logically. In the
-072 * implementation, we will record a {@code tries} parameter for each operation group, and if it is
-073 * split to several groups when retrying, the sub groups will inherit the {@code tries}. You can
-074 * imagine that the whole retrying process is a tree, and the {@link #maxAttempts} is the limit of
-075 * the depth of the tree.
-076 */
-077@InterfaceAudience.Private
-078class AsyncBatchRpcRetryingCaller {
-079
-080  private static final Logger LOG = LoggerFactory.getLogger(AsyncBatchRpcRetryingCaller.class);
-081
-082  private final HashedWheelTimer retryTimer;
-083
-084  private final AsyncConnectionImpl conn;
-085
-086  private final TableName tableName;
-087
-088  private final List actions;
-089
-090  private final List> futures;
-091
-092  private final IdentityHashMap> action2Future;
-093
-094  private final IdentityHashMap> action2Errors;
-095
-096  private final long pauseNs;
-097
-098  private final int maxAttempts;
-099
-100  private final long operationTimeoutNs;
-101
-102  private final long rpcTimeoutNs;
-103
-104  private final int startLogErrorsCnt;
-105
-106  private final long startNs;
-107
-108  // we can not use HRegionLocation as the map key because the hashCode and equals method of
-109  // HRegionLocation only consider serverName.
-110  private static final class RegionRequest {
-111
-112    public final HRegionLocation loc;
-113
-114    public final ConcurrentLinkedQueue actions = new ConcurrentLinkedQueue<>();
-115
-116    public RegionRequest(HRegionLocation loc) {
-117      this.loc = loc;
-118    }
-119  }
-120
-121  private static final class ServerRequest {
-122
-123    public final ConcurrentMap actionsByRegion =
-124        new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
-125
-126    public void addAction(HRegionLocation
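For context, a hedged sketch of the call path that ends in this retry caller: an AsyncTable batch request. The table and row names are invented; batchAll is the convenience variant that aggregates the per-action futures.

  // Assumed imports: org.apache.hadoop.hbase.client.*, org.apache.hadoop.hbase.TableName,
  // org.apache.hadoop.hbase.util.Bytes, java.util.Arrays, java.util.List,
  // java.util.concurrent.CompletableFuture.
  AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get();
  AsyncTable<?> table = conn.getTable(TableName.valueOf("t1"));
  List<Get> gets = Arrays.asList(new Get(Bytes.toBytes("r1")), new Get(Bytes.toBytes("r2")));
  // Each action retries independently up to maxAttempts, while operationTimeoutNs
  // caps the whole batch call, as described in the class comment above.
  CompletableFuture<List<Result>> results = table.batchAll(gets);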


[29/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
index c4e8c8b..aa58108 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
@@ -82,602 +82,613 @@
 074  public static final String 
USER_COPROCESSORS_ENABLED_CONF_KEY =
 075"hbase.coprocessor.user.enabled";
 076  public static final boolean 
DEFAULT_USER_COPROCESSORS_ENABLED = true;
-077
-078  private static final Logger LOG = 
LoggerFactory.getLogger(CoprocessorHost.class);
-079  protected Abortable abortable;
-080  /** Ordered set of loaded coprocessors 
with lock */
-081  protected final SortedList 
coprocEnvironments =
-082  new SortedList<>(new 
EnvironmentPriorityComparator());
-083  protected Configuration conf;
-084  // unique file prefix to use for local 
copies of jars when classloading
-085  protected String pathPrefix;
-086  protected AtomicInteger loadSequence = 
new AtomicInteger();
-087
-088  public CoprocessorHost(Abortable 
abortable) {
-089this.abortable = abortable;
-090this.pathPrefix = 
UUID.randomUUID().toString();
-091  }
-092
-093  /**
-094   * Not to be confused with the 
per-object _coprocessors_ (above),
-095   * coprocessorNames is static and 
stores the set of all coprocessors ever
-096   * loaded by any thread in this JVM. It 
is strictly additive: coprocessors are
-097   * added to coprocessorNames, by 
checkAndLoadInstance() but are never removed, since
-098   * the intention is to preserve a 
history of all loaded coprocessors for
-099   * diagnosis in case of server crash 
(HBASE-4014).
-100   */
-101  private static Set 
coprocessorNames =
-102  Collections.synchronizedSet(new 
HashSet());
-103
-104  public static Set 
getLoadedCoprocessors() {
-105synchronized (coprocessorNames) {
-106  return new 
HashSet(coprocessorNames);
-107}
-108  }
-109
-110  /**
-111   * Used to create a parameter to the 
HServerLoad constructor so that
-112   * HServerLoad can provide information 
about the coprocessors loaded by this
-113   * regionserver.
-114   * (HBASE-4070: Improve region server 
metrics to report loaded coprocessors
-115   * to master).
-116   */
-117  public Set 
getCoprocessors() {
-118Set returnValue = new 
TreeSet<>();
-119for (E e: coprocEnvironments) {
-120  
returnValue.add(e.getInstance().getClass().getSimpleName());
-121}
-122return returnValue;
-123  }
-124
-125  /**
-126   * Load system coprocessors once only. 
Read the class names from configuration.
-127   * Called by constructor.
-128   */
-129  protected void 
loadSystemCoprocessors(Configuration conf, String confKey) {
-130boolean coprocessorsEnabled = 
conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY,
-131  DEFAULT_COPROCESSORS_ENABLED);
-132if (!coprocessorsEnabled) {
-133  return;
-134}
-135
-136Class implClass;
-137
-138// load default coprocessors from 
configure file
-139String[] defaultCPClasses = 
conf.getStrings(confKey);
-140if (defaultCPClasses == null || 
defaultCPClasses.length == 0)
-141  return;
-142
-143int priority = 
Coprocessor.PRIORITY_SYSTEM;
-144for (String className : 
defaultCPClasses) {
-145  className = className.trim();
-146  if (findCoprocessor(className) != 
null) {
-147// If already loaded will just 
continue
-148LOG.warn("Attempted duplicate 
loading of " + className + "; skipped");
-149continue;
-150  }
-151  ClassLoader cl = 
this.getClass().getClassLoader();
-152  
Thread.currentThread().setContextClassLoader(cl);
-153  try {
-154implClass = 
cl.loadClass(className);
-155// Add coprocessors as we go to 
guard against case where a coprocessor is specified twice
-156// in the configuration
-157E env = 
checkAndLoadInstance(implClass, priority, conf);
-158if (env != null) {
-159  
this.coprocEnvironments.add(env);
-160  LOG.info("System coprocessor {} 
loaded, priority={}.", className, priority);
-161  ++priority;
-162}
-163  } catch (Throwable t) {
-164// We always abort if system 
coprocessors cannot be loaded
-165abortServer(className, t);
-166  }
-167}
-168  }
-169
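As a concrete illustration of where loadSystemCoprocessors() gets its input (an assumption-level sketch; the observer class is hypothetical), system coprocessors are listed under a per-service key and gated by the enabled flags defined above:

  // Equivalent entries can be placed in hbase-site.xml.
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.coprocessor.enabled", true);       // COPROCESSORS_ENABLED_CONF_KEY
  conf.setBoolean("hbase.coprocessor.user.enabled", true);  // USER_COPROCESSORS_ENABLED_CONF_KEY
  // confKey passed to loadSystemCoprocessors(); the region variant is shown here.
  conf.setStrings("hbase.coprocessor.region.classes",
      "org.example.MyRegionObserver");                      // hypothetical class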
-170  /**
-171   * Load a coprocessor implementation 
into the host
-172   * @param path path to implementation 
jar
-173   * @param className the main class 
name
-174   * @param pr

[29/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
index 9eec050..5de1553 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
@@ -603,6 +603,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/devapidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 3a922a6..174be2f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -354,6 +354,6 @@ not permitted.)
 
 
 
-Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index 9662819..4e37df7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -2822,6 +2822,6 @@ public Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/ScanResultCache.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ScanResultCache.html 
b/devapidocs/org/apache/hadoop/hbase/client/ScanResultCache.html
index 3ae9227..cc74b69 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ScanResultCache.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ScanResultCache.html
@@ -315,6 +315,6 @@ interface Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumer.html 
b/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumer.html
index b532719..04c39fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumer.html
@@ -241,6 +241,6 @@ extends Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumerBase.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumerBase.html 
b/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumerBase.html
index bf42658..3d6a57a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumerBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ScanResultConsumerBase.html
@@ -269,6 +269,6 @@ public interface Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2019 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.MoreResults.html 
b/devapidocs/org/apache/hadoop/hbase/client/S

[29/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
index 6c0bd3a..bc975c0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
@@ -126,7 +126,7 @@
 
 
 
-abstract class RegionCoprocessorHost.BulkLoadObserverOperation
+abstract class RegionCoprocessorHost.BulkLoadObserverOperation
 extends CoprocessorHost.ObserverOperationWithoutResult
 
 
@@ -205,7 +205,7 @@ extends 
 
 BulkLoadObserverOperation
-public BulkLoadObserverOperation(User user)
+public BulkLoadObserverOperation(User user)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
index 21d19cb..5082701 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class RegionCoprocessorHost.RegionEnvironment
+private static class RegionCoprocessorHost.RegionEnvironment
 extends BaseEnvironment
 implements RegionCoprocessorEnvironment
 Encapsulation of the environment of each coprocessor
@@ -303,7 +303,7 @@ implements 
 
 region
-private Region region
+private Region region
 
 
 
@@ -312,7 +312,7 @@ implements 
 
 sharedData
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentMapString,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object> sharedData
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentMapString,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object> sharedData
 
 
 
@@ -321,7 +321,7 @@ implements 
 
 metricRegistry
-private final MetricRegistry metricRegistry
+private final MetricRegistry metricRegistry
 
 
 
@@ -330,7 +330,7 @@ implements 
 
 services
-private final RegionServerServices services
+private final RegionServerServices services
 
 
 
@@ -347,7 +347,7 @@ implements 
 
 RegionEnvironment
-public RegionEnvironment(RegionCoprocessor impl,
+public RegionEnvironment(RegionCoprocessor impl,
  int priority,
  int seq,
  org.apache.hadoop.conf.Configuration conf,
@@ -376,7 +376,7 @@ implements 
 
 getRegion
-public Region getRegion()
+public Region getRegion()
 
 Specified by:
 getRegion in
 interface RegionCoprocessorEnvironment
@@ -391,7 +391,7 @@ implements 
 
 getOnlineRegions
-public OnlineRegions getOnlineRegions()
+public OnlineRegions getOnlineRegions()
 
 Specified by:
 getOnlineRegions in
 interface RegionCoprocessorEnvironment
@@ -406,7 +406,7 @@ implements 
 
 getConnection
-public Connection getConnection()
+public Connection getConnection()
 Description copied from 
interface: RegionCoprocessorEnvironment
Returns the host's Connection to the cluster. Do not 
close! This is a shared connection
  with the hosting server. Throws https://docs.oracle.com/javase/8/docs/api/java/lang/UnsupportedOperationException.html?is-external=true";
 title="class or interface in 
java.lang">UnsupportedOperationException if you try to close
@@ -445,7 +445,7 @@ implements 
 
 createConnection
-public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
+public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface
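A hedged sketch of how a coprocessor is expected to use the two accessors documented above; the hook choice and body are illustrative only.

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

  void useConnections(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    // Shared with the hosting region server: use it, but never close it;
    // close() throws UnsupportedOperationException.
    Connection shared = env.getConnection();
    // A private connection the coprocessor owns and must close itself.
    try (Connection own = env.createConnection(env.getConfiguration())) {
      // ... longer-lived or isolated work goes through 'own' ...
    }
  }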

[29/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index b390abe..6424016 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HRegion.WriteState
+static class HRegion.WriteState
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -239,7 +239,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 flushing
-volatile boolean flushing
+volatile boolean flushing
 
 
 
@@ -248,7 +248,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 flushRequested
-volatile boolean flushRequested
+volatile boolean flushRequested
 
 
 
@@ -257,7 +257,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 compacting
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting
 
 
 
@@ -266,7 +266,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 writesEnabled
-volatile boolean writesEnabled
+volatile boolean writesEnabled
 
 
 
@@ -275,7 +275,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 readOnly
-volatile boolean readOnly
+volatile boolean readOnly
 
 
 
@@ -284,7 +284,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 readsEnabled
-volatile boolean readsEnabled
+volatile boolean readsEnabled
 
 
 
@@ -293,7 +293,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 HEAP_SIZE
-static final long HEAP_SIZE
+static final long HEAP_SIZE
 
 
 
@@ -310,7 +310,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 WriteState
-WriteState()
+WriteState()
 
 
 
@@ -327,7 +327,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setReadOnly
-void setReadOnly(boolean onOff)
+void setReadOnly(boolean onOff)
 Set flags that make this region read-only.
 
 Parameters:
@@ -341,7 +341,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isReadOnly
-boolean isReadOnly()
+boolean isReadOnly()
 
 
 
@@ -350,7 +350,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isFlushRequested
-boolean isFlushRequested()
+boolean isFlushRequested()
 
 
 
@@ -359,7 +359,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setReadsEnabled
-void setReadsEnabled(boolean readsEnabled)
+void setReadsEnabled(boolean readsEnabled)
 
 
 



[29/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
index 98a70a6..8f8bcd8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
@@ -30,82 +30,78 @@
 022import java.util.List;
 023import java.util.Map;
 024import java.util.Set;
-025
-026import javax.servlet.http.HttpServlet;
-027import 
javax.servlet.http.HttpServletRequest;
-028import 
javax.servlet.http.HttpServletResponse;
-029
-030import 
org.apache.yetus.audience.InterfaceAudience;
-031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.hadoop.hbase.ServerName;
-033import 
org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
-034import 
org.apache.hadoop.hbase.util.FSUtils;
-035import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-036
-037/**
-038 * The servlet responsible for rendering 
the index page of the
-039 * master.
-040 */
-041@InterfaceAudience.Private
-042public class MasterStatusServlet extends 
HttpServlet {
-043  private static final long 
serialVersionUID = 1L;
-044
-045  @Override
-046  public void doGet(HttpServletRequest 
request, HttpServletResponse response)
-047throws IOException
-048  {
-049HMaster master = (HMaster) 
getServletContext().getAttribute(HMaster.MASTER);
-050assert master != null : "No Master in 
context!";
+025import javax.servlet.http.HttpServlet;
+026import 
javax.servlet.http.HttpServletRequest;
+027import 
javax.servlet.http.HttpServletResponse;
+028import 
org.apache.hadoop.conf.Configuration;
+029import 
org.apache.hadoop.hbase.ServerName;
+030import 
org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
+031import 
org.apache.hadoop.hbase.util.FSUtils;
+032import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+033import 
org.apache.yetus.audience.InterfaceAudience;
+034
+035/**
+036 * The servlet responsible for rendering 
the index page of the
+037 * master.
+038 */
+039@InterfaceAudience.Private
+040public class MasterStatusServlet extends 
HttpServlet {
+041  private static final long 
serialVersionUID = 1L;
+042
+043  @Override
+044  public void doGet(HttpServletRequest 
request, HttpServletResponse response)
+045throws IOException
+046  {
+047HMaster master = (HMaster) 
getServletContext().getAttribute(HMaster.MASTER);
+048assert master != null : "No Master in 
context!";
+049
+050
response.setContentType("text/html");
 051
-052
response.setContentType("text/html");
+052Configuration conf = 
master.getConfiguration();
 053
-054Configuration conf = 
master.getConfiguration();
-055
-056Map frags = 
getFragmentationInfo(master, conf);
-057ServerName metaLocation = null;
-058List servers = 
null;
-059Set deadServers = 
null;
-060
-061if(master.isActiveMaster()) {
-062  metaLocation = 
getMetaLocationOrNull(master);
-063  ServerManager serverManager = 
master.getServerManager();
-064  if (serverManager != null) {
-065deadServers = 
serverManager.getDeadServers().copyServerNames();
-066servers = 
serverManager.getOnlineServersList();
-067  }
-068}
-069
-070MasterStatusTmpl tmpl = new 
MasterStatusTmpl()
-071  .setFrags(frags)
-072  .setMetaLocation(metaLocation)
-073  .setServers(servers)
-074  .setDeadServers(deadServers)
-075  
.setCatalogJanitorEnabled(master.isCatalogJanitorEnabled());
-076
-077if (request.getParameter("filter") != 
null)
-078  
tmpl.setFilter(request.getParameter("filter"));
-079if (request.getParameter("format") != 
null)
-080  
tmpl.setFormat(request.getParameter("format"));
-081tmpl.render(response.getWriter(), 
master);
-082  }
-083
-084  private ServerName 
getMetaLocationOrNull(HMaster master) {
-085MetaTableLocator metaTableLocator = 
master.getMetaTableLocator();
-086return metaTableLocator == null ? 
null :
-087  
metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
-088  }
-089
-090  private Map 
getFragmentationInfo(
-091  HMaster master, Configuration conf) 
throws IOException {
-092boolean showFragmentation = 
conf.getBoolean(
-093
"hbase.master.ui.fragmentation.enabled", false);
-094if (showFragmentation) {
-095  return 
FSUtils.getTableFragmentation(master);
-096} else {
-097  return null;
-098}
-099  }
-100}
+054Map frags = 
getFragmentationInfo(master, conf);
+055ServerName metaLocation = null;
+056List servers = 
null;
+057Set deadServers = 
null;
+058
+059if(master.isActiveMaster())

[29/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.html
index 99241bf..1a64278 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/CloseRegionProcedure.html
@@ -186,6 +186,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.html
index bdf52f0..4148a1a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.html
@@ -214,6 +214,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
index 168b7d6..1bccc73 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
@@ -206,6 +206,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 5fc5a88..164f281 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -225,6 +225,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
index 45884e5..49adc3c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
@@ -213,6 +213,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.html
index 0b1cc40..78e2bff 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/OpenRegionProcedure.html
@@ -175,6 +175,13 @@ extends Procedure
 NO_PROC_ID,
 NO_TIMEOUT
 
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+DUMMY_NAMESPACE_TABLE_NAME
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionRemoteProcedureBase.html
-

[29/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/org/apache/hadoop/hbase/security/access/class-use/TablePermission.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/class-use/TablePermission.html
 
b/devapidocs/org/apache/hadoop/hbase/security/access/class-use/TablePermission.html
index c7df26c..e71aa27 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/access/class-use/TablePermission.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/access/class-use/TablePermission.html
@@ -95,22 +95,6 @@
 
 
 Uses of TablePermission 
in org.apache.hadoop.hbase.security.access
-
-Subclasses of TablePermission 
in org.apache.hadoop.hbase.security.access 
-
-Modifier and Type
-Class and Description
-
-
-
-class 
-UserPermission
-Represents an authorization for access over the given 
table, column family
- plus qualifier, for the given user.
-
-
-
-
 
 Fields in org.apache.hadoop.hbase.security.access
 with type parameters of type TablePermission 
 
@@ -119,12 +103,14 @@
 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMapString,TableAuthManager.PermissionCache>
-TableAuthManager.nsCache 
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true";
 title="class or interface in java.util.concurrent">ConcurrentHashMap>
+AuthManager.tableCache
+Cache for table permission.
+
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap>
-TableAuthManager.tableCache 
+(package private) AuthManager.PermissionCache
+AuthManager.TBL_NO_PERMISSION 
 
 
 
@@ -137,18 +123,6 @@
 
 
 static TablePermission
-ShadedAccessControlUtil.toTablePermission(org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission proto)
-Converts a Permission shaded proto to a client 
TablePermission object.
-
-
-
-static TablePermission
-AccessControlUtil.toTablePermission(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission proto)
-Converts a Permission proto to a client TablePermission 
object.
-
-
-
-static TablePermission
 AccessControlUtil.toTablePermission(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.TablePermission proto)
 Converts a TablePermission proto to a client 
TablePermission object.
 
@@ -156,108 +130,6 @@
 
 
 
-Methods in org.apache.hadoop.hbase.security.access
 that return types with arguments of type TablePermission 
-
-Modifier and Type
-Method and Description
-
-
-
-static 
org.apache.hbase.thirdparty.com.google.common.collect.ListMultimapString,TablePermission>
-AccessControlLists.getNamespacePermissions(org.apache.hadoop.conf.Configuration conf,
-   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String namespace) 
-
-
-private TableAuthManager.PermissionCache
-TableAuthManager.getNamespacePermissions(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String namespace) 
-
-
-(package private) static 
org.apache.hbase.thirdparty.com.google.common.collect.ListMultimapString,TablePermission>
-AccessControlLists.getPermissions(org.apache.hadoop.conf.Configuration conf,
-  byte[] entryName,
-  Table t,
-  byte[] cf,
-  byte[] cq,
-  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String user,
-  boolean hasFilterUser)
-Reads user permission assignments stored in the 
l: column family of the first
- table row in _acl_.
-
-
-
-static 
org.apache.hbase.thirdparty.com.google.common.collect.ListMultimapString,TablePermission>
-AccessControlLists.getTablePermissions(org.apache.hadoop.conf.Configuration conf,
-   TableName tableName) 
-
-
-private TableAuthManager.PermissionCache
-TableAuthManager.getTablePermissions(TableName table)

[29/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
index 0af8acd..c5f21ac 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
@@ -645,1615 +645,1597 @@
 637
proc.afterReplay(getEnvironment());
 638  }
 639});
-640
-641// 4. Push the procedures to the 
timeout executor
-642waitingTimeoutList.forEach(proc -> 
{
-643  
proc.afterReplay(getEnvironment());
-644  timeoutExecutor.add(proc);
-645});
-646// 5. restore locks
-647restoreLocks();
-648// 6. Push the procedure to the 
scheduler
-649
failedList.forEach(scheduler::addBack);
-650runnableList.forEach(p -> {
-651  p.afterReplay(getEnvironment());
-652  if (!p.hasParent()) {
-653
sendProcedureLoadedNotification(p.getProcId());
-654  }
-655  // If the procedure holds the lock, 
put the procedure in front
-656  // If its parent holds the lock, 
put the procedure in front
-657  // TODO. Is it possible that its 
ancestor holds the lock?
-658  // For now, the deepest procedure 
hierarchy is:
-659  // ModifyTableProcedure -> 
ReopenTableProcedure ->
-660  // MoveTableProcedure -> 
Unassign/AssignProcedure
-661  // But ModifyTableProcedure and 
ReopenTableProcedure won't hold the lock
-662  // So, checking the parent lock is 
enough (a tricky case is resolved by HBASE-21384).
-663  // If someone changes or adds new 
procedures making a 'grandpa' procedure
-664  // hold the lock, but the parent 
procedure doesn't hold the lock, there will
-665  // be a problem here. We have to 
check one procedure's ancestors.
check one procedure's ancestors.
-666  // And we need to change 
LockAndQueue.hasParentLock(Procedure proc) method
-667  // to check all ancestors too.
-668  if (p.isLockedWhenLoading() || 
(p.hasParent() && procedures
-669  
.get(p.getParentProcId()).isLockedWhenLoading())) {
-670scheduler.addFront(p, false);
-671  } else {
-672// if it was not, it can wait.
-673scheduler.addBack(p, false);
-674  }
-675});
-676// After all procedures put into the 
queue, signal the worker threads.
-677// Otherwise, there is a race 
condition. See HBASE-21364.
-678scheduler.signalAll();
-679  }
+640// 4. restore locks
+641restoreLocks();
+642
+643// 5. Push the procedures to the 
timeout executor
+644waitingTimeoutList.forEach(proc -> 
{
+645  
proc.afterReplay(getEnvironment());
+646  timeoutExecutor.add(proc);
+647});
+648
+649// 6. Push the procedure to the 
scheduler
+650
failedList.forEach(scheduler::addBack);
+651runnableList.forEach(p -> {
+652  p.afterReplay(getEnvironment());
+653  if (!p.hasParent()) {
+654
sendProcedureLoadedNotification(p.getProcId());
+655  }
+656  scheduler.addBack(p);
+657});
+658// After all procedures put into the 
queue, signal the worker threads.
+659// Otherwise, there is a race 
condition. See HBASE-21364.
+660scheduler.signalAll();
+661  }
+662
+663  /**
+664   * Initialize the procedure executor, 
but do not start workers. We will start them later.
+665   * 

+666   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
+667   * ensure a single executor, and start the procedure replay to resume and recover the previous
+668   * pending and in-progress procedures.
+669   * @param numThreads number of threads available for procedure execution.
+670   * @param abortOnCorruption true if you want to abort your service in case a corrupted procedure
+671   *          is found on replay. otherwise false.
+672   */
+673  public void init(int numThreads, boolean abortOnCorruption) throws IOException {
+674    // We have numThreads executor + one timer thread used for timing out
+675    // procedures and triggering periodic procedures.
+676    this.corePoolSize = numThreads;
+677    this.maxPoolSize = 10 * numThreads;
+678    LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}",
+679        corePoolSize, maxPoolSize);
 680
-681  /**
-682   * Initialize the procedure executor, but do not start workers. We will start them later.
-683   *
-684   * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, and
-685   * ensure a single executor, and start the procedure replay to resume and recover the previous
-686   * pending and in-progress pro

[29/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
@@ -382,1357 +382,1365 @@
 374for (int i = 0; i < 
this.curFunctionCosts.length; i++) {
 375  curFunctionCosts[i] = 
tempFunctionCosts[i];
 376}
-377LOG.info("start 
StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378+ functionCost());
+377double initCost = currentCost;
+378double newCost = currentCost;
 379
-380double initCost = currentCost;
-381double newCost = currentCost;
-382
-383long computedMaxSteps;
-384if (runMaxSteps) {
-385  computedMaxSteps = 
Math.max(this.maxSteps,
-386  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-387} else {
-388  computedMaxSteps = 
Math.min(this.maxSteps,
-389  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-390}
-391// Perform a stochastic walk to see 
if we can get a good fit.
-392long step;
-393
-394for (step = 0; step < 
computedMaxSteps; step++) {
-395  Cluster.Action action = 
nextAction(cluster);
-396
-397  if (action.type == Type.NULL) {
-398continue;
-399  }
-400
-401  cluster.doAction(action);
-402  updateCostsWithAction(cluster, 
action);
-403
-404  newCost = computeCost(cluster, 
currentCost);
-405
-406  // Should this be kept?
-407  if (newCost < currentCost) {
-408currentCost = newCost;
-409
-410// save for JMX
-411curOverallCost = currentCost;
-412for (int i = 0; i < 
this.curFunctionCosts.length; i++) {
-413  curFunctionCosts[i] = 
tempFunctionCosts[i];
-414}
-415  } else {
-416// Put things back the way they 
were before.
-417// TODO: undo by remembering old 
values
-418Action undoAction = 
action.undoAction();
-419cluster.doAction(undoAction);
-420updateCostsWithAction(cluster, 
undoAction);
-421  }
-422
-423  if 
(EnvironmentEdgeManager.currentTime() - startTime >
-424  maxRunningTime) {
-425break;
-426  }
-427}
-428long endTime = 
EnvironmentEdgeManager.currentTime();
-429
-430
metricsBalancer.balanceCluster(endTime - startTime);
-431
-432// update costs metrics
-433updateStochasticCosts(tableName, 
curOverallCost, curFunctionCosts);
-434if (initCost > currentCost) {
-435  plans = 
createRegionPlans(cluster);
-436  LOG.info("Finished computing new 
load balance plan. Computation took {}" +
-437" to try {} different iterations. 
 Found a solution that moves " +
-438"{} regions; Going from a 
computed cost of {}" +
-439" to a new cost of {}", 
java.time.Duration.ofMillis(endTime - startTime),
-440step, plans.size(), initCost, 
currentCost);
-441  return plans;
-442}
-443LOG.info("Could not find a better 
load balance plan.  Tried {} different configurations in " +
-444  "{}, and did not find anything with 
a computed cost less than {}", step,
-445  java.time.Duration.ofMillis(endTime 
- startTime), initCost);
-446return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void 
updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) 
{
-453if (tableName == null) return;
-454
-455// check if the metricsBalancer is 
MetricsStochasticBalancer before casting
-456if (metricsBalancer instanceof 
MetricsStochasticBalancer) {
-457  MetricsStochasticBalancer balancer 
= (MetricsStochasticBalancer) metricsBalancer;
-458  // overall cost
-459  
balancer.updateStochasticCost(tableName.getNameAsString(),
-460"Overall", "Overall cost", 
overall);
-461
-462  // each cost function
-463  for (int i = 0; i < 
costFunctions.length; i++) {
-464CostFunction costFunction = 
costFunctions[i];
-465String costFunctionName = 
costFunction.getClass().getSimpleName();
-466Double costPercent = (overall == 
0) ? 0 : (subCosts[i] / overall);
-467// TODO: cost function may need a 
specific description
-468
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469  "The percent of " + 
costFunctionName, costPercent);
-470  }
-471
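Given the step computation shown above (computedMaxSteps is the max or the min of maxSteps and numRegions * stepsPerRegion * numServers, depending on runMaxSteps), a hedged tuning sketch; the key names are assumed to be the stochastic balancer's standard configuration keys:

  // Assumption-level sketch; the values are illustrative only.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
  conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
  conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L); // ms per balancer run
  conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);  // cap by the min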

[29/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.EntryIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.EntryIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.EntryIterator.html
deleted file mode 100644
index a281f12..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.EntryIterator.html
+++ /dev/null
@@ -1,679 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018package org.apache.hadoop.hbase.procedure2.store.wal;
-019
-020import java.io.IOException;
-021import org.apache.hadoop.hbase.procedure2.Procedure;
-022import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-023import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-024import org.slf4j.Logger;
-025import org.slf4j.LoggerFactory;
-026
-027import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-028
-029/**
-030 * We keep an in-memory map of the procedures sorted by replay order. (see the details in the
-031 * beginning of {@link ProcedureWALFormatReader}).
-032 *
-033 * <pre>
-034 *  procedureMap = | A |   | E |   | C |   |   |   |   | G |   |   |
-035 *                   D               B
-036 *  replayOrderHead = C <-> B <-> E <-> D <-> A <-> G
-037 *
-038 *  We also have a lazy grouping by "root procedure", and a list of
-039 *  unlinked procedures. If after reading all the WALs we have unlinked
-040 *  procedures it means that we had a missing WAL or a corruption.
-041 *  rootHead = A <-> D <-> G
-042 *             B     E
-043 *             C
-044 *  unlinkFromLinkList = None
-045 * </pre>
-046 */
-047class WALProcedureMap {
-048
-049  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureMap.class);
-050
-051  private static class Entry {
-052    // For bucketed linked lists in hash-table.
-053    private Entry hashNext;
-054    // child head
-055    private Entry childHead;
-056    // double-link for rootHead or childHead
-057    private Entry linkNext;
-058    private Entry linkPrev;
-059    // replay double-linked-list
-060    private Entry replayNext;
-061    private Entry replayPrev;
-062    // procedure-infos
-063    private Procedure<?> procedure;
-064    private ProcedureProtos.Procedure proto;
-065    private boolean ready = false;
-066
-067    public Entry(Entry hashNext) {
-068      this.hashNext = hashNext;
-069    }
-070
-071    public long getProcId() {
-072      return proto.getProcId();
-073    }
-074
-075    public long getParentId() {
-076      return proto.getParentId();
-077    }
-078
-079    public boolean hasParent() {
-080      return proto.hasParentId();
-081    }
-082
-083    public boolean isReady() {
-084      return ready;
-085    }
-086
-087    public boolean isFinished() {
-088      if (!hasParent()) {
-089        // we only consider 'root' procedures. because for the user 'finished'
-090        // means when everything up to the 'root' is finished.
-091        switch (proto.getState()) {
-092          case ROLLEDBACK:
-093          case SUCCESS:
-094            return true;
-095          default:
-096            break;
-097        }
-098      }
-099      return false;
-100    }
-101
-102    public Procedure<?> convert() throws IOException {
-103      if (procedure == null) {
-104        procedure = ProcedureUtil.convertToProcedure(proto);
-105      }
-106      return procedure;
-107    }
-108
-109    @Override
-110    public String toString() {
-111      final StringBuilder sb = new StringBuilder();
-112      sb.append("Entry(");
-113      sb.append(getProcId());
-114      sb.append(", parentId=");
-115      sb.append(getParentId());
-116      sb.append(", class=");
-117      sb.append(proto.getClassName());
-118      sb.append(")");
-119      return sb.toString();
-120    }
-121  }
-122
-123
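
The Entry class above threads several linked lists through a single node; the hash-bucket chain via hashNext is the simplest of them. A stripped-down sketch of that head-insertion chaining idiom, with invented names rather than the real WALProcedureMap API:

class BucketChainSketch {
  static class Node {
    final long procId;
    Node hashNext; // same head-insertion chaining as WALProcedureMap.Entry
    Node(long procId, Node hashNext) { this.procId = procId; this.hashNext = hashNext; }
  }

  private final Node[] buckets = new Node[64]; // fixed size for illustration

  void put(long procId) {
    int idx = (int) (procId & (buckets.length - 1));
    // New nodes are prepended, mirroring `new Entry(hashNext)` above.
    buckets[idx] = new Node(procId, buckets[idx]);
  }

  Node get(long procId) {
    for (Node n = buckets[(int) (procId & (buckets.length - 1))]; n != null; n = n.hashNext) {
      if (n.procId == procId) return n;
    }
    return null;
  }
}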

[29/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index 7ff373c..a3e902f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class BaseLoadBalancer
+public abstract class BaseLoadBalancer
 extends java.lang.Object
 implements LoadBalancer
 The base class for load balancers. It provides the functions used by
@@ -500,7 +500,7 @@ implements 
 
 MIN_SERVER_BALANCE
-protected static final int MIN_SERVER_BALANCE
+protected static final int MIN_SERVER_BALANCE
 
 See Also:
 Constant
 Field Values
@@ -513,7 +513,7 @@ implements 
 
 stopped
-private volatile boolean stopped
+private volatile boolean stopped
 
 
 
@@ -522,7 +522,7 @@ implements 
 
 EMPTY_REGION_LIST
-private static final https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List EMPTY_REGION_LIST
+private static final https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List EMPTY_REGION_LIST
 
 
 
@@ -531,7 +531,7 @@ implements 
 
 IDLE_SERVER_PREDICATOR
-static final https://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true";
 title="class or interface in java.util.function">Predicate IDLE_SERVER_PREDICATOR
+static final https://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true";
 title="class or interface in java.util.function">Predicate IDLE_SERVER_PREDICATOR
 
 
 
@@ -540,7 +540,7 @@ implements 
 
 regionFinder
-protected RegionLocationFinder regionFinder
+protected RegionLocationFinder regionFinder
 
 
 
@@ -549,7 +549,7 @@ implements 
 
 useRegionFinder
-protected boolean useRegionFinder
+protected boolean useRegionFinder
 
 
 
@@ -558,7 +558,7 @@ implements 
 
 slop
-protected float slop
+protected float slop
 
 
 
@@ -567,7 +567,7 @@ implements 
 
 overallSlop
-protected float overallSlop
+protected float overallSlop
 
 
 
@@ -576,7 +576,7 @@ implements 
 
 config
-protected org.apache.hadoop.conf.Configuration config
+protected org.apache.hadoop.conf.Configuration config
 
 
 
@@ -585,7 +585,7 @@ implements 
 
 rackManager
-protected RackManager rackManager
+protected RackManager rackManager
 
 
 
@@ -594,7 +594,7 @@ implements 
 
 RANDOM
-private static final https://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random RANDOM
+private static final https://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random RANDOM
 
 
 
@@ -603,7 +603,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -612,7 +612,7 @@ implements 
 
 metricsBalancer
-protected MetricsBalancer 
metricsBalancer
+protected MetricsBalancer 
metricsBalancer
 
 
 
@@ -621,7 +621,7 @@ implements 
 
 clusterStatus
-protected ClusterMetrics clusterStatus
+protected ClusterMetrics clusterStatus
 
 
 
@@ -630,7 +630,7 @@ implements 
 
 masterServerName
-protected ServerName masterServerName
+protected ServerName masterServerName
 
 
 
@@ -639,7 +639,7 @@ implements 
 
 services
-protected MasterServices services
+protected MasterServices services
 
 
 
@@ -648,7 +648,7 @@ implements 
 
 tablesOnMaster
-protected boolean tablesOnMaster
+protected boolean tablesOnMaster
 
 
 
@@ -657,7 +657,7 @@ implements 
 
 onlySystemTablesOnMaster
-protected boolean onlySystemTablesOnMaster
+protected boolean onlySystemTablesOnMaster
 
 
 
@@ -674,7 +674,7 @@ implements 
 
 BaseLoadBalancer
-protected BaseLoadBalancer()
+protected BaseLoadBalancer()
 The constructor that uses the basic MetricsBalancer
 
 
@@ -684,7 +684,7 @@ implements 
 
 BaseLoadBalancer
-protected BaseLoadBalancer(MetricsBalancer metricsBalancer)
+protected BaseLoadBalancer(MetricsBalancer metricsBalancer)
 This Constructor accepts an instance of MetricsBalancer,
  which will be used instead of creating a new one
 
@@ -703,7 +703,7 @@ implements 
 
 createRegionFinder
-private void createRegionFinder()
+private void createRegionFinder()
 
 
 
@@ -712,7 +712,7 @@ implements 
 
 setConf
-public void setConf(org.apache.hadoop.conf.Configuration conf)
+public void setConf(org.apache.hadoop.conf.Configuration conf)
 
 Speci

[29/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/allclasses-noframe.html
--
diff --git a/testdevapidocs/allclasses-noframe.html b/testdevapidocs/allclasses-noframe.html
index f63e01b..8614ee4 100644
--- a/testdevapidocs/allclasses-noframe.html
+++ b/testdevapidocs/allclasses-noframe.html
@@ -454,6 +454,7 @@
 ProcedureTestingUtility
 ProcedureTestingUtility.LoadCounter
 ProcedureTestingUtility.NoopProcedure
+ProcedureTestingUtility.NoopStateMachineProcedure
 ProcedureTestingUtility.TestProcedure
 ProcedureTestUtil
 ProcedureWALLoaderPerformanceEvaluation
@@ -504,6 +505,13 @@
 RestartRandomZKNodeAction
 RestartRsHoldingMetaAction
 RestartRsHoldingTableAction
+RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+RestoreSnapshotFromClientAfterTruncateTestBase
+RestoreSnapshotFromClientCloneTestBase
+RestoreSnapshotFromClientGetCompactionStateTestBase
+RestoreSnapshotFromClientSchemaChangeTestBase
+RestoreSnapshotFromClientSimpleTestBase
+RestoreSnapshotFromClientTestBase
 RestTests
 RollingBatchRestartRsAction
 RollingBatchRestartRsAction.KillOrStart
@@ -718,6 +726,7 @@
 TestBatchScanResultCache
 TestBigDecimalComparator
 TestBitComparator
+TestBitSetNode
 TestBlockCacheReporting
 TestBlockEvictionFromClient
 TestBlockEvictionFromClient.CustomInnerRegionObserver
@@ -1533,7 +1542,12 @@
 TestMobFileName
 TestMobFlushSnapshotFromClient
 TestMobRestoreFlushSnapshotFromClient
-TestMobRestoreSnapshotFromClient
+TestMobRestoreSnapshotFromClientAfterSplittingRegions
+TestMobRestoreSnapshotFromClientAfterTruncate
+TestMobRestoreSnapshotFromClientClone
+TestMobRestoreSnapshotFromClientGetCompactionState
+TestMobRestoreSnapshotFromClientSchemaChange
+TestMobRestoreSnapshotFromClientSimple
 TestMobRestoreSnapshotHelper
 TestMobSecureExportSnapshot
 TestMobSnapshotCloneIndependence
@@ -1647,6 +1661,8 @@
 TestProcedureBypass
 TestProcedureBypass.RootProcedure
 TestProcedureBypass.StuckProcedure
+TestProcedureBypass.StuckStateMachineProcedure
+TestProcedureBypass.StuckStateMachineState
 TestProcedureBypass.SuspendProcedure
 TestProcedureBypass.TestProcEnv
 TestProcedureCoordinator
@@ -1933,7 +1949,12 @@
 TestRestartCluster
 TestRestoreBoundaryTests
 TestRestoreFlushSnapshotFromClient
-TestRestoreSnapshotFromClient
+TestRestoreSnapshotFromClientAfterSplittingRegions
+TestRestoreSnapshotFromClientAfterTruncate
+TestRestoreSnapshotFromClientClone
+TestRestoreSnapshotFromClientGetCompactionState
+TestRestoreSnapshotFromClientSchemaChange
+TestRestoreSnapshotFromClientSimple
 TestRestoreSnapshotFromClientWithRegionReplicas
 TestRestoreSnapshotHelper
 TestRestoreSnapshotProcedure

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/constant-values.html
--
diff --git a/testdevapidocs/constant-values.html b/testdevapidocs/constant-values.html
index 63ba7f4..170dc44 100644
--- a/testdevapidocs/constant-values.html
+++ b/testdevapidocs/constant-values.html
@@ -8598,6 +8598,25 @@
 
 
 
+org.apache.hadoop.hbase.procedure2.TestProcedureExecution.TestProcedureException 
+
+Modifier and Type
+Constant Field
+Value
+
+
+
+
+
+private static final long
+serialVersionUID
+8798565784658913798L
+
+
+
+
+
+
 org.apache.hadoop.hbase.procedure2.TestProcedureInMemoryChore 
 
 Modifier and Type



[29/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
index 061ce80..bdfc3f8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
@@ -39,2126 +39,2163 @@
 031import java.util.Set;
 032import java.util.concurrent.ConcurrentHashMap;
 033import java.util.concurrent.CopyOnWriteArrayList;
-034import java.util.concurrent.TimeUnit;
-035import java.util.concurrent.atomic.AtomicBoolean;
-036import java.util.concurrent.atomic.AtomicInteger;
-037import java.util.concurrent.atomic.AtomicLong;
-038import java.util.stream.Collectors;
-039import java.util.stream.Stream;
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.HConstants;
-042import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-043import org.apache.hadoop.hbase.log.HBaseMarkers;
-044import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-045import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-046import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-047import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-048import org.apache.hadoop.hbase.security.User;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.hadoop.hbase.util.IdLock;
-051import org.apache.hadoop.hbase.util.NonceKey;
-052import org.apache.hadoop.hbase.util.Threads;
-053import org.apache.yetus.audience.InterfaceAudience;
-054import org.slf4j.Logger;
-055import org.slf4j.LoggerFactory;
-056
-057import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-058import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+034import java.util.concurrent.Executor;
+035import java.util.concurrent.Executors;
+036import java.util.concurrent.TimeUnit;
+037import java.util.concurrent.atomic.AtomicBoolean;
+038import java.util.concurrent.atomic.AtomicInteger;
+039import java.util.concurrent.atomic.AtomicLong;
+040import java.util.stream.Collectors;
+041import java.util.stream.Stream;
+042import org.apache.hadoop.conf.Configuration;
+043import org.apache.hadoop.hbase.HConstants;
+044import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+045import org.apache.hadoop.hbase.log.HBaseMarkers;
+046import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+047import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+048import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+049import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
+050import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+051import org.apache.hadoop.hbase.security.User;
+052import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+053import org.apache.hadoop.hbase.util.IdLock;
+054import org.apache.hadoop.hbase.util.NonceKey;
+055import org.apache.hadoop.hbase.util.Threads;
+056import org.apache.yetus.audience.InterfaceAudience;
+057import org.slf4j.Logger;
+058import org.slf4j.LoggerFactory;
 059
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-061
-062/**
-063 * Thread Pool that executes the submitted procedures.
-064 * The executor has a ProcedureStore associated.
-065 * Each operation is logged and on restart the pending procedures are resumed.
-066 *
-067 * Unless the Procedure code throws an error (e.g. invalid user input)
-068 * the procedure will complete (at some point in time), On restart the pending
-069 * procedures are resumed and the once failed will be rolledback.
+060import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+061import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+062import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+063
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+065
+066/**
+067 * Thread Pool that executes the submitted procedures.
+068 * The executor has a ProcedureStore associated.
+069 * Each operation is logged and on restart the pending procedures are resumed.
 070 *
-071 * The user can add procedures to the executor via submitProcedure(proc)
-072 * check for the finished state via isFinished(procId)
-073 * and get the result via getResult(procId)
-074 */
-075@InterfaceAudience.Private
-076public class ProcedureExecutor<TEnvironment> {
-077  private static final Logger LOG = LoggerFacto
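
The class comment above spells out the client contract: submitProcedure(proc), then isFinished(procId), then getResult(procId). A hedged sketch of that loop, assuming an already-constructed and started executor; raw types and naive polling are used purely for illustration:

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

class SubmitAndWaitSketch {
  // Raw types for brevity; the real executor is generic in its environment type.
  static Object submitAndWait(ProcedureExecutor executor, Procedure proc) throws InterruptedException {
    long procId = executor.submitProcedure(proc); // logged to the associated ProcedureStore
    while (!executor.isFinished(procId)) {
      Thread.sleep(100); // naive polling, illustration only
    }
    return executor.getResult(procId); // the outcome, or the failure information
  }
}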

[29/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
index 2593c38..2cb7b3f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
@@ -40,310 +40,351 @@
 032import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 033import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
 034import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-035import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
-036import org.apache.hadoop.hbase.replication.ReplicationException;
-037import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-038import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-039import org.apache.hadoop.hbase.replication.ReplicationUtils;
-040import org.apache.hadoop.hbase.util.Pair;
-041import org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
-046
-047/**
-048 * The base class for all replication peer related procedure except sync replication state
-049 * transition.
-050 */
-051@InterfaceAudience.Private
-052public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModificationState> {
-053
-054  private static final Logger LOG = LoggerFactory.getLogger(ModifyPeerProcedure.class);
-055
-056  protected static final int UPDATE_LAST_SEQ_ID_BATCH_SIZE = 1000;
+035import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+036import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+037import org.apache.hadoop.hbase.replication.ReplicationException;
+038import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+039import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
+040import org.apache.hadoop.hbase.replication.ReplicationUtils;
+041import org.apache.hadoop.hbase.util.Pair;
+042import org.apache.yetus.audience.InterfaceAudience;
+043import org.slf4j.Logger;
+044import org.slf4j.LoggerFactory;
+045
+046import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+047
+048import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
+049import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+050
+051/**
+052 * The base class for all replication peer related procedure except sync replication state
+053 * transition.
+054 */
+055@InterfaceAudience.Private
+056public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModificationState> {
 057
-058  // The sleep interval when waiting table to be enabled or disabled.
-059  protected static final int SLEEP_INTERVAL_MS = 1000;
-060
-061  protected ModifyPeerProcedure() {
-062  }
-063
-064  protected ModifyPeerProcedure(String peerId) {
-065    super(peerId);
-066  }
-067
-068  /**
-069   * Called before we start the actual processing. The implementation should call the pre CP hook,
-070   * and also the pre-check for the peer modification.
-071   * <p>
-072   * If an IOException is thrown then we will give up and mark the procedure as failed directly. If
-073   * all checks passes then the procedure can not be rolled back any more.
-074   */
-075  protected abstract void prePeerModification(MasterProcedureEnv env)
-076      throws IOException, ReplicationException;
-077
-078  protected abstract void updatePeerStorage(MasterProcedureEnv env) throws ReplicationException;
-079
-080  /**
-081   * Called before we finish the procedure. The implementation can do some logging work, and also
-082   * call the coprocessor hook if any.
-083   * <p>
-084   * Notice that, since we have already done the actual work, throwing {@code IOException} here will
-085   * not fail this procedure, we will just ignore it and finish the procedure as suceeded. If
-086   * {@code ReplicationException} is thrown we will retry since this usually means we fails to
-087   * update the peer storage.
-088   */
-089  protected abstract void postPeerModification(MasterProcedureEnv env)
-090      throws IOException, ReplicationException;
-091
-092  private void releaseLatch() {
-093    ProcedurePrepareLatch.releaseLatch(latch, this);
-094  }
-095
-096  /**
-097   * Implementation class can override this method. By default we will jump to
-098   * POST_PEER_MODIFICATION and finish the procedure.
-099   */
-100  protected PeerModificationState nextStateAfterRefresh
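
The contract described above is the interesting part: prePeerModification may fail the procedure outright, updatePeerStorage does the durable work, and postPeerModification errors are either ignored (IOException) or retried (ReplicationException). A hypothetical subclass sketch illustrating that division of labor; the class name is invented and other abstract members of the real base class are elided, so this is a sketch rather than a compiling subclass:

// Hypothetical subclass, for illustration only.
class NoopPeerProcedure extends ModifyPeerProcedure {

  NoopPeerProcedure(String peerId) {
    super(peerId);
  }

  @Override
  protected void prePeerModification(MasterProcedureEnv env)
      throws IOException, ReplicationException {
    // Validation belongs here: an IOException fails the procedure directly.
  }

  @Override
  protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationException {
    // The durable peer-storage update; past this point the procedure cannot roll back.
  }

  @Override
  protected void postPeerModification(MasterProcedureEnv env)
      throws IOException, ReplicationException {
    // Logging / coprocessor hooks; an IOException here is ignored,
    // a ReplicationException triggers a retry.
  }
}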


[29/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index dcb80c1..1bf7981 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.CompactionChecker
+private static class HRegionServer.CompactionChecker
 extends ScheduledChore
 
 
@@ -233,7 +233,7 @@ extends 
 
 instance
-private final HRegionServer instance
+private final HRegionServer instance
 
 
 
@@ -242,7 +242,7 @@ extends 
 
 majorCompactPriority
-private final int majorCompactPriority
+private final int majorCompactPriority
 
 
 
@@ -251,7 +251,7 @@ extends 
 
 DEFAULT_PRIORITY
-private static final int DEFAULT_PRIORITY
+private static final int DEFAULT_PRIORITY
 
 See Also:
 Constant
 Field Values
@@ -264,7 +264,7 @@ extends 
 
 iteration
-private long iteration
+private long iteration
 
 
 
@@ -281,7 +281,7 @@ extends 
 
 CompactionChecker
-CompactionChecker(HRegionServer h,
+CompactionChecker(HRegionServer h,
   int sleepTime,
   Stoppable stopper)
 
@@ -300,7 +300,7 @@ extends 
 
 chore
-protected void chore()
+protected void chore()
 Description copied from 
class: ScheduledChore
 The task to execute on each scheduled execution of the 
Chore
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 19d70bf..84e0084 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 serverName
-private final ServerName serverName
+private final ServerName serverName
 
 
 
@@ -227,7 +227,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 seqNum
-private final long seqNum
+private final long seqNum
 
 
 
@@ -236,7 +236,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ts
-private final long ts
+private final long ts
 
 
 
@@ -253,7 +253,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 MovedRegionInfo
-public MovedRegionInfo(ServerName serverName,
+public MovedRegionInfo(ServerName serverName,
long closeSeqNum)
 
 
@@ -271,7 +271,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
 
 
@@ -280,7 +280,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getSeqNum
-public long getSeqNum()
+public long getSeqNum()
 
 
 
@@ -289,7 +289,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getMoveTime
-public long getMoveTime()
+public long getMoveTime()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 3d963f4..7c909a6 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static final class HRegionServer.MovedRegionsCleaner
+protected static final class HRegionServer.MovedRegionsCleaner
 extends ScheduledChore
 implements Stoppable
 Creates a Chore thread to clean the moved region 
cache.
@@ -242,7 +242,7 @@ implements 
 
 regionServer
-private HRegionServer regionServer
+private HRegionServer regionServer

[29/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index a5d292d..d85465c 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -520,14 +520,14 @@
 
java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)

-org.apache.hadoop.hbase.util.PoolMap.PoolType
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
+org.apache.hadoop.hbase.util.ChecksumType
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer)
 org.apache.hadoop.hbase.util.PrettyPrinter.Unit
 org.apache.hadoop.hbase.util.Order
+org.apache.hadoop.hbase.util.PoolMap.PoolType
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer)
-org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index c83ea97..4c1f36c 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -1500,6 +1500,7 @@
 org.apache.hadoop.hbase.util.EnvironmentEdgeManager
 org.apache.hadoop.hbase.executor.EventHandler (implements java.lang.Comparable, java.lang.Runnable)
 
+org.apache.hadoop.hbase.regionserver.handler.AssignRegionHandler
 org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler
 
 org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler
@@ -1521,6 +1522,7 @@
 org.apache.hadoop.hbase.master.snapshot.EnabledTableSnapshotHandler
 
 
+org.apache.hadoop.hbase.regionserver.handler.UnassignRegionHandler
 org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index af8aab0..f2bd10b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "7ab77518a2569e2416a50020393aa386e7734501";
+011  public static final String revision = "8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1";
 012  public static final String user = "jenkins";
-013  public static final String date = "Sat Sep 22 14:42:20 UTC 2018";
+013  public static final String date = "Tue Sep 25 14:45:54 UTC 2018";
 014  public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "40cfe40c509b5c9b75784418a7a88604";
+015  public static final String srcChecksum = "5bd47f87f2a094af440e01bd108f2b16";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
index 49f081b..33c9cc0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
@@ -35,309 +35,328 @@
 027import java.util.concurrent.BlockingQueue;
 028

[29/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
index a5789e0..93a57cb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin {
-235  private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory rpcCallerFactory;
-248  private RpcControllerFactory rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254    return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection connection) throws IOException {
-258    this.conf = connection.getConfiguration();
-259    this.connection = connection;
-260
-261    // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
-262    this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263        HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266    this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268    this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270    this.syncWaitTimeout = this.conf.getInt(
-271      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273    this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
-274    this.rpcControllerFactory = connection.getRpcControllerFactory();
-275
-276    this.ng = this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable e) {
-281    // Currently does nothing but throw the passed message and exception
-282    this.aborted = true;
-283    throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288    return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
-293      throws IOException {
-294    return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295      TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300      throws IOException {
-301    Boolean abortProcResponse =
-302        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303            getRpcControllerFactory()) {
-304          @Override
-305          protected AbortProcedureResponse rpcCall() throws Exception {
-306            AbortProcedureRequest abortProcRequest =
-307                AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308            return master.abortProcedure(getRpcController(), abortProcRequest);
-309          }
-310        }).getIsProcedureAborted();
-311    return new AbortProcedureFuture(this, procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> listTableDescriptors() throws IOException {
-316    return listTableDescriptors((Pattern)null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321    return listTableDescriptors(pattern, false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326      throws IOException {
-327    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328        getRpcControllerFactory()) {
-329      @Override
-330      protected List<TableDescriptor> rpcCall() throws Exception {
-331        GetTableDescriptorsRequest req =
-332            RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334            req));
-335      }
-336    });
-337  }
-338
-339  
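
The abortProcedure() pair above is the usual sync-over-async shape in HBaseAdmin: the synchronous call just waits on abortProcedureAsync() with syncWaitTimeout. A hedged client-side usage sketch; the connection and the procedure id are assumed to exist already:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

class AbortProcedureSketch {
  static void abortExample(Connection conn, long procId) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      boolean aborted = admin.abortProcedure(procId, true); // mayInterruptIfRunning = true
      if (!aborted) {
        // the master declined, or the procedure had already completed
      }
    }
  }
}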

[29/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/FamilyFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FamilyFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FamilyFilter.html
index 58e8dfb..db6bbc9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FamilyFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FamilyFilter.html
@@ -29,147 +29,142 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024import java.util.Objects;
-025
-026import org.apache.hadoop.hbase.Cell;
-027import org.apache.hadoop.hbase.CompareOperator;
-028import org.apache.yetus.audience.InterfaceAudience;
-029import org.apache.hadoop.hbase.exceptions.DeserializationException;
-030import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-031import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-032import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-033
-034/**
-035 * <p>
-036 * This filter is used to filter based on the column family. It takes an
-037 * operator (equal, greater, not equal, etc) and a byte [] comparator for the
-038 * column family portion of a key.
-039 * <p>
-040 * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and {@link org.apache.hadoop.hbase.filter.SkipFilter}
-041 * to add more control.
-042 * <p>
-043 * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}.
-044 * <p>
-045 * If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])}
-046 * directly rather than a filter.
-047 */
-048@InterfaceAudience.Public
-049public class FamilyFilter extends CompareFilter {
-050
-051  /**
-052   * Constructor.
-053   *
-054   * @param familyCompareOp  the compare op for column family matching
-055   * @param familyComparator the comparator for column family matching
-056   * @deprecated Since 2.0.0. Will be removed in 3.0.0.
-057   * Use {@link #FamilyFilter(CompareOperator, ByteArrayComparable)}
-058   */
-059  @Deprecated
-060  public FamilyFilter(final CompareOp familyCompareOp,
-061                      final ByteArrayComparable familyComparator) {
-062    super(familyCompareOp, familyComparator);
-063  }
-064
-065  /**
-066   * Constructor.
-067   *
-068   * @param op the compare op for column family matching
-069   * @param familyComparator the comparator for column family matching
-070   */
-071  public FamilyFilter(final CompareOperator op,
-072                      final ByteArrayComparable familyComparator) {
-073    super(op, familyComparator);
-074  }
-075
-076  @Deprecated
-077  @Override
-078  public ReturnCode filterKeyValue(final Cell c) {
-079    return filterCell(c);
-080  }
-081
-082  @Override
-083  public ReturnCode filterCell(final Cell c) {
-084    int familyLength = c.getFamilyLength();
-085    if (familyLength > 0) {
-086      if (compareFamily(getCompareOperator(), this.comparator, c)) {
-087        return ReturnCode.NEXT_ROW;
-088      }
-089    }
-090    return ReturnCode.INCLUDE;
-091  }
-092
-093  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-094    ArrayList<Object> arguments = CompareFilter.extractArguments(filterArguments);
-095    CompareOperator compareOp = (CompareOperator)arguments.get(0);
-096    ByteArrayComparable comparator = (ByteArrayComparable)arguments.get(1);
-097    return new FamilyFilter(compareOp, comparator);
-098  }
-099
-100  /**
-101   * @return The filter serialized using pb
-102   */
-103  @Override
-104  public byte [] toByteArray() {
-105    FilterProtos.FamilyFilter.Builder builder =
-106      FilterProtos.FamilyFilter.newBuilder();
-107    builder.setCompareFilter(super.convert());
-108    return builder.build().toByteArray();
-109  }
-110
-111  /**
-112   * @param pbBytes A pb serialized {@link FamilyFilter} instance
-113   * @return An instance of {@link FamilyFilter} made from bytes
-114   * @throws DeserializationException
-115   * @see #toByteArray
-116   */
-117  public static FamilyFilter parseFrom(final byte [] pbBytes)
-118  throws DeserializationException {
-119    FilterProtos.FamilyFilter proto;
-120    try {
-121      proto = FilterProtos.FamilyFilter.parseFrom(pbBytes);
-122    } catch (InvalidProtocolBufferException e) {
-123      throw new DeserializationException(e);
-124    }
-125    final CompareOperator valueCompareOp =
-126      CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
-127    ByteArrayComparable valueComparator = null;
-128    try {
-129      if (proto.getCompareFilter().hasComparator()) {
-130        valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
-131
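
As the Javadoc above says, FamilyFilter only earns its keep when the family is not known up front; otherwise Get#addFamily(byte[]) is cheaper. A hedged usage sketch with the non-deprecated constructor; the table handle and the family name "cf" are invented for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

class FamilyFilterSketch {
  static void scanFamily(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareOperator.EQUAL,
        new BinaryComparator(Bytes.toBytes("cf")))); // keep only family "cf"
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // each Result now holds only cells from family "cf"
      }
    }
  }
}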

[29/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index b6f0498..6641f94 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends java.lang.Object
 implements RegionScanner, Shipper, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
@@ -425,7 +425,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -434,7 +434,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -445,7 +445,7 @@ implements 
 
 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -456,7 +456,7 @@ implements 
 
 filterClosed
-private boolean filterClosed
+private boolean filterClosed
 
 
 
@@ -465,7 +465,7 @@ implements 
 
 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow
 
 
 
@@ -474,7 +474,7 @@ implements 
 
 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow
 
 
 
@@ -483,7 +483,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator
 
 
 
@@ -501,7 +501,7 @@ implements 
 
 readPt
-private final long readPt
+private final long readPt
 
 
 
@@ -510,7 +510,7 @@ implements 
 
 maxResultSize
-private final long maxResultSize
+private final long maxResultSize
 
 
 
@@ -519,7 +519,7 @@ implements 
 
 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 filter
-private final FilterWrapper filter
+private final FilterWrapper filter
 
 
 
@@ -545,7 +545,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners,
   HRegion region)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -561,7 +561,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners,
   HRegion region,
   long nonceGroup,
@@ -587,7 +587,7 @@ implements 
 
 getRegionInfo
-public RegionInfo getRegionInfo()
+public RegionInfo getRegionInfo()
 
 Specified by:
 getRegionInfo in
 interface RegionScanner
@@ -602,7 +602,7 @@ implements 
 
 initializeScanners
-protected void initializeScanners(Scan scan,
+protected void initializeScanners(Scan scan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List additionalScanners)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -617,7 +617,7 @@ implements 
 
 initializeKVHeap
-protected void initializeKVHeap(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
+protected void initializeKVHeap(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List joinedScanners,
 HRegion region)
  throws 

[29/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index e42e654..fe4e081 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -560,7 +560,7 @@
 552    TransitRegionStateProcedure proc;
 553    regionNode.lock();
 554    try {
-555      preTransitCheck(regionNode, RegionStates.STATES_EXPECTED_ON_OPEN);
+555      preTransitCheck(regionNode, STATES_EXPECTED_ON_ASSIGN);
 556      proc = TransitRegionStateProcedure.assign(getProcedureEnvironment(), regionInfo, sn);
 557      regionNode.setProcedure(proc);
 558    } finally {
@@ -581,7 +581,7 @@
 573    TransitRegionStateProcedure proc;
 574    regionNode.lock();
 575    try {
-576      preTransitCheck(regionNode, RegionStates.STATES_EXPECTED_ON_CLOSE);
+576      preTransitCheck(regionNode, STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
 577      proc = TransitRegionStateProcedure.unassign(getProcedureEnvironment(), regionInfo);
 578      regionNode.setProcedure(proc);
 579    } finally {
@@ -599,7 +599,7 @@
 591    TransitRegionStateProcedure proc;
 592    regionNode.lock();
 593    try {
-594      preTransitCheck(regionNode, RegionStates.STATES_EXPECTED_ON_CLOSE);
+594      preTransitCheck(regionNode, STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
 595      regionNode.checkOnline();
 596      proc = TransitRegionStateProcedure.move(getProcedureEnvironment(), regionInfo, targetServer);
 597      regionNode.setProcedure(proc);
@@ -1419,448 +1419,483 @@
 1411  }
 1412
 1413  // 
-1414  //  Region Status update
-1415  //  Should only be called in TransitRegionStateProcedure
-1416  // 
-1417  private void transitStateAndUpdate(RegionStateNode regionNode, RegionState.State newState,
-1418      RegionState.State... expectedStates) throws IOException {
-1419    RegionState.State state = regionNode.getState();
-1420    regionNode.transitionState(newState, expectedStates);
-1421    boolean succ = false;
-1422    try {
-1423      regionStateStore.updateRegionLocation(regionNode);
-1424      succ = true;
-1425    } finally {
-1426      if (!succ) {
-1427        // revert
-1428        regionNode.setState(state);
-1429      }
-1430    }
-1431  }
-1432
-1433  // should be called within the synchronized block of RegionStateNode
-1434  void regionOpening(RegionStateNode regionNode) throws IOException {
-1435    transitStateAndUpdate(regionNode, State.OPENING, RegionStates.STATES_EXPECTED_ON_OPEN);
-1436    regionStates.addRegionToServer(regionNode);
-1437    // update the operation count metrics
-1438    metrics.incrementOperationCounter();
-1439  }
-1440
-1441  // should be called within the synchronized block of RegionStateNode.
-1442  // The parameter 'giveUp' means whether we will try to open the region again, if it is true, then
-1443  // we will persist the FAILED_OPEN state into hbase:meta.
-1444  void regionFailedOpen(RegionStateNode regionNode, boolean giveUp) throws IOException {
-1445    RegionState.State state = regionNode.getState();
-1446    ServerName regionLocation = regionNode.getRegionLocation();
-1447    if (giveUp) {
-1448      regionNode.setState(State.FAILED_OPEN);
-1449      regionNode.setRegionLocation(null);
-1450      boolean succ = false;
-1451      try {
-1452        regionStateStore.updateRegionLocation(regionNode);
-1453        succ = true;
-1454      } finally {
-1455        if (!succ) {
-1456          // revert
-1457          regionNode.setState(state);
-1458          regionNode.setRegionLocation(regionLocation);
-1459        }
-1460      }
-1461    }
-1462    if (regionLocation != null) {
-1463      regionStates.removeRegionFromServer(regionLocation, regionNode);
-1464    }
-1465  }
-1466
-1467  // should be called within the synchronized block of RegionStateNode
-1468  void regionOpened(RegionStateNode regionNode) throws IOException {
-1469    // TODO: OPENING Updates hbase:meta too... we need to do both here and there?
-1470    // That is a lot of hbase:meta writing.
-1471    transitStateAndUpdate(regionNode, State.OPEN, RegionStates.STATES_EXPECTED_ON_OPEN);
-1472    RegionInfo hri = regionNode.getRegionInfo();
-1473    if (isMetaRegion(hri)) {
-1474      // Usually we'd set a table ENABLED at this stage but hbase:meta is ALWAYs 
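
The transitStateAndUpdate() code above shows a reusable idiom: flip the in-memory state first, persist, and revert in a finally block if persistence did not succeed. A generic sketch of the idiom, detached from the AssignmentManager types; every name here is illustrative:

import java.util.concurrent.atomic.AtomicReference;

class RevertOnFailureSketch {
  static <S> void transitAndPersist(AtomicReference<S> state, S newState, Runnable persist) {
    S old = state.getAndSet(newState); // optimistic in-memory transition
    boolean succ = false;
    try {
      persist.run();                   // e.g. write the new state to hbase:meta
      succ = true;
    } finally {
      if (!succ) {
        state.set(old);                // revert, like the `if (!succ)` blocks above
      }
    }
  }
}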

[29/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
index c8e02c8..309387e 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
@@ -27,195 +27,211 @@
 019
 020import java.io.IOException;
 021import java.util.ArrayList;
-022import java.util.Comparator;
-023import java.util.TreeSet;
-024
-025import org.apache.hadoop.hbase.Cell;
-026import org.apache.hadoop.hbase.CellUtil;
-027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.yetus.audience.InterfaceAudience;
-029import org.apache.hadoop.hbase.exceptions.DeserializationException;
-030import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-031import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-032import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-033import org.apache.hadoop.hbase.util.Bytes;
-034
-035/**
-036 * This filter is used for selecting only those keys with columns that matches
-037 * a particular prefix. For example, if prefix is 'an', it will pass keys will
-038 * columns like 'and', 'anti' but not keys with columns like 'ball', 'act'.
-039 */
-040@InterfaceAudience.Public
-041public class MultipleColumnPrefixFilter extends FilterBase {
-042  protected byte [] hint = null;
-043  protected TreeSet<byte []> sortedPrefixes = createTreeSet();
-044  private final static int MAX_LOG_PREFIXES = 5;
-045
-046  public MultipleColumnPrefixFilter(final byte [][] prefixes) {
-047    if (prefixes != null) {
-048      for (int i = 0; i < prefixes.length; i++) {
-049        if (!sortedPrefixes.add(prefixes[i]))
-050          throw new IllegalArgumentException ("prefixes must be distinct");
-051      }
-052    }
-053  }
-054
-055  public byte [][] getPrefix() {
-056    int count = 0;
-057    byte [][] temp = new byte [sortedPrefixes.size()][];
-058    for (byte [] prefixes : sortedPrefixes) {
-059      temp [count++] = prefixes;
-060    }
-061    return temp;
-062  }
-063
-064  @Override
-065  public boolean filterRowKey(Cell cell) throws IOException {
-066    // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-067    return false;
-068  }
-069
-070  @Deprecated
-071  @Override
-072  public ReturnCode filterKeyValue(final Cell c) {
-073    return filterCell(c);
-074  }
-075
-076  @Override
-077  public ReturnCode filterCell(final Cell c) {
-078    if (sortedPrefixes.isEmpty()) {
-079      return ReturnCode.INCLUDE;
-080    } else {
-081      return filterColumn(c);
-082    }
-083  }
-084
-085  public ReturnCode filterColumn(Cell cell) {
-086    byte [] qualifier = CellUtil.cloneQualifier(cell);
-087    TreeSet<byte []> lesserOrEqualPrefixes =
-088      (TreeSet<byte []>) sortedPrefixes.headSet(qualifier, true);
-089
-090    if (lesserOrEqualPrefixes.size() != 0) {
-091      byte [] largestPrefixSmallerThanQualifier = lesserOrEqualPrefixes.last();
-092
-093      if (Bytes.startsWith(qualifier, largestPrefixSmallerThanQualifier)) {
-094        return ReturnCode.INCLUDE;
-095      }
-096
-097      if (lesserOrEqualPrefixes.size() == sortedPrefixes.size()) {
-098        return ReturnCode.NEXT_ROW;
-099      } else {
-100        hint = sortedPrefixes.higher(largestPrefixSmallerThanQualifier);
-101        return ReturnCode.SEEK_NEXT_USING_HINT;
-102      }
-103    } else {
-104      hint = sortedPrefixes.first();
-105      return ReturnCode.SEEK_NEXT_USING_HINT;
-106    }
-107  }
-108
-109  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-110    byte [][] prefixes = new byte [filterArguments.size()][];
-111    for (int i = 0 ; i < filterArguments.size(); i++) {
-112      byte [] columnPrefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(i));
-113      prefixes[i] = columnPrefix;
-114    }
-115    return new MultipleColumnPrefixFilter(prefixes);
-116  }
-117
-118  /**
-119   * @return The filter serialized using pb
-120   */
-121  @Override
-122  public byte [] toByteArray() {
-123    FilterProtos.MultipleColumnPrefixFilter.Builder builder =
-124      FilterProtos.MultipleColumnPrefixFilter.newBuilder();
-125    for (byte [] element : sortedPrefixes) {
-126      if (element != null) builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element));
-127    }
-128    return builder.build().toByteArray();
-129  }
-130
-131  /**
-132   * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance
-133   * @return An instance of {@link MultipleColumnPrefixFilter} made fro
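
A hedged usage sketch matching the Javadoc's own example, where prefix "an" passes qualifiers like "and" and "anti" but not "ball" or "act"; the table handle is assumed to exist:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

class PrefixScanSketch {
  static void scanWithPrefixes(Table table) throws IOException {
    byte[][] prefixes = new byte[][] { Bytes.toBytes("an") }; // must be distinct
    Scan scan = new Scan();
    scan.setFilter(new MultipleColumnPrefixFilter(prefixes));
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // only cells whose qualifiers start with "an" reach here
      }
    }
  }
}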

[29/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index db8431b..a8cb7c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence (edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final CancelableProgressable reporter) throws IOException {
-881
-882    //Refuse to open the region if there is no column family in the table
-883    if (htableDescriptor.getColumnFamilyCount() == 0) {
-884      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
-885          " should have at least one column family.");
-886    }
-887
-888    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
-889    long nextSeqId = -1;
-890    try {
-891      nextSeqId = initializeRegionInternals(reporter, status);
-892      return nextSeqId;
-893    } finally {
-894      // nextSeqid will be -1 if the initialization fails.
-895      // At least it will be 0 otherwise.
-896      if (nextSeqId == -1) {
-897        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() +
-898          " initialization.");
-899      }
-900    }
-901  }
-902
-903  private long initializeRegionInternals(final CancelableProgressable reporter,
-904      final MonitoredTask status) throws IOException {
-905    if (coprocessorHost != null) {
-906      status.setStatus("Running coprocessor pre-open hook");
-907      coprocessorHost.preOpen();
-908    }
-909
-910    // Write HRI to a file in case we need to recover hbase:meta
-911    // Only the primary replica should write .regioninfo
-912    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913      status.setStatus("Writing region info on filesystem");
-914      fs.checkRegionInfoOnFilesystem();
-915    }
-916
-917    // Initialize all the HStores
-918    status.setStatus("Initializing all the Stores");
-919    long maxSeqId = initializeStores(reporter, status);
-920    this.mvcc.advanceTo(maxSeqId);
-921    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922      Collection stores = this.stores.values();
-923      try {
-924        // update the stores that we are replaying
-925        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
-926        stores.forEach(HStore::startReplayingFromWAL);
-927        // Recover any edits if available.
-928        maxSeqId = Math.max(maxSeqId,
-929          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-930        // Make sure mvcc is up to max.
-931        this.mvcc.advanceTo(maxSeqId);
-932      } finally {
-933        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
-934        // update the stores that we are done replaying
-935        stores.forEach(HStore::stopReplayingFromWAL);
-936      }
-937    }
-938    this.lastReplayedOpenRegionSeqId = maxSeqId;
-939
-940    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941    this.writestate.flushRequested = false;
-942    this.writestate.compacting.set(0);
-943
-944    if (this.writestate.writesEnabled) {
-945      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
-946      // Remove temporary data left over from old regions
-947      status.setStatus("Cleaning up temporary data from old regions");
-948      fs.cleanupTempDir();
-949    }
-950
-951    if (this.writestate.writesEnabled) {
-952      status.setStatus("Cleaning up detritus from prior splits");
-953      // Get rid of any splits or merges that were lost in-progress.  Clean out
-954      // these directories here on open.  We may be opening a region that was
-955      // being split but we crashed in the middle of it all.
-956      LOG.debug("Cleaning up detritus for " + this.getRegionInfo().getEncodedName());
-957      fs.cleanupAnySplitDetritus();
-958      fs.cleanupMergesDir();
-959    }
+880  @VisibleForTesting
+881  long initialize(final CancelableProgressable reporter) throws IOException {
+882
+883    //Refuse to open the region if there is no column family in the table
+884    if (htableDescriptor.getColumnFamilyCount() == 0) {
+885      throw new DoNotRetryIOException("Table " + htableDescriptor.getTableName().getNameAsString()+
+886          " should have at least one column family.");
+887    }
+888
+889    Monito
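
The initialize() body above uses a guard pattern worth calling out: the sequence id stays -1 unless the internals succeed, and the finally block uses that sentinel to abort the monitored status. A minimal, framework-free sketch of the same pattern; the Status interface, the descriptor count, and the stand-in sequence id are invented for this illustration and are not HBase's real MonitoredTask API.

import java.io.IOException;

public class RegionInitSketch {
  // Stand-in for HBase's MonitoredTask, invented for this sketch.
  interface Status {
    void setStatus(String s);
    void abort(String why);
    void markComplete(String msg);
  }

  private final int columnFamilyCount;

  RegionInitSketch(int columnFamilyCount) {
    this.columnFamilyCount = columnFamilyCount;
  }

  long initialize(Status status) throws IOException {
    // Refuse to open without at least one column family, as above.
    if (columnFamilyCount == 0) {
      throw new IOException("Table should have at least one column family.");
    }
    long nextSeqId = -1;
    try {
      status.setStatus("Initializing region");
      nextSeqId = 42L;   // stand-in for initializeRegionInternals(...)
      return nextSeqId;
    } finally {
      // nextSeqId stays -1 only if the internals threw before assigning it.
      if (nextSeqId == -1) {
        status.abort("Exception during region initialization.");
      } else {
        status.markComplete("Region opened, next sequence id=" + nextSeqId);
      }
    }
  }
}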

[29/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
deleted file mode 100644
index bfde48c..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
+++ /dev/null
@@ -1,402 +0,0 @@
-org.apache.hadoop.hbase.master.assignment
-Enum RegionStates.ServerState
-
-java.lang.Object
-  java.lang.Enum
-    org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState
-
-All Implemented Interfaces:
-Serializable, Comparable
-
-Enclosing class:
-RegionStates
-
-public static enum RegionStates.ServerState
-extends Enum
-Server State.
-
-Enum Constant Summary
-
-Enum Constant and Description
-
-OFFLINE
-WAL splitting done.
-
-ONLINE
-Initial state.
-
-SPLITTING
-Server expired/crashed.
-
-SPLITTING_META
-Only server which carries meta can have this state.
-
-SPLITTING_META_DONE
-Indicate that the meta splitting is done.
-
-Method Summary
-
-Modifier and Type / Method and Description
-
-static RegionStates.ServerState
-valueOf(String name)
-Returns the enum constant of this type with the specified name.
-
-static RegionStates.ServerState[]
-values()
-Returns an array containing the constants of this enum type, in the order they are declared.
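
As a compact restatement of the deleted page, here is a sketch assembled from the constant descriptions above (and from the fuller descriptions repeated later in this digest). It is for orientation only, not the class as it appears in the HBase source tree.

// Sketch of the documented server lifecycle states used during WAL splitting.
public enum ServerStateSketch {
  ONLINE,              // Initial state; the server is available.
  SPLITTING_META,      // Only the server carrying meta; its meta WAL is split first.
  SPLITTING_META_DONE, // Meta splitting finished; meta can be reassigned.
  SPLITTING,           // Server expired/crashed; its WALs are being split.
  OFFLINE;             // WAL splitting done; regions can be safely reassigned.

  /** True once it is safe to reassign this server's (non-meta) regions. */
  public boolean walsSplit() {
    return this == OFFLINE;
  }
}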

[29/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
index 39170f0..7859ebc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
@@ -230,564 +230,567 @@
 222        }
 223      }
 224    } catch (InterruptedException e) {
-225      e.printStackTrace();
-226    }
-227  }
-228
-229  @Override
-230  public void setup(Context context) throws IOException {
-231    cfRenameMap = createCfRenameMap(context.getConfiguration());
-232    filter = instantiateFilter(context.getConfiguration());
-233    int reduceNum = context.getNumReduceTasks();
-234    Configuration conf = context.getConfiguration();
-235    TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236    try (Connection conn = ConnectionFactory.createConnection(conf);
-237        RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-238      byte[][] startKeys = regionLocator.getStartKeys();
-239      if (startKeys.length != reduceNum) {
-240        throw new IOException("Region split after job initialization");
-241      }
-242      CellWritableComparable[] startKeyWraps =
-243          new CellWritableComparable[startKeys.length - 1];
-244      for (int i = 1; i < startKeys.length; ++i) {
-245        startKeyWraps[i - 1] =
-246            new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247      }
-248      CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249    }
-250  }
-251  }
-252
-253  /**
-254   * A mapper that just writes out KeyValues.
-255   */
-256  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257      justification="Writables are going away and this has been this way forever")
-258  public static class CellImporter extends TableMapper {
-259    private Map cfRenameMap;
-260    private Filter filter;
-261    private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class);
-262
-263    /**
-264     * @param row  The current table row key.
-265     * @param value  The columns.
-266     * @param context  The current context.
-267     * @throws IOException When something is broken with the data.
-268     */
-269    @Override
-270    public void map(ImmutableBytesWritable row, Result value,
-271      Context context)
-272    throws IOException {
-273      try {
-274        if (LOG.isTraceEnabled()) {
-275          LOG.trace("Considering the row."
-276              + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
-277        }
-278        if (filter == null
-279            || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(),
-280                (short) row.getLength()))) {
-281          for (Cell kv : value.rawCells()) {
-282            kv = filterKv(filter, kv);
-283            // skip if we filtered it out
-284            if (kv == null) continue;
-285            context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286          }
-287        }
-288      } catch (InterruptedException e) {
-289        e.printStackTrace();
-290      }
-291    }
-292
-293    @Override
-294    public void setup(Context context) {
-295      cfRenameMap = createCfRenameMap(context.getConfiguration());
-296      filter = instantiateFilter(context.getConfiguration());
-297    }
-298  }
-299
-300  /**
-301   * Write table content out to files in hdfs.
-302   */
-303  public static class Importer extends TableMapper {
-304    private Map cfRenameMap;
-305    private List clusterIds;
-306    private Filter filter;
-307    private Durability durability;
-308
-309    /**
-310     * @param row  The current table row key.
-311     * @param value  The columns.
-312     * @param context  The current context.
-313     * @throws IOException When something is broken with the data.
-314     */
-315    @Override
-316    public void map(ImmutableBytesWritable row, Result value,
-317      Context context)
-318    throws IOException {
-319      try {
-320        writeResult(row, value, context);
-321      } catch (InterruptedException e) {
-322        e.printStackTrace();
-323      }
-324    }
-325
-326    private void writeResult(ImmutableBytesWritable key,
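
The CellImporter/Importer mappers above share one shape: read the rename map and filter out of the job configuration in setup(), then drop filtered cells in map(). A stripped-down sketch of that shape; the configuration key and buildFilter helper are hypothetical (the real tool wires this through instantiateFilter(Configuration)), and it preserves the interrupt instead of the printStackTrace() in the original.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;

public class FilteringMapperSketch
    extends TableMapper<ImmutableBytesWritable, Result> {

  private Filter filter;

  @Override
  protected void setup(Context context) {
    // Hypothetical key; Import uses instantiateFilter(context.getConfiguration()).
    filter = buildFilter(context.getConfiguration().get("sketch.filter.class"));
  }

  @Override
  protected void map(ImmutableBytesWritable row, Result value, Context context)
      throws IOException {
    try {
      if (filter == null) {
        context.write(row, value);   // pass everything through
      }
      // else: apply the filter per cell, as CellImporter does with filterKv(...).
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();   // preserve the interrupt flag
      throw new IOException(e);
    }
  }

  private static Filter buildFilter(String className) {
    return null;   // placeholder: a real job would instantiate this reflectively
  }
}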

[29/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index b7b4236..3d1edb3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!  + <= See note on BLOCK_METADATA_SPACE above.
 252   * ++
 253   * 
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, boolean)
 255   */
-256  static final CacheableDeserializer BLOCK_DESERIALIZER =
-257      new CacheableDeserializer() {
-258    @Override
-259    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260        throws IOException {
-261      // The buf has the file block followed by block metadata.
-262      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
-263      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
-264      // Get a new buffer to pass the HFileBlock for it to 'own'.
-265      ByteBuff newByteBuff;
-266      if (reuse) {
-267        newByteBuff = buf.slice();
-268      } else {
-269        int len = buf.limit();
-270        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
-271        newByteBuff.put(0, buf, buf.position(), len);
-272      }
-273      // Read out the BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274      buf.position(buf.limit());
-275      buf.limit(buf.limit() + HFileBlock.BLOCK_METADATA_SPACE);
-276      boolean usesChecksum = buf.get() == (byte) 1;
-277      long offset = buf.getLong();
-278      int nextBlockOnDiskSize = buf.getInt();
-279      HFileBlock hFileBlock =
-280          new HFileBlock(newByteBuff, usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281      return hFileBlock;
-282    }
-283
-284    @Override
-285    public int getDeserialiserIdentifier() {
-286      return DESERIALIZER_IDENTIFIER;
-287    }
-288
-289    @Override
-290    public HFileBlock deserialize(ByteBuff b) throws IOException {
-291      // Used only in tests
-292      return deserialize(b, false, MemoryType.EXCLUSIVE);
-293    }
-294  };
-295
-296  private static final int DESERIALIZER_IDENTIFIER;
-297  static {
-298    DESERIALIZER_IDENTIFIER =
-299        CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306    this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, boolean bufCopy) {
-314    init(that.blockType, that.onDiskSizeWithoutHeader,
-315        that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316        that.offset, that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317    if (bufCopy) {
-318      this.buf = new SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit())));
-319    } else {
-320      this.buf = that.buf.duplicate();
-321    }
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block from the given fields. This constructor
-326   * is used only while writing blocks and caching,
-327   * and is sitting in a byte buffer and we want to stuff the block into cache.
-328   *
-329   * TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer {
+259    private BlockDeserializer() {
+260    }
+261
+262    @Override
+263    public HFileBlock deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264        throws IOException {
+265      // The buf has the file block followed by block metadata.
+266      // Set limit to just before the BLOCK_METADATA_SPACE then rewind.
+267      buf.limit(buf.limit() - BLOCK_METADATA_SPACE).rewind();
+268      // Get a new buffer to pass the HFileBlock for it to 'own'.
+269      ByteBuff newByteBuff;
+270      if (reuse) {
+271        newByteBuff = buf.slice();
+272      } else {
+273        int len = buf.limit();
+274        newByteBuff = new SingleByteBuff(ByteBuffer.allocate(len));
+275        newByteBuff.put(0, buf, buf.po
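
The reuse flag in deserialize(...) above decides between sharing the caller's buffer and copying it. The same decision, sketched with plain java.nio buffers so it runs standalone; HBase's ByteBuff/SingleByteBuff wrappers are elided here.

import java.nio.ByteBuffer;

public final class ReuseOrCopySketch {
  static ByteBuffer adopt(ByteBuffer src, boolean reuse) {
    if (reuse) {
      // Share the bytes: a slice views the same memory, zero copy.
      return src.slice();
    }
    // Own the bytes: copy into a fresh buffer the block can keep.
    ByteBuffer copy = ByteBuffer.allocate(src.remaining());
    copy.put(src.duplicate());   // duplicate() so src's position is untouched
    copy.flip();
    return copy;
  }

  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
    System.out.println(adopt(src, true).capacity());   // 4, shared memory
    System.out.println(adopt(src, false).capacity());  // 4, private copy
  }
}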


[29/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
index d044d2e..33176c2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
@@ -382,7 +382,7 @@ extends AbstractStateMachineTableProcedure
-acquireLock, checkOnline, checkTableModifiable, getRegionDir, getUser, preflightChecks, releaseLock, releaseSyncLatch, setUser, toStringClassDetails
+acquireLock, checkOnline, checkTableModifiable, getRegionDir, getUser, preflightChecks, releaseLock, releaseSyncLatch, setUser, toStringClassDetails, waitInitialized
 
 
 
@@ -396,7 +396,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerQueue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerQueue.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerQueue.html
index 850636b..248e97f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerQueue.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerQueue.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":9};
+var methods = {"i0":10,"i1":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -187,13 +187,9 @@ extends 
 boolean
-isAvailable() 
-
-
-boolean
 requireExclusiveLock(Procedure proc) 
 
-
+
 private static boolean
 requirePeerExclusiveLock(PeerProcedureInterface proc) 
 
@@ -203,7 +199,7 @@ extends Queue
-add,
 compareKey,
 compareTo,
 getKey,
 getLockStatus,
 getPriority,
 isEmpty,
 peek,
 poll, size,
 toString
+add,
 compareKey,
 compareTo,
 getKey,
 getLockStatus,
 getPriority,
 isAvailable,
 isEmpty,
 peek, poll,
 size,
 toString
 
 
 
@@ -244,26 +240,13 @@ extends 
-
-
-
-
-isAvailable
-public boolean isAvailable()
-
-Overrides:
-isAvailable in
 class QueueString>
-
-
-
 
 
 
 
 
 requireExclusiveLock
-public boolean requireExclusiveLock(Procedure proc)
+public boolean requireExclusiveLock(Procedure proc)
 
 Specified by:
 requireExclusiveLock in
 class Queue[29/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque<>();
-537} else if (rLoads.size() >= 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647
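
computeCost(...) above is a weighted sum with an early exit: each function's cost is scaled by its multiplier, disabled functions (multiplier <= 0) are skipped, and the loop stops as soon as the running total exceeds the best cost seen so far. A self-contained sketch of that aggregation; the example weights and costs are invented.

import java.util.List;
import java.util.function.DoubleSupplier;

public class WeightedCostSketch {
  static final class Weighted {
    final double multiplier;
    final DoubleSupplier cost;
    Weighted(double multiplier, DoubleSupplier cost) {
      this.multiplier = multiplier;
      this.cost = cost;
    }
  }

  static double computeCost(List<Weighted> functions, double previousCost) {
    double total = 0;
    for (Weighted f : functions) {
      if (f.multiplier <= 0) {
        continue;                      // disabled cost function: skip entirely
      }
      total += f.multiplier * f.cost.getAsDouble();
      if (total > previousCost) {
        break;                         // early out: already worse than best known state
      }
    }
    return total;
  }

  public static void main(String[] args) {
    double cost = computeCost(
        java.util.Arrays.asList(
            new Weighted(500, () -> 0.2),   // e.g. a region-count-skew cost
            new Weighted(5, () -> 0.9)),    // e.g. a locality cost
        1000);
    System.out.println(cost);               // prints 104.5
  }
}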

[29/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index 34a97e9..f29213b 100644
--- a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":41,"i46":42,"i47":42,"i48":42,"i49":42,"i50":42,"i51":42,"i52":42,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":42,"i59":42,"i60":42,"i61":42,"i62":42,"i63":42,"i64":42,"i65":42,"i66":42,"i67":42,"i68":42,"i69":42,"i70":42,"i71":42,"i72":42,"i73":42,"i74":42};
-var tabs = {65535:["t0","所有方法"],1:["t1","静态方法"],2:["t2","实例方法"],8:["t4","具体方法"],32:["t6","已过时的方法"]};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+Prev Class
+Next Class
 
 
-框架
-无框架
+Frames
+No Frames
 
 
-所有类
+All Classes
 
 
 

[29/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HRegionLocation.html 
b/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
index fe2ce0e..b02567a 100644
--- a/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
+++ b/apidocs/org/apache/hadoop/hbase/HRegionLocation.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"],32:["t6","已过时的方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-Prev Class
-Next Class
+上一个类
+下一个类
 
 
-Frames
-No Frames
+框架
+无框架
 
 
-All Classes
+所有类
 
 
 

[29/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 5623ece..6f1a7b9 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":9,"i132":10,"i133":9,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":9,"i153":10,"i154":9,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":9,"i133":10,"i134":9,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":9,"i154":10,"i155":9,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HRegionServer
+public class HRegionServer
 extends HasThread
 implements RegionServerServices, LastSequenceId, 
ConfigurationObserver
 HRegionServer makes a set of HRegions available to clients. 
It checks in with
@@ -1165,116 +1165,120 @@ implements getWALFileSystem() 
 
 
+https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,ReplicationStatus>
+getWalGroupsReplicationStatus() 
+
+
 LogRoller
 getWalRoller() 
 
-
+
 org.apache.hadoop.fs.Path
 getWALRootDir() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 getWALs() 
 
-
+
 (package 

[29/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlClient.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlClient.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlClient.html
index 427beaa..f1579fc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlClient.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlClient.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-Prev Class
+Prev Class
 Next Class
 
 
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class AccessControlClient
+public class AccessControlClient
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Utility client for doing access control admin 
operations.
 
@@ -179,6 +179,52 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+static List getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily)
+List all the userPermissions matching the given table pattern and column family.
+
+static List getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, byte[] columnQualifier)
+List all the userPermissions matching the given table pattern, column family and column qualifier.
+
+static List getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, byte[] columnQualifier, String userName)
+List all the userPermissions matching the given table pattern, column family and column qualifier.
+
+static List getUserPermissions(Connection connection, String tableRegex, byte[] columnFamily, String userName)
+List all the userPermissions matching the given table pattern, column family and user name.
+
+static List getUserPermissions(Connection connection, String tableRegex, String userName)
+List all the userPermissions matching the given table pattern and user name.
+
 private static void
 grant(Connection connection,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String userName,
@@ -187,7 +233,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Grants permission on the specified namespace for the specified user.
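
This commit adds the getUserPermissions overloads listed above. A hedged usage sketch of one of them: it assumes a reachable cluster with the AccessController coprocessor enabled, and the table regex, column family and user name are hypothetical. Note that these AccessControlClient methods declare throws Throwable.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;
import org.apache.hadoop.hbase.util.Bytes;

public class ListPermissionsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Table regex, family and user below are hypothetical values.
      for (UserPermission p : AccessControlClient.getUserPermissions(
          conn, "my_table.*", Bytes.toBytes("cf"), "alice")) {
        System.out.println(p);
      }
    }
  }
}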
 

[29/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 8fbdb7c..7927fbf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class ConnectionImplementation
+class ConnectionImplementation
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ClusterConnection, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true";
 title="class or interface in java.io">Closeable
 Main implementation of Connection 
and ClusterConnection interfaces.
@@ -847,7 +847,7 @@ implements 
 
 RETRIES_BY_SERVER_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
 
 See Also:
 Constant
 Field Values
@@ -860,7 +860,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -869,7 +869,7 @@ implements 
 
 RESOLVE_HOSTNAME_ON_FAIL_KEY
-private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
+private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
 
 See Also:
 Constant
 Field Values
@@ -882,7 +882,7 @@ implements 
 
 hostnamesCanChange
-private final boolean hostnamesCanChange
+private final boolean hostnamesCanChange
 
 
 
@@ -891,7 +891,7 @@ implements 
 
 pause
-private final long pause
+private final long pause
 
 
 
@@ -900,7 +900,7 @@ implements 
 
 pauseForCQTBE
-private final long pauseForCQTBE
+private final long pauseForCQTBE
 
 
 
@@ -909,7 +909,7 @@ implements 
 
 useMetaReplicas
-private boolean useMetaReplicas
+private boolean useMetaReplicas
 
 
 
@@ -918,7 +918,7 @@ implements 
 
 metaReplicaCallTimeoutScanInMicroSecond
-private final int metaReplicaCallTimeoutScanInMicroSecond
+private final int metaReplicaCallTimeoutScanInMicroSecond
 
 
 
@@ -927,7 +927,7 @@ implements 
 
 numTries
-private final int numTries
+private final int numTries
 
 
 
@@ -936,7 +936,7 @@ implements 
 
 rpcTimeout
-final int rpcTimeout
+final int rpcTimeout
 
 
 
@@ -945,7 +945,7 @@ implements 
 
 nonceGenerator
-private static volatile NonceGenerator nonceGenerator
+private static volatile NonceGenerator nonceGenerator
 Global nonceGenerator shared per client. Currently there's no reason to limit its scope.
 Once it's set under nonceGeneratorCreateLock, it is never unset or changed.
 
@@ -956,7 +956,7 @@ implements 
 
 nonceGeneratorCreateLock
-private static final https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
+private static final https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
 The nonce generator lock. Only taken when creating 
Connection, which gets a private copy.
 
 
@@ -966,7 +966,7 @@ implements 
 
 asyncProcess
-private final AsyncProcess asyncProcess
+private final AsyncProcess asyncProcess
 
 
 
@@ -975,7 +975,7 @@ implements 
 
 stats
-private final ServerStatisticTracker stats
+private final ServerStatisticTracker stats
 
 
 
@@ -984,7 +984,7 @@ implements 
 
 closed
-private volatile boolean closed
+private volatile boolean closed
 
 
 
@@ -993,7 +993,7 @@ implements 
 
 aborted
-private volatile boolean aborted
+private volatile boolean aborted
 
 
 
@@ -1002,7 +1002,7 @@ implements 
 
 clusterStatusListener
-ClusterStatusListener clusterStatusListener
+ClusterStatusListener clusterStatusListener
 
 
 
@@ -1011,7 +1011,7 @@ implements 
 
 metaRegionLock
-private final https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object metaRegionLock
+private final https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object metaRegionLock
 
 
 
@@ -1020,7 +1020,7 @@ implements 
 
 masterLock
-private final https://docs.oracle.com/javase/8/docs/api/jav
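
The nonceGenerator/nonceGeneratorCreateLock pair documented above is the classic set-once-under-a-lock idiom: a volatile field for safe publication plus a private lock object guarding the single initialization. A generic sketch of the idiom, with Object standing in for the NonceGenerator type.

public class SetOnceSketch {
  private static volatile Object instance;               // never unset once assigned
  private static final Object CREATE_LOCK = new Object();

  static Object get() {
    Object local = instance;
    if (local == null) {
      synchronized (CREATE_LOCK) {
        local = instance;
        if (local == null) {                             // re-check under the lock
          local = new Object();                          // stand-in for the real generator
          instance = local;
        }
      }
    }
    return local;
  }
}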

[29/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index 5a4ee8e..a9d2e8d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class RegionStates.RegionStateStampComparator
+private static class RegionStates.RegionStateStampComparator
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in java.util">Comparator
 
@@ -197,7 +197,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/Comparat
 
 
 RegionStateStampComparator
-private RegionStateStampComparator()
+private RegionStateStampComparator()
 
 
 
@@ -214,7 +214,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/Comparat
 
 
 compare
-public int compare(RegionState l,
+public int compare(RegionState l,
RegionState r)
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index a4093cb..acc2dcf 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
@@ -112,7 +112,7 @@
 
 
 
-private static class RegionStates.ServerReportEvent
+private static class RegionStates.ServerReportEvent
 extends ProcedureEvent
 
 
@@ -177,7 +177,7 @@ extends 
 
 ServerReportEvent
-public ServerReportEvent(ServerName serverName)
+public ServerReportEvent(ServerName serverName)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
index 4016c84..3bd4c3c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum RegionStates.ServerState
+public static enum RegionStates.ServerState
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum
 Server State.
 
@@ -230,7 +230,7 @@ the order they are declared.
 
 
 ONLINE
-public static final RegionStates.ServerState ONLINE
+public static final RegionStates.ServerState ONLINE
 Initial state. Available.
 
 
@@ -240,7 +240,7 @@ the order they are declared.
 
 
 SPLITTING_META
-public static final RegionStates.ServerState SPLITTING_META
+public static final RegionStates.ServerState SPLITTING_META
Only server which carries meta can have this state. We will split wal for meta and then assign meta first before splitting other wals.
 
@@ -251,7 +251,7 @@ the order they are declared.
 
 
 SPLITTING_META_DONE
-public static final RegionStates.ServerState SPLITTING_META_DONE
+public static final RegionStates.ServerState SPLITTING_META_DONE
Indicate that the meta splitting is done. We need this state so that the UnassignProcedure for meta can safely quit. See the comments in UnassignProcedure.remoteCallFailed for more details.
@@ -263,7 +263,7 @@ the order they are declared.
 
 
 SPLITTING
-public static final RegionStates.ServerState SPLITTING
+public static final RegionStates.ServerState SPLITTING
Server expired/crashed. Currently undergoing WAL splitting.
 
 
@@ -273,7 +273,7 @@ the order they are declared.
 
 
 OFFLINE
-public static final RegionStates.ServerState OFFLINE
+public static final RegionStates.ServerState OFFLINE
WAL splitting done. This state will be used to tell the UnassignProcedure that it can safely quit. See the comments in 

[29/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future modifyTableAsync(TableDescriptor td) throws IOException {
 349    ModifyTableResponse response = executeCallable(
 350      new MasterCallable(getConnection(), getRpcControllerFactory()) {
-351        @Override
-352        protected ModifyTableResponse rpcCall() throws Exception {
-353          setPriority(td.getTableName());
-354          ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
-355            td.getTableName(), td, ng.getNonceGroup(), ng.newNonce());
-356          return master.modifyTable(getRpcController(), request);
-357        }
-358      });
-359    return new ModifyTableFuture(this, td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364    return executeCallable(new MasterCallable>(getConnection(),
-365        getRpcControllerFactory()) {
-366      @Override
-367      protected List rpcCall() throws Exception {
-368        return master.listTableDescriptorsByNamespace(getRpcController(),
-369            ListTableDescriptorsByNamespaceRequest.newBuilder()
-370              .setNamespaceName(Bytes.toString(name)).build())
-371            .getTableSchemaList()
-372            .stream()
-373            .map(ProtobufUtil::toTableDescriptor)
-374            .collect(Collectors.toList());
-375      }
-376    });
-377  }
-378
-379  @Override
-380  public List listTableDescriptors(List tableNames) throws IOException {
-381    return executeCallable(new MasterCallable>(getConnection(),
-382        getRpcControllerFactory()) {
-383      @Override
-384      protected List rpcCall() throws Exception {
-385        GetTableDescriptorsRequest req =
-386            RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387        return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388            req));
-389      }
-390    });
-391  }
-392
-393  @Override
-394  public List getRegions(final ServerName sn) throws IOException {
-395    AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-396    // TODO: There is no timeout on this controller. Set one!
-397    HBaseRpcController controller = rpcControllerFactory.newController();
-398    return ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List getRegions(TableName tableName) throws IOException {
-403    if (TableName.isMetaTableName(tableName)) {
-404      return Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405    } else {
-406      return MetaTableAccessor.getTableRegions(connection, tableName, true);
-407    }
-408  }
-409
-410  private static class AbortProcedureFuture extends ProcedureFuture {
-411    private boolean isAbortInProgress;
-412
-413    public AbortProcedureFuture(
-414        final HBaseAdmin admin,
-415        final Long procId,
-416        final Boolean abortProcResponse) {
-417      super(admin, procId);
-418      this.isAbortInProgress = abortProcResponse;
-419    }
-420
-421    @Override
-422    public Boolean get(long timeout, TimeUnit unit)
-423        throws InterruptedException, ExecutionException, TimeoutException {
-424      if (!this.isAbortInProgress) {
-425        return false;
-426      }
-427      super.get(timeout, unit);
-428      return true;
-429    }
-430  }
-431
-432  /** @return Connection used by this object. */
-433  @Override
-434  public Connection getConnection() {
-435    return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final TableName tableName) throws IOException {
-440    return executeCallable(new RpcRetryingCallable() {
-441      @Override
-442      protected Boolean rpcCall(int callTimeout) throws Exception {
-443        return MetaTableAccessor.tableExists(connection, tableName);
-444      }
-445    });
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() throws IOException {
-450    return listTables((Pattern)null, false);
-451  }
-452
-453  @Override
-454  public HTableDescr
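
The modifyTableAsync(...) implementation above submits a ModifyTableProcedure and wraps the response in a future. A hedged caller-side sketch against the public Admin API; the table name and the descriptor change are invented for the example.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class ModifyTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("t1");        // hypothetical table
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(admin.getDescriptor(tn))
          .setMaxFileSize(10L * 1024 * 1024 * 1024)  // example change: 10 GB max file size
          .build();
      // The async call returns once the procedure is submitted;
      // the Future completes when the procedure itself finishes.
      admin.modifyTableAsync(td).get(5, TimeUnit.MINUTES);
    }
  }
}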

[29/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index b0a820f..15337a1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -958,7 +958,9 @@
 
 
 protected Procedure.LockState
-RecoverMetaProcedure.acquireLock(MasterProcedureEnv env) 
+RecoverMetaProcedure.acquireLock(MasterProcedureEnv env)
+Deprecated. 
+ 
 
 
 protected Procedure.LockState
@@ -985,12 +987,16 @@
 AbstractStateMachineNamespaceProcedure.acquireLock(MasterProcedureEnv env) 
 
 
+protected Procedure.LockState
+InitMetaProcedure.acquireLock(MasterProcedureEnv env) 
+
+
 private void
 CloneSnapshotProcedure.addRegionsToMeta(MasterProcedureEnv env)
 Add regions to hbase:meta table.
 
 
-
+
 private static void
 CreateTableProcedure.addRegionsToMeta(MasterProcedureEnv env,
 TableDescriptor tableDescriptor,
@@ -998,48 +1004,48 @@
 Add the specified set of regions to the hbase:meta 
table.
 
 
-
+
 private static void
 ModifyTableProcedure.addRegionsToMeta(MasterProcedureEnv env,
 TableDescriptor tableDescriptor,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List regionInfos) 
 
-
+
 protected static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 CreateTableProcedure.addTableToMeta(MasterProcedureEnv env,
   TableDescriptor tableDescriptor,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List regions) 
 
-
+
 private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest
 RSProcedureDispatcher.buildOpenRegionRequest(MasterProcedureEnv env,
   ServerName serverName,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List operations) 
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo
 RSProcedureDispatcher.RegionOpenOperation.buildRegionOpenInfoRequest(MasterProcedureEnv env) 
 
-
+
 protected static void
 AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env,
RegionInfo ri)
 Check region is online.
 
 
-
+
 protected void
 AbstractStateMachineRegionProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either 
offline or online with config set
 
 
-
+
 protected void
 AbstractStateMachineTableProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either 
offline or online with config set
 
 
-
+
 private static void
 DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env,
  TableName tableName)
@@ -1047,17 +1053,23 @@
  info:regioninfo column was empty because of some write error.
 
 
+
+protected void
+RecoverMetaProcedure.completionCleanup(MasterProcedureEnv env)
+Deprecated. 
+ 
+
 
 protected void
-RecoverMetaProcedure.completionCleanup(MasterProcedureEnv env) 
+TruncateTableProcedure.completionCleanup(MasterProcedureEnv env) 
 
 
 protected void
-TruncateTableProcedure.completionCleanup(MasterProcedureEnv env) 
+ModifyTableProcedure.completionCleanup(MasterProcedureEnv env) 
 
 
 protected void
-ModifyTableProcedure.completionCleanup(MasterProcedureEnv env) 
+InitMetaProcedure.completionCleanup(MasterProcedureEnv env) 
 
 
 protected static void
@@ -1245,107 +1257,115 @@
 
 
 protected StateMachineProcedure.Flow
+InitMetaProcedure.executeFromState(MasterProcedureEnv env,
+
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaState state) 
+
+
+protected StateMachineProcedure.Flow
 ModifyNamespaceProcedure.executeFromState(MasterProcedureEnv env,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyNamespaceState state) 
 
-
+
 protected StateMachineProcedure.Flow
 ModifyTableProcedure.executeFromState(MasterProcedureEnv env,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState state) 
 
-
+
 protected StateMachineProcedure.Flow
 RecoverMetaProcedure.executeFromState(MasterProcedureEnv env,
-
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.Re
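
Every executeFromState(env, state) row above follows the same dispatch shape: act on the current state, choose the next one, and report whether more states remain. A framework-free sketch of that loop; Flow and the states here are local stand-ins, not the shaded MasterProcedureProtos enums.

public class StateMachineSketch {
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }
  enum State { INIT_META_ASSIGN_META, INIT_META_DONE }

  private State state = State.INIT_META_ASSIGN_META;

  Flow executeFromState(State current) {
    switch (current) {
      case INIT_META_ASSIGN_META:
        // ... do the work for this step ...
        state = State.INIT_META_DONE;   // pick the next state
        return Flow.HAS_MORE_STATE;
      case INIT_META_DONE:
        return Flow.NO_MORE_STATE;
      default:
        throw new IllegalStateException("unhandled state " + current);
    }
  }

  public static void main(String[] args) {
    StateMachineSketch p = new StateMachineSketch();
    while (p.executeFromState(p.state) == Flow.HAS_MORE_STATE) {
      // loop until the procedure reports completion
    }
  }
}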

[29/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
index f236300..513d2ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.HistoryCommand.html
@@ -26,1048 +26,1115 @@
 018
 019package 
org.apache.hadoop.hbase.backup.impl;
 020
-021import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
-022import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-024import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
-025import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-026import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-027import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-028import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-029import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-030import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-031import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-032import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-033import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-034import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-035import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-036import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-037import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-038import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-039
-040import java.io.IOException;
-041import java.net.URI;
-042import java.util.List;
+021import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC;
+022import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
+023import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+024import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+025import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+026import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
+027import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC;
+028import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST;
+029import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+030import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+031import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+032import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+033import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+034import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+035import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+036import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+037import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+038import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+039import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+040import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+041import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+042import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
 043
-044import 
org.apache.commons.lang3.StringUtils;
-045import 
org.apache.hadoop.conf.Configuration;
-046import 
org.apache.hadoop.conf.Configured;
-047import org.apache.hadoop.fs.FileSystem;
-048import org.apache.hadoop.fs.Path;
-049import 
org.apache.hadoop.hbase.HBaseConfiguration;
-050import 
org.apache.hadoop.hbase.TableName;
-051import 
org.apache.hadoop.hbase.backup.BackupAdmin;
-052import 
org.apache.hadoop.hbase.backup.BackupInfo;
-053import 
org.apache.hadoop

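The reordered static imports above are the option constants each backup subcommand registers with its parser. A small sketch of the same convention with commons-cli; the constant names mirror BackupRestoreConstants, but the values here are hypothetical.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class KeepOptionSketch {
  // Hypothetical stand-ins for OPTION_KEEP / OPTION_KEEP_DESC.
  static final String OPTION_KEEP = "k";
  static final String OPTION_KEEP_DESC = "number of days to keep the backup";

  public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OPTION_KEEP, true, OPTION_KEEP_DESC); // true: takes a value
    CommandLine cmd = new DefaultParser().parse(options, args);
    if (cmd.hasOption(OPTION_KEEP)) {
      System.out.println("keep for days: " + cmd.getOptionValue(OPTION_KEEP));
    }
  }
}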
[29/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index aa48364..9549aa5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in 
maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() 
{
-2826return 
maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean 
isInitialized) {
-2831
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent 
getInitializedEvent() {
-2836return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has 
completed;
-2843   */
-2844  @Override
-2845  public boolean 
isServerCrashProcessingEnabled() {
-2846return 
serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2851
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2852  }
-2853
-2854  public ProcedureEvent 
getServerCrashProcessingEnabledEvent() {
-2855return 
serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all 
region servers.
-2860   * Currently, this uses a very naive 
computation - just uses the number of
-2861   * regions being served, ignoring 
stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865if (this.assignmentManager == null) 
{
-2866  return 0;
-2867}
-2868
-2869RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-2870if (regionStates == null) {
-2871  return 0;
-2872}
-2873return 
regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split 
plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge 
plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service 
instance) {
-2892/*
-2893 * No stacking of instances is 
allowed for a single service name
-2894 */
-2895Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-2896String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-2898  LOG.error("Coprocessor service 
"+serviceName+
-2899  " already registered, 
rejecting request from "+instance
-2900  );
-2901  return false;
-2902}
-2903
-2904
coprocessorServiceHandlers.put(serviceName, instance);
-2905if (LOG.isDebugEnabled()) {
-2906  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-2907}
-2908return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an 
instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster 
constructMaster(Class masterClass,
-2917  final Configuration conf)  {
-2918try {
-2919  Constructor c = masterClass.getConstructor(Configuration.class);
-2920  return c.newInstance(conf);
-2921} catch(Exception e) {
-2922  Throwable error = e;
-2923  if (e instanceof 
InvocationTargetException &&
-2924  
((InvocationTargetException)e).getTargetException() != null) {
-2925error = 
((InvocationTargetException)e).getTargetException();
-2926  }
-2927  throw new RuntimeException("Failed 
construction of Master: " + masterClass.toString() + ". "
-2928, error);
-2929}
-2930  }
-2931
-2932  /**
-2933   * @see 
org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] 
args) {
-2936LOG.info("STARTING service " + 
HMaster.class.getSimpleName());
-2937VersionInfo.logVersion();
-2938new 
HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() 
{
-2942return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner() {
-2946return this.logCleane

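The constructMaster() shown above is plain JDK reflection plus InvocationTargetException unwrapping, so the caller sees the constructor's real failure instead of the reflective wrapper. A self-contained sketch of that idiom:

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

public final class ReflectiveFactory {
  // Look up the single-argument constructor, invoke it, and surface the
  // underlying cause when the constructor itself threw.
  public static <T> T construct(Class<? extends T> clazz, Object arg, Class<?> argType) {
    try {
      Constructor<? extends T> c = clazz.getConstructor(argType);
      return c.newInstance(arg);
    } catch (Exception e) {
      Throwable error = e;
      if (e instanceof InvocationTargetException
          && ((InvocationTargetException) e).getTargetException() != null) {
        error = ((InvocationTargetException) e).getTargetException();
      }
      throw new RuntimeException("Failed construction of " + clazz + ".", error);
    }
  }
}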
[29/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -928,7690 +928,7698 @@
 920  Collection stores = 
this.stores.values();
 921  try {
 922// update the stores that we are 
replaying
-923
stores.forEach(HStore::startReplayingFromWAL);
-924// Recover any edits if 
available.
-925maxSeqId = Math.max(maxSeqId,
-926  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-927// Make sure mvcc is up to max.
-928this.mvcc.advanceTo(maxSeqId);
-929  } finally {
-930// update the stores that we are 
done replaying
-931
stores.forEach(HStore::stopReplayingFromWAL);
-932  }
-933}
-934this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-935
-936
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937this.writestate.flushRequested = 
false;
-938this.writestate.compacting.set(0);
-939
-940if (this.writestate.writesEnabled) 
{
-941  // Remove temporary data left over 
from old regions
-942  status.setStatus("Cleaning up 
temporary data from old regions");
-943  fs.cleanupTempDir();
-944}
-945
-946if (this.writestate.writesEnabled) 
{
-947  status.setStatus("Cleaning up 
detritus from prior splits");
-948  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-949  // these directories here on open.  
We may be opening a region that was
-950  // being split but we crashed in 
the middle of it all.
-951  fs.cleanupAnySplitDetritus();
-952  fs.cleanupMergesDir();
-953}
-954
-955// Initialize split policy
-956this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-957
-958// Initialize flush policy
-959this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-960
-961long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-962for (HStore store: stores.values()) 
{
-963  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964}
-965
-966// Use maximum of log sequenceid or 
that which was found in stores
-967// (particularly if no recovered 
edits, seqid will be -1).
-968long maxSeqIdFromFile =
-969  
WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970long nextSeqId = Math.max(maxSeqId, 
maxSeqIdFromFile) + 1;
-971if (writestate.writesEnabled) {
-972  
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), 
nextSeqId - 1);
-973}
-974
-975LOG.info("Opened {}; next 
sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977// A region can be reopened if failed 
a split; reset flags
-978this.closing.set(false);
-979this.closed.set(false);
-980
-981if (coprocessorHost != null) {
-982  status.setStatus("Running 
coprocessor post-open hooks");
-983  coprocessorHost.postOpen();
-984}
+923LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
+924
stores.forEach(HStore::startReplayingFromWAL);
+925// Recover any edits if 
available.
+926maxSeqId = Math.max(maxSeqId,
+927  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+928// Make sure mvcc is up to max.
+929this.mvcc.advanceTo(maxSeqId);
+930  } finally {
+931LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
+932// update the stores that we are 
done replaying
+933
stores.forEach(HStore::stopReplayingFromWAL);
+934  }
+935}
+936this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+937
+938
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939this.writestate.flushRequested = 
false;
+940this.writestate.compacting.set(0);
+941
+942if (this.writestate.writesEnabled) 
{
+943  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
+944  // Remove temporary data left over 
from old regions
+945  status.setStatus("Cleaning up 
temporary data from old regions");
+946  fs.cleanupTempDir();
+947}
+948
+949if (this.writestate.writesEnabled) 
{
+950  status.setStatus("Cleaning up 
detritus from prior splits");
+951  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
+952  // thes

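The new debug lines above bracket WAL replay, and the start/stop calls are paired through try/finally so the stores leave replay mode even when recovery throws. A reduced, self-contained sketch of that shape (Store is a stand-in for HStore):

import java.util.Collection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class WalReplayBracket {
  private static final Logger LOG = LoggerFactory.getLogger(WalReplayBracket.class);

  interface Store {                     // stand-in for HStore
    void startReplayingFromWAL();
    void stopReplayingFromWAL();
  }

  long replay(String encodedRegionName, Collection<Store> stores, long maxSeqId) {
    try {
      LOG.debug("replaying wal for {}", encodedRegionName);
      stores.forEach(Store::startReplayingFromWAL);
      // ... replay recovered edits here, possibly raising maxSeqId ...
      return maxSeqId;
    } finally {
      LOG.debug("stopping wal replay for {}", encodedRegionName);
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }
}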
[29/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/ExtendedCell.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ExtendedCell.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ExtendedCell.html
index b96d8b1..f759d1b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ExtendedCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ExtendedCell.html
@@ -38,9 +38,9 @@
 030 * must implement this.
 031 */
 032@InterfaceAudience.Private
-033public interface ExtendedCell extends 
RawCell, HeapSize, Cloneable {
-034
-035  int CELL_NOT_BASED_ON_CHUNK = -1;
+033public interface ExtendedCell extends 
RawCell, HeapSize {
+034  int CELL_NOT_BASED_ON_CHUNK = -1;
+035
 036  /**
 037   * Write this cell to an OutputStream 
in a {@link KeyValue} format.
 038   * 
KeyValue format
@@ -96,85 +96,93 @@
 088 }
 089
 090 /**
-091 * Write this Cell into the given buf's offset in a {@link KeyValue} format.
-092 * @param buf The buffer where to write the Cell.
-093 * @param offset The offset within buffer, to write the Cell.
-094 */
-095 default void write(ByteBuffer buf, int offset) {
-096KeyValueUtil.appendTo(this, buf, offset, true);
-097 }
-098
-099 /**
-100 * Does a deep copy of the contents to a new memory area and returns it as a new cell.
-101 * @return The deep cloned cell
-102 */
-103 default ExtendedCell deepClone() {
-104// When being added to the memstore, deepClone() is called and KeyValue has less heap overhead.
-105return new KeyValue(this);
-106 }
-107
-108 /**
-109 * Extracts the id of the backing bytebuffer of this cell if it was obtained from fixed sized
-110 * chunks as in case of MemstoreLAB
-111 * @return the chunk id if the cell is backed by fixed sized Chunks, else return -1
-112 */
-113 default int getChunkId() {
-114return CELL_NOT_BASED_ON_CHUNK;
-115 }
-116
-117 /**
-118 * Sets with the given seqId.
-119 * @param seqId sequence ID
+091 * @return Serialized size (defaults to include tag length).
+092 */
+093 default int getSerializedSize() {
+094return getSerializedSize(true);
+095 }
+096
+097 /**
+098 * Write this Cell into the given buf's offset in a {@link KeyValue} format.
+099 * @param buf The buffer where to write the Cell.
+100 * @param offset The offset within buffer, to write the Cell.
+101 */
+102 default void write(ByteBuffer buf, int offset) {
+103KeyValueUtil.appendTo(this, buf, offset, true);
+104 }
+105
+106 /**
+107 * Does a deep copy of the contents to a new memory area and returns it as a new cell.
+108 * @return The deep cloned cell
+109 */
+110 default ExtendedCell deepClone() {
+111// When being added to the memstore, deepClone() is called and KeyValue has less heap overhead.
+112return new KeyValue(this);
+113 }
+114
+115 /**
+116 * Extracts the id of the backing bytebuffer of this cell if it was obtained from fixed sized
+117 * chunks as in case of MemstoreLAB
+118 * @return the chunk id if the cell is backed by fixed sized Chunks, else return
+119 * {@link #CELL_NOT_BASED_ON_CHUNK}; i.e. -1.
 120 */
-121 void setSequenceId(long seqId) throws IOException;
-122
-123 /**
-124 * Sets with the given timestamp.
-125 * @param ts timestamp
-126 */
-127 void setTimestamp(long ts) throws IOException;
-128
-129 /**
-130 * Sets with the given timestamp.
-131 * @param ts buffer containing the timestamp value
-132 */
-133 void setTimestamp(byte[] ts) throws IOException;
-134
-135 /**
-136 * A region-specific unique monotonically increasing sequence ID given to each Cell. It always
-137 * exists for cells in the memstore but is not retained forever. It will be kept for
-138 * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's
-139 * row is no longer involved in any operations that require strict consistency.
-140 * @return seqId (always > 0 if exists), or 0 if it no longer exists
-141 */
-142 long getSequenceId();
-143
-144 /**
-145 * Contiguous raw bytes representing tags that may start at any index in the containing array.
-146 * @return the tags byte array
-147 */
-148 byte[] getTagsArray();
-149
-150 /**
-151 * @return the first offset where the tags start in the Cell
-152 */
-153 int getTagsOffset();
-154
-155 /**
-156 * HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a
-157 * non-negative number, to make good use of the sign bit, the max of tags length is defined 2 *
-158 * Short.MAX_VALUE + 1 = 65535. As a result, the return type is int, because a short is not
-159 * capable of handling that. Please note that even if the return type is int, the max tags length
-160 * is far less than Integer.MAX_VALUE.
-161 * @return the total length of the tags i

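The addition above introduces a no-arg getSerializedSize() default that delegates to the existing boolean overload, so implementors need no change. A tiny self-contained sketch of that interface-evolution idiom:

interface SerializedSized {
  int getSerializedSize(boolean withTags);

  // New convenience entry point; delegating keeps existing implementors
  // source- and binary-compatible (the idiom used by ExtendedCell above).
  default int getSerializedSize() {
    return getSerializedSize(true);   // include tag length by default
  }
}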
[29/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index a97dfdc..2b1b6c6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSet

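The modifyTable() change above loads the table's current descriptor so the pre-hook can compare old against new state before the procedure is submitted. A toy validation sketch of that idea; the map-based descriptors are stand-ins, not the TableDescriptor or coprocessor API.

import java.util.Map;

class PreModifyCheckSketch {
  // Reject a change to a setting that must stay fixed; only possible because
  // both the old and the new view are handed to the hook.
  void preModifyTable(String table, Map<String, String> oldDesc, Map<String, String> newDesc) {
    String before = oldDesc.get("REGION_REPLICATION");
    String after = newDesc.get("REGION_REPLICATION");
    if (before != null && !before.equals(after)) {
      throw new IllegalStateException(
          "REGION_REPLICATION may not be altered on table " + table);
    }
  }
}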
[29/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
index 2ab9e4d..c18869f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
@@ -31,323 +31,327 @@
 023import java.util.LinkedList;
 024import java.util.List;
 025
-026import 
org.apache.yetus.audience.InterfaceAudience;
-027import org.slf4j.Logger;
-028import org.slf4j.LoggerFactory;
-029import 
org.apache.hadoop.hbase.util.Bytes;
-030import 
org.apache.hadoop.hbase.util.ClassSize;
-031
-032/**
-033 * The compaction pipeline of a {@link 
CompactingMemStore}, is a FIFO queue of segments.
-034 * It supports pushing a segment at the 
head of the pipeline and removing a segment from the
-035 * tail when it is flushed to disk.
-036 * It also supports swap method to allow 
the in-memory compaction swap a subset of the segments
-037 * at the tail of the pipeline with a new 
(compacted) one. This swap succeeds only if the version
-038 * number passed with the list of 
segments to swap is the same as the current version of the
-039 * pipeline.
-040 * Essentially, there are two methods 
which can change the structure of the pipeline: pushHead()
-041 * and swap(), the later is used both by 
a flush to disk and by an in-memory compaction.
-042 * The pipeline version is updated by 
swap(); it allows to identify conflicting operations at the
-043 * suffix of the pipeline.
-044 *
-045 * The synchronization model is 
copy-on-write. Methods which change the structure of the
-046 * pipeline (pushHead(), 
flattenOneSegment() and swap()) apply their changes in the context of a
-047 * lock. They also make a read-only copy 
of the pipeline's list. Read methods read from a
-048 * read-only copy. If a read method 
accesses the read-only copy more than once it makes a local
-049 * copy of it to ensure it accesses the 
same copy.
-050 *
-051 * The methods getVersionedList(), 
getVersionedTail(), and flattenOneSegment() are also
-052 * protected by a lock since they need to 
have a consistent (atomic) view of the pipeline list
-053 * and version number.
-054 */
-055@InterfaceAudience.Private
-056public class CompactionPipeline {
-057  private static final Logger LOG = 
LoggerFactory.getLogger(CompactionPipeline.class);
-058
-059  public final static long FIXED_OVERHEAD 
= ClassSize
-060  .align(ClassSize.OBJECT + (3 * 
ClassSize.REFERENCE) + Bytes.SIZEOF_LONG);
-061  public final static long DEEP_OVERHEAD 
= FIXED_OVERHEAD + (2 * ClassSize.LINKEDLIST);
-062
-063  private final RegionServicesForStores 
region;
-064  private final 
LinkedList pipeline = new LinkedList<>();
-065  // The list is volatile to avoid 
reading a new allocated reference before the c'tor is executed
-066  private volatile 
LinkedList readOnlyCopy = new LinkedList<>();
-067  // Version is volatile to ensure it is 
atomically read when not using a lock
-068  private volatile long version = 0;
-069
-070  public 
CompactionPipeline(RegionServicesForStores region) {
-071this.region = region;
-072  }
-073
-074  public boolean pushHead(MutableSegment 
segment) {
-075ImmutableSegment immutableSegment = 
SegmentFactory.instance().
-076
createImmutableSegment(segment);
-077synchronized (pipeline){
-078  boolean res = 
addFirst(immutableSegment);
-079  readOnlyCopy = new 
LinkedList<>(pipeline);
-080  return res;
-081}
-082  }
-083
-084  public VersionedSegmentsList 
getVersionedList() {
-085synchronized (pipeline){
-086  return new 
VersionedSegmentsList(readOnlyCopy, version);
-087}
-088  }
-089
-090  public VersionedSegmentsList 
getVersionedTail() {
-091synchronized (pipeline){
-092  List 
segmentList = new ArrayList<>();
-093  if(!pipeline.isEmpty()) {
-094segmentList.add(0, 
pipeline.getLast());
-095  }
-096  return new 
VersionedSegmentsList(segmentList, version);
-097}
-098  }
-099
-100  /**
-101   * Swaps the versioned list at the tail 
of the pipeline with a new segment.
-102   * Swapping only if there were no 
changes to the suffix of the list since the version list was
-103   * created.
-104   * @param versionedList suffix of the 
pipeline to be replaced can be tail or all the pipeline
-105   * @param segment new segment to 
replace the suffix. Can be null if the suffix just needs to be
-106   *removed.
-107   * @param closeSuffix whether to close 
the suffix (to release memory), as part of swapping it out
-108   *During index merge op this 
will be false and for compaction it will be

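The class comment above describes the pipeline's copy-on-write model: mutators run under a lock and republish a read-only snapshot, while readers only touch the volatile snapshot, taking one local reference when they read it more than once. A compact self-contained sketch of that scheme (generic elements instead of ImmutableSegment):

import java.util.LinkedList;
import java.util.List;

class CopyOnWritePipeline<T> {
  private final LinkedList<T> pipeline = new LinkedList<>();
  // volatile so readers never observe a half-built snapshot
  private volatile List<T> readOnlyCopy = new LinkedList<>();
  private volatile long version = 0;

  public void pushHead(T segment) {
    synchronized (pipeline) {
      pipeline.addFirst(segment);
      readOnlyCopy = new LinkedList<>(pipeline);     // republish snapshot
    }
  }

  // Swap the tail only if nothing changed since the caller read `readVersion`.
  public boolean swapTail(long readVersion, T replacement) {
    synchronized (pipeline) {
      if (version != readVersion || pipeline.isEmpty()) {
        return false;                                // conflicting update detected
      }
      pipeline.removeLast();
      pipeline.addLast(replacement);
      version++;                                     // invalidate older readers
      readOnlyCopy = new LinkedList<>(pipeline);
      return true;
    }
  }

  public T peekTail() {
    List<T> local = readOnlyCopy;   // single local ref => consistent view
    return local.isEmpty() ? null : local.get(local.size() - 1);
  }
}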
[29/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase 
performance and scalability.  Runs a HBase
-114 * client that steps through one of a set 
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random 
writes test, etc.). Pass on the
-116 * command-line which test to run and how 
many clients are participating in
-117 * this experiment. Run {@code 
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * 

This class sets up and runs the evaluation programs described in
-120 * Section 7, Performance Evaluation, of the Bigtable;
-122 * paper, pages 8-10.
-123 *
-124 *
By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131 static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132 static final String RANDOM_READ = "randomRead";
-133 private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134 private static final ObjectMapper MAPPER = new ObjectMapper();
-135 static {
-136 MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137 }
-138
-139 public static final String TABLE_NAME = "TestTable";
-140 public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141 public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142 public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import org.apache.hadoop.hbase.filter


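As its declaration above shows, PerformanceEvaluation extends Configured and implements Tool, so it launches through Hadoop's ToolRunner like any other utility. A minimal Tool sketch (the class name and printed output are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class MiniEvalTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // ToolRunner has already folded generic options (-D key=value, etc.)
    // into getConf(); only tool-specific arguments arrive here.
    System.out.println("clients=" + (args.length > 0 ? args[0] : "1"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new MiniEvalTool(), args));
  }
}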
[29/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
org.apache.hadoop.ipc.RemoteException;
-135import 
org.apache.hadoop.security.UserGroupInformation;
-136import 
org.apache.had

[29/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index e63cd50..d8c0d2b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
@@ -422,7 +422,7 @@
 414  }
 415
 416  /**
-417   * {@link #listTables(boolean)}
+417   * {@link 
#listTableDescriptors(boolean)}
 418   */
 419  @Override
 420  public 
CompletableFuture> 
listTableDescriptors(Pattern pattern,
@@ -3476,16 +3476,79 @@
 3468return future;
 3469  }
 3470
-3471  private 
CompletableFuture clearBlockCache(ServerName 
serverName,
-3472  List hris) {
-3473return 
this. newAdminCaller().action((controller, stub) 
-> this
-3474  . adminCall(
-3475controller, stub, 
RequestConverter.buildClearRegionBlockCacheRequest(hris),
-3476(s, c, req, done) -> 
s.clearRegionBlockCache(controller, req, done),
-3477resp -> 
ProtobufUtil.toCacheEvictionStats(resp.getStats(
-3478  .serverName(serverName).call();
-3479  }
-3480}
+3471  @Override
+3472  public CompletableFuture 
cloneTableSchema(TableName tableName, TableName newTableName,
+3473  boolean preserveSplits) {
+3474CompletableFuture future 
= new CompletableFuture<>();
+3475
tableExists(tableName).whenComplete(
+3476  (exist, err) -> {
+3477if (err != null) {
+3478  
future.completeExceptionally(err);
+3479  return;
+3480}
+3481if (!exist) {
+3482  
future.completeExceptionally(new TableNotFoundException(tableName));
+3483  return;
+3484}
+3485
tableExists(newTableName).whenComplete(
+3486  (exist1, err1) -> {
+3487if (err1 != null) {
+3488  
future.completeExceptionally(err1);
+3489  return;
+3490}
+3491if (exist1) {
+3492  
future.completeExceptionally(new TableExistsException(newTableName));
+3493  return;
+3494}
+3495
getDescriptor(tableName).whenComplete(
+3496  (tableDesc, err2) -> 
{
+3497if (err2 != null) {
+3498  
future.completeExceptionally(err2);
+3499  return;
+3500}
+3501TableDescriptor 
newTableDesc
+3502= 
TableDescriptorBuilder.copy(newTableName, tableDesc);
+3503if (preserveSplits) {
+3504  
getTableSplits(tableName).whenComplete((splits, err3) -> {
+3505if (err3 != null) 
{
+3506  
future.completeExceptionally(err3);
+3507} else {
+3508  
createTable(newTableDesc, splits).whenComplete(
+3509(result, err4) 
-> {
+3510  if (err4 != 
null) {
+3511
future.completeExceptionally(err4);
+3512  } else {
+3513
future.complete(result);
+3514  }
+3515});
+3516}
+3517  });
+3518} else {
+3519  
createTable(newTableDesc).whenComplete(
+3520(result, err5) -> 
{
+3521  if (err5 != null) 
{
+3522
future.completeExceptionally(err5);
+3523  } else {
+3524
future.complete(result);
+3525  }
+3526});
+3527}
+3528  });
+3529  });
+3530  });
+3531return future;
+3532  }
+3533
+3534  private 
CompletableFuture clearBlockCache(ServerName 
serverName,
+3535  List hris) {
+3536return 
this. newAdminCaller().action((controller, stub) 
-> this
+3537  . adminCall(
+3538controller, stub, 
RequestConverter.buildClearRegionBlockCacheRequest(hris),
+3539(s, c, req, done) -> 
s.clearRegionBlockCache(controller, req, done),
+3540resp -> 
ProtobufUtil.toCacheEvictionStats(resp.getStats(
+3541  .serverName(serverName).call();
+3542  }
+3543}
 
 
 

http://git-wip-us.apache.org/repos/as

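The new cloneTableSchema() above chains tableExists, getDescriptor and createTable through nested whenComplete callbacks. The same check-then-act chain can be expressed with thenCompose, which this self-contained sketch shows; the three async calls are stand-ins, not the AsyncAdmin API.

import java.util.concurrent.CompletableFuture;

class CloneSchemaSketch {
  // Stand-ins for the admin round-trips used by cloneTableSchema.
  CompletableFuture<Boolean> tableExists(String name) { return CompletableFuture.completedFuture(true); }
  CompletableFuture<String> getDescriptor(String name) { return CompletableFuture.completedFuture("desc:" + name); }
  CompletableFuture<Void> createTable(String descriptor) { return CompletableFuture.completedFuture(null); }

  CompletableFuture<Void> cloneTableSchema(String src, String dst) {
    return tableExists(src).thenCompose(srcExists -> {
      if (!srcExists) {
        return failed(new IllegalStateException("TableNotFound: " + src));
      }
      return tableExists(dst).thenCompose(dstExists -> {
        if (dstExists) {
          return failed(new IllegalStateException("TableExists: " + dst));
        }
        return getDescriptor(src).thenCompose(this::createTable);
      });
    });
  }

  // Java 8 has no CompletableFuture.failedFuture(); build one by hand.
  private static <T> CompletableFuture<T> failed(Throwable t) {
    CompletableFuture<T> f = new CompletableFuture<>();
    f.completeExceptionally(t);
    return f;
  }
}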
[29/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/org/apache/hadoop/hbase/snapshot/CreateSnapshot.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/snapshot/CreateSnapshot.html 
b/devapidocs/org/apache/hadoop/hbase/snapshot/CreateSnapshot.html
index 65ae57d..4039f9d 100644
--- a/devapidocs/org/apache/hadoop/hbase/snapshot/CreateSnapshot.html
+++ b/devapidocs/org/apache/hadoop/hbase/snapshot/CreateSnapshot.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class CreateSnapshot
+public class CreateSnapshot
 extends AbstractHBaseTool
 This is a command line class that will snapshot a given 
table.
 
@@ -210,7 +210,7 @@ extends 
 protected void
-processOptions(org.apache.commons.cli.CommandLine cmd)
+processOptions(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine cmd)
 This method is called to process the options after they 
have been parsed.
 
 
@@ -220,7 +220,7 @@ extends AbstractHBaseTool
-addOption,
 addOptNoArg,
 addOptNoArg,
 addOptWithArg,
 addOptWithArg,
 addRequiredOption,
 addRequiredOptWithArg, addRequiredOptWithArg,
 doStaticMain,
 getConf,
 getOptionAsDouble,
 getOptionAsInt,
 getOptionAsLong,
 parseArgs,
 parseInt,
 parseLong,
 printUsage,
 printUsage,
 processOldArgs,
 run,
 setConf
 
+addOption,
 addOptNoArg,
 addOptNoArg,
 addOptWithArg,
 addOptWithArg,
 addRequiredOption,
 addRequiredOptWithArg,
 addRequiredOptWithArg,
 doStaticMain,
 getConf,
 getOptionAsDouble,
 getOptionAsInt,
 getOptionAsLong,
 parseArgs,
 parseInt,
 parseLong,
 printUsage,
 printUsage,
 processOldArgs,
 run
 , setConf
 
 
 
@@ -249,7 +249,7 @@ extends 
 
 snapshotType
-private SnapshotType snapshotType
+private SnapshotType snapshotType
 
 
 
@@ -258,7 +258,7 @@ extends 
 
 tableName
-private TableName tableName
+private TableName tableName
 
 
 
@@ -267,7 +267,7 @@ extends 
 
 snapshotName
-private String snapshotName
+private String snapshotName
 
 
 
@@ -284,7 +284,7 @@ extends 
 
 CreateSnapshot
-public CreateSnapshot()
+public CreateSnapshot()
 
 
 
@@ -301,7 +301,7 @@ extends 
 
 main
-public static void main(String[] args)
+public static void main(String[] args)
 
 
 
@@ -310,7 +310,7 @@ extends 
 
 addOptions
-protected void addOptions()
+protected void addOptions()
 Description copied from 
class: AbstractHBaseTool
 Override this to add command-line options using AbstractHBaseTool.addOptWithArg(java.lang.String, java.lang.String) and similar methods.
@@ -320,18 +320,18 @@ extends 
+
 
 
 
 
 processOptions
-protected void processOptions(org.apache.commons.cli.CommandLine cmd)
-Description copied from 
class: AbstractHBaseTool
+protected void processOptions(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine cmd)
+Description copied from 
class: AbstractHBaseTool
 This method is called to process the options after they 
have been parsed.
 
 Specified by:
-processOptions in
 class AbstractHBaseTool
+processOptions in
 class AbstractHBaseTool
 
 
 
@@ -341,7 +341,7 @@ extends 
 
 doWork
-protected int doWork()
+protected int doWork()
   throws Exception
 Description copied from 
class: AbstractHBaseTool
 The "main function" of the tool

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html 
b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
index a7b6b6b..3ab7f5d 100644
--- a/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
+++ b/devapidocs/org/apache/hadoop/hbase/snapshot/ExportSnapshot.Options.html
@@ -128,51 +128,51 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Field and Description
 
 
-(package private) static 
org.apache.commons.cli.Option
+(package private) static 
org.apache.hbase.thirdparty.org.apache.commons.cli.Option
 BANDWIDTH 
 
 
-(package private) static 
org.apache.commons.cli.Option
+(package private) static 
org.apache.hbase.thirdparty.org.apache.commons.cli.Option
 CHGROUP 
 
 
-(package private) static 
org.apache.commons.cli.Option
+(package private) static 
org.apache.hbase.thirdparty

[29/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.
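The only signature change in these tools is the relocation of commons-cli into the hbase-thirdparty namespace, so subclasses that override processOptions must import the relocated CommandLine. A sketch of the swap; MySnapshotTool is a hypothetical subclass:

// Before: the unshaded library leaked into the tool API.
// import org.apache.commons.cli.CommandLine;

// After: the relocated class bundled by hbase-thirdparty.
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;

public class MySnapshotTool /* extends AbstractHBaseTool */ {
  protected void processOptions(CommandLine cmd) {
    // identical parsing logic; only the CommandLine package changed
  }
}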

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 935839d..64dfea4 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -67,7 +67,7 @@
 059 * To only retrieve columns within a 
specific range of version timestamps, call
 060 * {@link #setTimeRange(long, long) 
setTimeRange}.
 061 * 

-062 * To only retrieve columns with a specific timestamp, call {@link #setTimeStamp(long) setTimestamp}
+062 * To only retrieve columns with a specific timestamp, call {@link #setTimestamp(long) setTimestamp}
 063 * .
 064 *

065 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int) @@ -149,7 +149,7 @@ 141 private long maxResultSize = -1; 142 private boolean cacheBlocks = true; 143 private boolean reversed = false; -144 private TimeRange tr = new TimeRange(); +144 private TimeRange tr = TimeRange.allTime(); 145 private Map> familyMap = 146new TreeMap>(Bytes.BYTES_COMPARATOR); 147 private Boolean asyncPrefetch = null; @@ -384,869 +384,887 @@ 376 * @see #setMaxVersions() 377 * @see #setMaxVersions(int) 378 * @return this -379 */ -380 public Scan setTimeStamp(long timestamp) -381 throws IOException { -382try { -383 tr = new TimeRange(timestamp, timestamp+1); -384} catch(Exception e) { -385 // This should never happen, unless integer overflow or something extremely wrong... -386 LOG.error("TimeRange failed, likely caused by integer overflow. ", e); -387 throw e; -388} -389return this; -390 } -391 -392 @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { -393return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); -394 } -395 -396 /** -397 * Set the start row of the scan. -398 *

-399 * If the specified row does not exist, the Scanner will start from the next closest row after the -400 * specified row. -401 * @param startRow row to start scanner at or after -402 * @return this -403 * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length -404 * exceeds {@link HConstants#MAX_ROW_LENGTH}) -405 * @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusive of -406 * the stop row to keep compatible with the old behavior. -407 */ -408 @Deprecated -409 public Scan setStartRow(byte[] startRow) { -410withStartRow(startRow); -411if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) { -412 // for keeping the old behavior that a scan with the same start and stop row is a get scan. -413 this.includeStopRow = true; -414} -415return this; -416 } -417 -418 /** -419 * Set the start row of the scan. -420 *

-421 * If the specified row does not exist, the Scanner will start from the next closest row after the -422 * specified row. -423 * @param startRow row to start scanner at or after -424 * @return this -425 * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length -426 * exceeds {@link HConstants#MAX_ROW_LENGTH}) -427 */ -428 public Scan withStartRow(byte[] startRow) { -429return withStartRow(startRow, true); -430 } -431 -432 /** -433 * Set the start row of the scan. -434 *

-435 * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner -436 * will start from the next closest row after the specified row. -437 * @param startRow row to start scanner at or after -438 * @param inclusive whether we should include the start row when scan -439 * @return this -440 * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length -441 * exceeds {@link HConstants#MAX_ROW_LENGTH}) -442 */ -443 public Scan withStartRow(byte[] startRow, boolean inclusive) { -444if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) { -445 throw new IllegalArgumentException("startRow's length must be less than or equal to " -446 + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key."); -447} -448this.startRow = startRow; -449this.includeStartRow = inclusive; -450return this; -451 } -452 -453 /** -454 * Set the stop row of the scan. -455 *

-456 * The scan will include rows that are lexicographically less than the provided stopRow. -457 *

-458 * Note: When doing a filter for a rowKey


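The Scan diff above retires setStartRow/setTimeStamp in favor of withStartRow and setTimestamp, and swaps new TimeRange() for TimeRange.allTime(). A short usage sketch with the replacement methods shown in this hunk (row keys are made up; withStopRow is the stop-side counterpart from the same API generation):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

class ScanBuilderSketch {
  Scan buildScan() throws IOException {
    return new Scan()
        .withStartRow(Bytes.toBytes("row-0001"), true)   // inclusive start
        .withStopRow(Bytes.toBytes("row-0100"))          // exclusive stop
        .setTimeRange(0L, System.currentTimeMillis());
  }
}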
[29/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index d4d6c7f..c36dd6e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -239,15 +239,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-HRegionLocation.serverName 
+ServerMetricsBuilder.serverName 
 
 
 private ServerName
-ServerMetricsBuilder.serverName 
+ServerMetricsBuilder.ServerMetricsImpl.serverName 
 
 
 private ServerName
-ServerMetricsBuilder.ServerMetricsImpl.serverName 
+HRegionLocation.serverName 
 
 
 
@@ -306,9 +306,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetrics.getMasterName()
-Returns detailed information about the current master ServerName.
-
+ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName() 
 
 
 ServerName
@@ -318,11 +316,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName() 
+ClusterMetrics.getMasterName()
+Returns detailed information about the current master ServerName.
+
 
 
 ServerName
-HRegionLocation.getServerName() 
+ServerLoad.getServerName()
+Deprecated. 
+ 
 
 
 ServerName
@@ -330,13 +332,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ServerLoad.getServerName()
-Deprecated. 
- 
+ServerMetricsBuilder.ServerMetricsImpl.getServerName() 
 
 
 ServerName
-ServerMetricsBuilder.ServerMetricsImpl.getServerName() 
+HRegionLocation.getServerName() 
 
 
 ServerName
@@ -405,7 +405,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetrics.getBackupMasterNames() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -415,7 +415,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames() 
+ClusterMetrics.getBackupMasterNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -428,7 +428,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetrics.getDeadServerNames() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -438,7 +438,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames() 
+ClusterMetrics.getDeadServerNames() 
 
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
@@ -448,7 +448,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
-ClusterMetrics.getLiveServerMetrics() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
@@ -458,7 +458,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
-ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics() 
+ClusterMetrics.getLiveServerMetrics() 
 
 
 static Pair
@@ -857,31 +857,31 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-AsyncRequestFutureImpl.SingleServerRequestRunnable.server 
+FastFailInterceptorContext.ser

[29/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
index e959408..cabc286 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/MasterNotRunningException.html
@@ -128,7 +128,7 @@
 
 
 boolean
-ConnectionImplementation.isMasterRunning()
+ClusterConnection.isMasterRunning()
 Deprecated. 
 this has been deprecated 
without a replacement
 
@@ -136,7 +136,7 @@
 
 
 boolean
-ClusterConnection.isMasterRunning()
+ConnectionImplementation.isMasterRunning()
 Deprecated. 
 this has been deprecated 
without a replacement
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
index bba209a..26611ed 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
@@ -270,32 +270,32 @@
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncHBaseAdmin.getNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name) 
-
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 AsyncAdmin.getNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Get a namespace descriptor by name
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 RawAsyncHBaseAdmin.getNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name) 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncHBaseAdmin.listNamespaceDescriptors() 
-
 
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+AsyncHBaseAdmin.getNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name) 
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 AsyncAdmin.listNamespaceDescriptors()
 List available namespace descriptors
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 RawAsyncHBaseAdmin.listNamespaceDescriptors() 
 
+
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
+AsyncHBaseAdmin.listNamespaceDescriptors() 
+
 
 
 
@@ -307,7 +307,9 @@
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-AsyncHBaseAdmin.createNamespace(NamespaceDescriptor descriptor) 
+AsyncAd
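
These hunks reorder the AsyncAdmin, AsyncHBaseAdmin and RawAsyncHBaseAdmin rows
for getNamespaceDescriptor and listNamespaceDescriptors without changing the
API. A minimal sketch of the async namespace calls, assuming an HBase 2+ async
client and a reachable cluster; blocking get() is used here only to keep the
example short:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceTour {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // "Get a namespace descriptor by name"; the future completes exceptionally if it is missing
      NamespaceDescriptor def = admin.getNamespaceDescriptor("default").get();
      System.out.println(def.getName());
      // "List available namespace descriptors"
      List<NamespaceDescriptor> all = admin.listNamespaceDescriptors().get();
      all.forEach(ns -> System.out.println(ns.getName()));
    }
  }
}
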

[29/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
index ecf500c..0cd5a4e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
@@ -238,8355 +238,8368 @@
 230  public static final String 
HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int 
DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234  
"hbase.regionserver.minibatch.size";
-235  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for 
durability. All tables/mutations not
-239   * defining a durability or using 
USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for 
durability. All tables/mutations not
+235   * defining a durability or using 
USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability 
DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String 
HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240  
"hbase.regionserver.minibatch.size";
+241  public static final int 
DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new 
AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the 
closing flag if there is stuff we don't
-246   * want to do while in closing state; 
e.g. like offer this region up to the
-247   * master as a region to close if the 
carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new 
AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
-254   * less that this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush 
operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId 
= HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed 
open region event from the primary region. This is used
-267   * to skip entries before this due to 
the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long 
lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long 
lastReplayedCompactionSeqId = -1L;
-272
-273  
//
-274  // Members
-275  
//
-276
-277  // map from a locked row to the context 
for that lock including:
-278  // - CountDownLatch for threads waiting 
on that row
-279  // - the thread that owns the lock 
(allow reentrancy)
-280  // - reference count of (reentrant) 
locks held by the thread
-281  // - the row itself
-282  private final 
ConcurrentHashMap lockedRows =
-283  new ConcurrentHashMap<>();
-284
-285  protected final Map stores =
-286  new 
ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String 
WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean 
DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new 
AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the 
closing flag if there is stuff we don't
+249   * want to do while in closing state; 
e.g. like offer this region up to the
+250   * master as a region to close if the 
carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new 
AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data 
on this region. There is no edit in memory that is
+257   * less that this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = 
HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush 
operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when 
flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be 
older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId 
= HConstants.NO_SEQNUM;
+2
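
Besides moving HBASE_REGIONSERVER_MINIBATCH_SIZE below DEFAULT_DURABILITY, this
hunk introduces WAL_HSYNC_CONF_KEY ("hbase.wal.hsync") with DEFAULT_WAL_HSYNC =
false. A hedged sketch of reading both settings through the public constants
shown in the listing; the glosses in the comments are my reading of the keys,
not text from the listing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "hbase.wal.hsync": when true, WAL durability escalates from hflush() to hsync()
    boolean hsync = conf.getBoolean(HRegion.WAL_HSYNC_CONF_KEY, HRegion.DEFAULT_WAL_HSYNC);
    // "hbase.regionserver.minibatch.size": cap on rows handled per mini-batch of a batch mutation
    int miniBatch = conf.getInt(HRegion.HBASE_REGIONSERVER_MINIBATCH_SIZE,
        HRegion.DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE);
    System.out.println("hsync=" + hsync + " minibatch=" + miniBatch);
  }
}
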

[29/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
index f47d627..c3d225c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean 
archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111  throws IOException {
-112if (LOG.isDebugEnabled()) {
-113  LOG.debug("ARCHIVING " + 
regionDir.toString());
-114}
-115
-116// otherwise, we archive the files
-117// make sure we can archive
-118if (tableDir == null || regionDir == 
null) {
-119  LOG.error("No archive directory 
could be found because tabledir (" + tableDir
-120  + ") or regiondir (" + 
regionDir + "was null. Deleting files instead.");
-121  deleteRegionWithoutArchiving(fs, 
regionDir);
-122  // we should have archived, but 
failed to. Doesn't matter if we deleted
-123  // the archived files correctly or 
not.
-124  return false;
-125}
-126
-127// make sure the regiondir lives 
under the tabledir
-128
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129Path regionArchiveDir = 
HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130FSUtils.getTableName(tableDir),
-131regionDir.getName());
-132
-133FileStatusConverter getAsFile = new 
FileStatusConverter(fs);
-134// otherwise, we attempt to archive 
the store files
-135
-136// build collection of just the store 
directories to archive
-137Collection toArchive = 
new ArrayList<>();
-138final PathFilter dirFilter = new 
FSUtils.DirFilter(fs);
-139PathFilter nonHidden = new 
PathFilter() {
-140  @Override
-141  public boolean accept(Path file) 
{
-142return dirFilter.accept(file) 
&& !file.getName().toString().startsWith(".");
-143  }
-144};
-145FileStatus[] storeDirs = 
FSUtils.listStatus(fs, regionDir, nonHidden);
-146// if there no files, we can just 
delete the directory and return;
-147if (storeDirs == null) {
-148  LOG.debug("Region directory " + 
regionDir + " empty.");
-149  return 
deleteRegionWithoutArchiving(fs, regionDir);
-150}
-151
-152// convert the files in the region to 
a File
-153
toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154LOG.debug("Archiving " + 
toArchive);
-155List failedArchive = 
resolveAndArchive(fs, regionArchiveDir, toArchive,
-156
EnvironmentEdgeManager.currentTime());
-157if (!failedArchive.isEmpty()) {
-158  throw new 
FailedArchiveException("Failed to archive/delete all the files for region:"
-159  + regionDir.getName() + " into 
" + regionArchiveDir
-160  + ". Something is probably awry 
on the filesystem.",
-161  
Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162}
-163// if that was successful, then we 
delete the region
-164return 
deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the 
store files of the specified column family,
-169   * either by archiving them or outright 
deletion
-170   * @param fs the filesystem where the 
store files live
-171   * @param conf {@link Configuration} to 
examine to determine the archive directory
-172   * @param parent Parent region hosting 
the store files
-173   * @param tableDir {@link Path} to 
where the table is being stored (for building the archive path)
-174   * @param family the family hosting the 
store files
-175   * @throws IOException if the files 
could not be correctly disposed.
-176   */
-177  public static void 
archiveFamily(FileSystem fs, Configuration conf,
-178  RegionInfo parent, Path tableDir, 
byte[] family) throws IOException {
-179Path familyDir = new Path(tableDir, 
new Path(parent.getEncodedName(), Bytes.toString(family)));
-180archiveFamilyByFamilyDir(fs, conf, 
parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region 
the store files of the specified column family,
-185   * either by archiving them or outright 
deletion
-186   * @param fs the filesystem where the 
store files live
-187   * @param conf {@link Configuration} to 
examine to determine the archive directory
-188   * @param parent Parent region hosting 
the store files
-189   * @param familyDir {@link Path} to 
where the family is being stored
-190   * @param family the family hosting the 
store files
-191   * @throws IOException if the files 
could not be correctly disposed.

[29/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
index 18597dd..d5a3666 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ColumnFamilyDescriptor.html
@@ -319,16 +319,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-TableDescriptorBuilder
-TableDescriptorBuilder.addColumnFamily(ColumnFamilyDescriptor family) 
-
-
-TableDescriptorBuilder.ModifyableTableDescriptor
-TableDescriptorBuilder.ModifyableTableDescriptor.addColumnFamily(ColumnFamilyDescriptor family)
-Adds a column family.
-
-
-
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
 AsyncHBaseAdmin.addColumnFamily(TableName tableName,
ColumnFamilyDescriptor columnFamily) 
@@ -444,6 +434,16 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 TableDescriptorBuilder.ModifyableTableDescriptor.putColumnFamily(ColumnFamilyDescriptor family) 
 
 
+TableDescriptorBuilder
+TableDescriptorBuilder.setColumnFamily(ColumnFamilyDescriptor family) 
+
+
+TableDescriptorBuilder.ModifyableTableDescriptor
+TableDescriptorBuilder.ModifyableTableDescriptor.setColumnFamily(ColumnFamilyDescriptor family)
+Adds a column family.
+
+
+
 static byte[]
 ColumnFamilyDescriptorBuilder.toByteArray(ColumnFamilyDescriptor desc) 
 
@@ -466,6 +466,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 static https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in java.util">Comparator
 TableDescriptor.getComparator(https://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true";
 title="class or interface in java.util">Comparator cfComparator) 
 
+
+TableDescriptorBuilder
+TableDescriptorBuilder.setColumnFamilies(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection families) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/org/apache/hadoop/hbase/client/class-use/CoprocessorDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/CoprocessorDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/CoprocessorDescriptor.html
new file mode 100644
index 000..e46c0f2
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/CoprocessorDescriptor.html
@@ -0,0 +1,266 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Interface org.apache.hadoop.hbase.client.CoprocessorDescriptor 
(Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Interfaceorg.apache.hadoop.hbase.client.CoprocessorDescriptor
+
+
+
+
+
+Packages that use CoprocessorDescriptor 
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase
+ 
+
+
+org.apache.hadoop.hbase.client
+
+Provides HBase Client
+
+
+
+
+
+
+
+
+
+
+Uses of CoprocessorDescriptor 
in org.apache.hadoop.hbase
+
+Methods in org.apache.hadoop.hbase
 that return types with arguments of type CoprocessorDescriptor 
+
+Modifier and Type
+Method and Description
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
+HTableDescriptor.getCoprocessorDescriptors()
+Deprecated. 
+ 
+
+
+
+
+
+
+
+Uses of CoprocessorDescriptor 
in org.apache.hadoop.hbase.client
+
+Classes in org.apache.hadoop.hbase.client
 that implement CoprocessorDescriptor 
+
+Modifier and Type
+Cla
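
The ColumnFamilyDescriptor hunks record the rename of
TableDescriptorBuilder.addColumnFamily to setColumnFamily, plus the new bulk
setColumnFamilies(Collection). A minimal sketch of building a descriptor with
the new names; the table and family names are hypothetical:

import java.util.Arrays;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))   // one family
        .setColumnFamilies(Arrays.asList(                           // or several at once
            ColumnFamilyDescriptorBuilder.of("cf2"),
            ColumnFamilyDescriptorBuilder.of("cf3")))
        .build();
  }
}

The resulting TableDescriptor can then be passed to Admin.createTable or
Admin.modifyTable as before; only the builder method names changed.
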

[29/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 61b92e6..a6a4db7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class AssignmentManager.RegionInTransitionStat
+public static class AssignmentManager.RegionInTransitionStat
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -266,7 +266,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ritThreshold
-private final int ritThreshold
+private final int ritThreshold
 
 
 
@@ -275,7 +275,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ritsOverThreshold
-private https://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true";
 title="class or interface in java.util">HashMapString,RegionState> ritsOverThreshold
+private https://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true";
 title="class or interface in java.util">HashMapString,RegionState> ritsOverThreshold
 
 
 
@@ -284,7 +284,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 statTimestamp
-private long statTimestamp
+private long statTimestamp
 
 
 
@@ -293,7 +293,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 oldestRITTime
-private long oldestRITTime
+private long oldestRITTime
 
 
 
@@ -302,7 +302,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 totalRITsTwiceThreshold
-private int totalRITsTwiceThreshold
+private int totalRITsTwiceThreshold
 
 
 
@@ -311,7 +311,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 totalRITs
-private int totalRITs
+private int totalRITs
 
 
 
@@ -328,7 +328,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 RegionInTransitionStat
-public RegionInTransitionStat(org.apache.hadoop.conf.Configuration conf)
+public RegionInTransitionStat(org.apache.hadoop.conf.Configuration conf)
 
 
 
@@ -345,7 +345,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getRITThreshold
-public int getRITThreshold()
+public int getRITThreshold()
 
 
 
@@ -354,7 +354,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getTimestamp
-public long getTimestamp()
+public long getTimestamp()
 
 
 
@@ -363,7 +363,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getTotalRITs
-public int getTotalRITs()
+public int getTotalRITs()
 
 
 
@@ -372,7 +372,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getOldestRITTime
-public long getOldestRITTime()
+public long getOldestRITTime()
 
 
 
@@ -381,7 +381,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getTotalRITsOverThreshold
-public int getTotalRITsOverThreshold()
+public int getTotalRITsOverThreshold()
 
 
 
@@ -390,7 +390,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 hasRegionsTwiceOverThreshold
-public boolean hasRegionsTwiceOverThreshold()
+public boolean hasRegionsTwiceOverThreshold()
 
 
 
@@ -399,7 +399,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 hasRegionsOverThreshold
-public boolean hasRegionsOverThreshold()
+public boolean hasRegionsOverThreshold()
 
 
 
@@ -408,7 +408,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getRegionOverThreshold
-public https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getRegionOverThreshold()
+public https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection getRegionOverThreshold()
 
 
 
@@ -417,7 +417,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isRegionOverThreshold
-public boolean isRegionOverThreshold(RegionInfo regionInfo)
+public boolean isRegionOverThreshold(RegionInfo regionInf
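
Only the generated javadoc anchors change in this hunk;
AssignmentManager.RegionInTransitionStat itself is @InterfaceAudience.Private
master-side code. A client can observe a similar regions-in-transition picture
through the public ClusterMetrics interface; a hedged sketch, assuming
ClusterMetrics.getRegionStatesInTransition() as present in recent 2.x/3.0
clients, with 'admin' a hypothetical open Admin handle:

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionState;

public class RitWatch {
  static void printRegionsInTransition(Admin admin) throws java.io.IOException {
    ClusterMetrics metrics = admin.getClusterMetrics();
    for (RegionState rs : metrics.getRegionStatesInTransition()) {
      System.out.println(rs.getRegion().getEncodedName() + " -> " + rs.getState());
    }
  }
}
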

[29/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index bba78a1..490e321 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -2052,119 +2052,119 @@ service.
 
 
 private TableName
-SnapshotDescription.table 
+RegionCoprocessorRpcChannel.table 
 
 
 private TableName
-RegionCoprocessorRpcChannel.table 
+SnapshotDescription.table 
 
 
 private TableName
-RawAsyncTableImpl.tableName 
+HRegionLocator.tableName 
 
 
 private TableName
-RegionServerCallable.tableName 
+ScannerCallableWithReplicas.tableName 
 
 
 protected TableName
-RegionAdminServiceCallable.tableName 
+ClientScanner.tableName 
 
 
 private TableName
-BufferedMutatorImpl.tableName 
+AsyncClientScanner.tableName 
 
 
 private TableName
-AsyncProcessTask.tableName 
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName 
 
 
 private TableName
-AsyncProcessTask.Builder.tableName 
+AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName 
 
 
 private TableName
-AsyncRequestFutureImpl.tableName 
+RegionInfoBuilder.tableName 
 
 
-protected TableName
-TableBuilderBase.tableName 
+private TableName
+RegionInfoBuilder.MutableRegionInfo.tableName 
 
 
 private TableName
-AsyncBatchRpcRetryingCaller.tableName 
+RawAsyncTableImpl.tableName 
 
 
 private TableName
-RegionInfoBuilder.tableName 
+RegionCoprocessorRpcChannelImpl.tableName 
 
 
 private TableName
-RegionInfoBuilder.MutableRegionInfo.tableName 
+AsyncTableRegionLocatorImpl.tableName 
 
 
-private TableName
-HTable.tableName 
+protected TableName
+RegionAdminServiceCallable.tableName 
 
 
 private TableName
-TableState.tableName 
+HTable.tableName 
 
 
-protected TableName
-RpcRetryingCallerWithReadReplicas.tableName 
+private TableName
+BufferedMutatorImpl.tableName 
 
 
-protected TableName
-AsyncTableBuilderBase.tableName 
+private TableName
+AsyncBatchRpcRetryingCaller.tableName 
 
 
 private TableName
-AsyncSingleRequestRpcRetryingCaller.tableName 
+BufferedMutatorParams.tableName 
 
 
 private TableName
-ScannerCallableWithReplicas.tableName 
+HBaseAdmin.TableFuture.tableName 
 
 
-protected TableName
-RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName 
+private TableName
+AsyncRequestFutureImpl.tableName 
 
 
 private TableName
-AsyncTableRegionLocatorImpl.tableName 
+AsyncProcessTask.tableName 
 
 
 private TableName
-HBaseAdmin.TableFuture.tableName 
+AsyncProcessTask.Builder.tableName 
 
 
-private TableName
-RegionCoprocessorRpcChannelImpl.tableName 
+protected TableName
+RawAsyncHBaseAdmin.TableProcedureBiConsumer.tableName 
 
 
-protected TableName
-ClientScanner.tableName 
+private TableName
+RegionServerCallable.tableName 
 
 
 private TableName
-BufferedMutatorParams.tableName 
+AsyncSingleRequestRpcRetryingCaller.tableName 
 
 
-private TableName
-AsyncClientScanner.tableName 
+protected TableName
+TableBuilderBase.tableName 
 
 
-private TableName
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.tableName 
+protected TableName
+RpcRetryingCallerWithReadReplicas.tableName 
 
 
-private TableName
-AsyncRpcRetryingCallerFactory.BatchCallerBuilder.tableName 
+protected TableName
+AsyncTableBuilderBase.tableName 
 
 
 private TableName
-HRegionLocator.tableName 
+TableState.tableName 
 
 
 
@@ -2206,83 +2206,83 @@ service.
 
 
 TableName
-RawAsyncTableImpl.getName() 
+AsyncTable.getName()
+Gets the fully qualified table name instance of this 
table.
+
 
 
 TableName
-RegionLocator.getName()
+Table.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
 
 TableName
-BufferedMutatorImpl.getName() 
+HRegionLocator.getName() 
 
 
 TableName
-BufferedMutator.getName()
-Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
+AsyncTableRegionLocator.getName()
+Gets the fully qualified table name instance of the table 
whose region we want to locate.
 
 
 
 TableName
-HTable.getName() 
+AsyncTableImpl.getName() 
 
 
 TableName
-AsyncBufferedMutator.getName()
-Gets the fully qualified table name instance of the table 
that this
- AsyncBufferedMutator writes to.
-
+RawAsyncTableImpl.getName() 
 
 
 TableName
-Table.getName()
-Gets the fully qualified table name instance of this 
table.
-
+AsyncTableRegionLocatorImpl.getName() 
 
 
 TableName
-AsyncTableImpl.getName() 
+BufferedMutator.getName()
+Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
+
 
 
 TableName
-AsyncTable.getName()
+RegionLocator.getName()
 Gets the fully qualified table name instance of this 
table.
 
 
 
 TableName
-AsyncTableRegionLocatorImpl.getName() 
+AsyncBufferedMutatorImpl.getName() 
 
 
 TableName
-AsyncTableRegionLoc
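
The TableName hunks above shuffle row order only; every implementation still
returns "the fully qualified table name instance of this table" from getName().
A short sketch of what that instance carries, with 'conn' a hypothetical open
Connection and the namespace/table names made up for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;

public class TableNameTour {
  static void printName(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("ns1", "t1"))) {
      TableName name = table.getName();
      System.out.println(name.getNameAsString());       // "ns1:t1"
      System.out.println(name.getNamespaceAsString());  // "ns1"
      System.out.println(name.getQualifierAsString());  // "t1"
    }
  }
}
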

[29/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index 57ce04e..44ca7f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -239,15 +239,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-ServerMetricsBuilder.serverName 
+HRegionLocation.serverName 
 
 
 private ServerName
-ServerMetricsBuilder.ServerMetricsImpl.serverName 
+ServerMetricsBuilder.serverName 
 
 
 private ServerName
-HRegionLocation.serverName 
+ServerMetricsBuilder.ServerMetricsImpl.serverName 
 
 
 
@@ -306,7 +306,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName() 
+ClusterMetrics.getMasterName()
+Returns detailed information about the current master ServerName.
+
 
 
 ServerName
@@ -316,15 +318,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ClusterMetrics.getMasterName()
-Returns detailed information about the current master ServerName.
-
+ClusterMetricsBuilder.ClusterMetricsImpl.getMasterName() 
 
 
 ServerName
-ServerLoad.getServerName()
-Deprecated. 
- 
+HRegionLocation.getServerName() 
 
 
 ServerName
@@ -332,11 +330,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ServerName
-ServerMetricsBuilder.ServerMetricsImpl.getServerName() 
+ServerLoad.getServerName()
+Deprecated. 
+ 
 
 
 ServerName
-HRegionLocation.getServerName() 
+ServerMetricsBuilder.ServerMetricsImpl.getServerName() 
 
 
 ServerName
@@ -405,7 +405,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames() 
+ClusterMetrics.getBackupMasterNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -415,7 +415,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetrics.getBackupMasterNames() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getBackupMasterNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -428,7 +428,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames() 
+ClusterMetrics.getDeadServerNames() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
@@ -438,7 +438,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-ClusterMetrics.getDeadServerNames() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getDeadServerNames() 
 
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
@@ -448,7 +448,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
-ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics() 
+ClusterMetrics.getLiveServerMetrics() 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
@@ -458,7 +458,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
-ClusterMetrics.getLiveServerMetrics() 
+ClusterMetricsBuilder.ClusterMetricsImpl.getLiveServerMetrics() 
 
 
 static Pair
@@ -857,31 +857,31 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ServerName
-FastFailInterceptorContext.server 
+AsyncRequestFutureImpl.SingleServerRequestRunnable.ser
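
Again only row order changes here. Since HRegionLocation.getServerName() now
sorts first among the getServerName() rows, a quick sketch of where that value
usually comes from on the client side; 'conn' is a hypothetical open Connection
and the table/row are made up:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegion {
  static void printHostingServer(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("some-row"));
      ServerName sn = loc.getServerName();
      System.out.println(sn.getHostname() + ":" + sn.getPort() + " started@" + sn.getStartcode());
    }
  }
}
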

[29/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.html
index 67d82a0..91643d6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class NamespaceTableCfWALEntryFilter
+public class NamespaceTableCfWALEntryFilter
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements WALEntryFilter, WALCellFilter
 Filter a WAL Entry by the peer config: replicate_all flag, 
namespaces config, table-cfs config,
@@ -150,10 +150,6 @@ implements bulkLoadFilter 
 
 
-private static org.slf4j.Logger
-LOG 
-
-
 private ReplicationPeer
 peer 
 
@@ -236,22 +232,13 @@ implements 
-
-
-
-
-LOG
-private static final org.slf4j.Logger LOG
-
-
 
 
 
 
 
 peer
-private final ReplicationPeer peer
+private final ReplicationPeer peer
 
 
 
@@ -260,7 +247,7 @@ implements 
 
 bulkLoadFilter
-private BulkLoadCellFilter bulkLoadFilter
+private BulkLoadCellFilter bulkLoadFilter
 
 
 
@@ -277,7 +264,7 @@ implements 
 
 NamespaceTableCfWALEntryFilter
-public NamespaceTableCfWALEntryFilter(ReplicationPeer peer)
+public NamespaceTableCfWALEntryFilter(ReplicationPeer peer)
 
 
 
@@ -294,7 +281,7 @@ implements 
 
 filter
-public WAL.Entry filter(WAL.Entry entry)
+public WAL.Entry filter(WAL.Entry entry)
 Description copied from 
interface: WALEntryFilter
 Applies the filter, possibly returning a different Entry 
instance.
  If null is returned, the entry will be skipped.
@@ -315,7 +302,7 @@ implements 
 
 filterCell
-public Cell filterCell(WAL.Entry entry,
+public Cell filterCell(WAL.Entry entry,
Cell cell)
 Description copied from 
interface: WALCellFilter
 Applies the filter, possibly returning a different Cell 
instance.
@@ -338,7 +325,7 @@ implements 
 
 filterByExcludeTableCfs
-private boolean filterByExcludeTableCfs(TableName tableName,
+private boolean filterByExcludeTableCfs(TableName tableName,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String family,
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapListString>> excludeTableCfs)
 
@@ -349,7 +336,7 @@ implements 
 
 filterByTableCfs
-private boolean filterByTableCfs(TableName tableName,
+private boolean filterByTableCfs(TableName tableName,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String family,
  https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapListString>> tableCfs)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.html 
b/devapidocs/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.html
index 503c7a9..c73f534 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ScopeWALEntryFilter
+public class ScopeWALEntryFilter
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements WALEntryFilter, WALCellFilter
 Keeps KVs that are scoped other than local
@@ -137,7 +137,7 @@ implements Field and Description
 
 
-(package private) BulkLoadCellFilter
+private BulkLoadCellFilter
 bulkLoadFilter 
 
 
@@ -213,7 +213,
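
Both filters in this commit implement the WALEntryFilter and WALCellFilter
interfaces whose contracts are quoted above: filter(entry) may return a
different entry or null to skip it, and filterCell(entry, cell) may return a
different cell or null to drop it. A minimal sketch of a custom filter built on
the same two interfaces; the column family is a hypothetical placeholder:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.replication.WALCellFilter;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;

/** Replicates only cells of one column family; everything else is dropped per cell. */
public class SingleFamilyWALFilter implements WALEntryFilter, WALCellFilter {
  private final byte[] family = Bytes.toBytes("cf");  // hypothetical family name

  @Override
  public WAL.Entry filter(WAL.Entry entry) {
    return entry;  // keep every entry; returning null would skip it entirely
  }

  @Override
  public Cell filterCell(WAL.Entry entry, Cell cell) {
    // Returning null drops the cell, mirroring the javadoc quoted above
    return CellUtil.matchingFamily(cell, family) ? cell : null;
  }
}
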

[29/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 96d745d..531711f 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -259,7 +259,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static Cell
 CellUtil.createCell(Cell cell,
-  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tags)
+  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tags)
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
@@ -409,7 +409,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static int
 CellUtil.copyFamilyTo(Cell cell,
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
 int destinationOffset)
 Copies the family to the given bytebuffer
 
@@ -425,7 +425,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static int
 CellUtil.copyQualifierTo(Cell cell,
-   http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+   https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
int destinationOffset)
 Copies the qualifier to the given bytebuffer
 
@@ -447,7 +447,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static int
 CellUtil.copyRowTo(Cell cell,
- http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+ https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
  int destinationOffset)
 Copies the row to the given bytebuffer
 
@@ -465,7 +465,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static int
 CellUtil.copyTagTo(Cell cell,
- http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+ https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
  int destinationOffset)
 Deprecated. 
 As of HBase-2.0. Will be 
removed in 3.0.
@@ -483,7 +483,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static int
 CellUtil.copyValueTo(Cell cell,
-   http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+   https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
int destinationOffset)
 Copies the value to the given bytebuffer
 
@@ -510,7 +510,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 static Cell
 CellUtil.createCell(Cell cell,
-  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tags)
+  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List tags)
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
@@ -617,7 +617,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 CellUtil.getCellKeyAsString(Cell cell) 
 
 
@@ -629,7 +629,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer
+static 
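
This hunk changes nothing but the scheme of the JDK doc links (http to https);
the CellUtil copy*To helpers themselves are stable. A sketch of copyRowTo using
a cell built through CellBuilderFactory, since the hunk also flags
CellUtil.createCell as deprecated for removal in 3.0:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CopyRowToSketch {
  public static void main(String[] args) {
    Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(Bytes.toBytes("row-1"))
        .setFamily(Bytes.toBytes("cf"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(System.currentTimeMillis())
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v"))
        .build();
    ByteBuffer dest = ByteBuffer.allocate(cell.getRowLength());
    int copied = CellUtil.copyRowTo(cell, dest, 0);  // "Copies the row to the given bytebuffer"
    System.out.println(copied + " bytes: " + Bytes.toString(dest.array()));
  }
}
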

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/filter/class-use/ColumnValueFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/ColumnValueFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ColumnValueFilter.html
new file mode 100644
index 000..93700df
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ColumnValueFilter.html
@@ -0,0 +1,170 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class org.apache.hadoop.hbase.filter.ColumnValueFilter (Apache 
HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.filter.ColumnValueFilter
+
+
+
+
+
+Packages that use ColumnValueFilter 
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.filter
+
+Provides row-level filters applied to HRegion scan results 
during calls to
+ ResultScanner.next().
+
+
+
+
+
+
+
+
+
+
+Uses of ColumnValueFilter in org.apache.hadoop.hbase.filter
+
+Methods in org.apache.hadoop.hbase.filter
 that return ColumnValueFilter 
+
+Modifier and Type
+Method and Description
+
+
+
+static ColumnValueFilter
+ColumnValueFilter.parseFrom(byte[] pbBytes)
+Parse protobuf bytes to a ColumnValueFilter
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index 0c342b2..ee00b7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
@@ -259,109 +259,113 @@
 
 
 Filter.ReturnCode
-QualifierFilter.filterCell(Cell c) 
+ColumnValueFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FilterWrapper.filterCell(Cell c) 
+QualifierFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterColumn(Cell cell) 
+FilterWrapper.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterColumn(Cell cell) 
+ColumnPrefixFilter.filterColumn(Cell cell) 
 
 
 Filter.ReturnCode
+MultipleColumnPrefixFilter.filterColumn(Cell cell) 
+
+
+Filter.ReturnCode
 ValueFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 SkipFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 FilterListBase.filterKeyValue(Cell c) 
 
-
+
 Filter.ReturnCode
 FamilyFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 ColumnPrefixFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 PageFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 RowFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 ColumnRangeFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 ColumnCountGetFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 MultipleColumnPrefixFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 ColumnPaginationFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 DependentColumnFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 InclusiveStopFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 KeyOnlyFilter.filterKeyValue(Cell ignored)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 MultiRowRangeFilter.filterKeyValue(Cell ignored)
 Deprecated. 
 
 
-
+
 Filter.ReturnCode
 Filter.filterKeyValue(Cell c)
 Deprecated. 
@@ -370,93 +374,93 @@
 
 
 
-
+
 Filter.ReturnCode
 FirstKeyOnlyFilter.filterKeyValue(Cell c
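
This commit adds the ColumnValueFilter class (the new class-use page above) and
shifts every filterKeyValue row down as the deprecated method gives way to
filterCell. A minimal sketch of the new filter in a Scan; family, qualifier and
value are hypothetical, and the comparison note reflects the filter's stated
purpose of returning just the tested column rather than the whole row:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnValueFilterSketch {
  static Scan scanMatching() {
    // Keep only cells of cf:q whose value equals "expected"
    return new Scan().setFilter(new ColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"),
        CompareOperator.EQUAL, Bytes.toBytes("expected")));
  }
}
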

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
index d143ef8..4583895 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
@@ -258,7 +258,7 @@
 250
 251@Override
 252public long heapSize() {
-253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+253  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 254  if (this.tags != null) {
 255sum += 
ClassSize.sizeOf(this.tags);
 256  }
@@ -454,7 +454,7 @@
 446
 447@Override
 448public long heapSize() {
-449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedHeapSizeOf(cell);
+449  long sum = HEAP_SIZE_OVERHEAD + 
estimatedSizeOfCell(cell);
 450  // this.tags is on heap byte[]
 451  if (this.tags != null) {
 452sum += 
ClassSize.sizeOf(this.tags);
@@ -2791,192 +2791,193 @@
 2783   * {@link HeapSize} we call {@link 
HeapSize#heapSize()} so cell can give a correct value. In other
 2784   * cases we just consider the bytes 
occupied by the cell components ie. row, CF, qualifier,
 2785   * timestamp, type, value and tags.
-2786   * @param cell
-2787   * @return estimate of the heap 
space
-2788   */
-2789  public static long 
estimatedHeapSizeOf(final Cell cell) {
-2790if (cell instanceof HeapSize) {
-2791  return ((HeapSize) 
cell).heapSize();
-2792}
-2793// TODO: Add sizing of references 
that hold the row, family, etc., arrays.
-2794return 
estimatedSerializedSizeOf(cell);
-2795  }
-2796
-2797  /**
-2798   * This method exists just to 
encapsulate how we serialize keys. To be replaced by a factory that
-2799   * we query to figure what the Cell 
implementation is and then, what serialization engine to use
-2800   * and further, how to serialize the 
key for inclusion in hfile index. TODO.
-2801   * @param cell
-2802   * @return The key portion of the Cell 
serialized in the old-school KeyValue way or null if passed
-2803   * a null 
cell
-2804   */
-2805  public static byte[] 
getCellKeySerializedAsKeyValueKey(final Cell cell) {
-2806if (cell == null) return null;
-2807byte[] b = new 
byte[KeyValueUtil.keyLength(cell)];
-2808KeyValueUtil.appendKeyTo(cell, b, 
0);
-2809return b;
-2810  }
-2811
-2812  /**
-2813   * Create a Cell that is smaller than 
all other possible Cells for the given Cell's row.
-2814   * @param cell
-2815   * @return First possible Cell on 
passed Cell's row.
-2816   */
-2817  public static Cell 
createFirstOnRow(final Cell cell) {
-2818if (cell instanceof 
ByteBufferExtendedCell) {
-2819  return new 
FirstOnRowByteBufferExtendedCell(
-2820  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2821  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength());
-2822}
-2823return new 
FirstOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-2824  }
-2825
-2826  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength) {
-2827return new FirstOnRowCell(row, 
roffset, rlength);
-2828  }
-2829
-2830  public static Cell 
createFirstOnRow(final byte[] row, final byte[] family, final byte[] col) {
-2831return createFirstOnRow(row, 0, 
(short) row.length, family, 0, (byte) family.length, col, 0,
-2832col.length);
-2833  }
-2834
-2835  public static Cell 
createFirstOnRow(final byte[] row, int roffset, short rlength,
-2836  final byte[] family, int foffset, 
byte flength, final byte[] col, int coffset, int clength) {
-2837return new FirstOnRowColCell(row, 
roffset, rlength, family, foffset, flength, col, coffset,
-2838clength);
-2839  }
-2840
-2841  public static Cell 
createFirstOnRow(final byte[] row) {
-2842return createFirstOnRow(row, 0, 
(short) row.length);
-2843  }
-2844
-2845  public static Cell 
createFirstOnRowFamily(Cell cell, byte[] fArray, int foff, int flen) {
-2846if (cell instanceof 
ByteBufferExtendedCell) {
-2847  return new 
FirstOnRowColByteBufferExtendedCell(
-2848  ((ByteBufferExtendedCell) 
cell).getRowByteBuffer(),
-2849  ((ByteBufferExtendedCell) 
cell).getRowPosition(), cell.getRowLength(),
-2850  ByteBuffer.wrap(fArray), foff, 
(byte) flen, HConstants.EMPTY_BYTE_BUFFER, 0, 0);
-2851}
-2852return new 
FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength(),
-2853fArray, foff, (byte) flen, 
HConstants.EMPTY_BYTE_ARRAY, 0, 0);
-2854  }
-2855
-2856  public static Cell 
createFirstO
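
The hunk renames estimatedHeapSizeOf to estimatedSizeOfCell at the heapSize()
call sites and re-numbers the createFirstOnRow family. PrivateCellUtil is
@InterfaceAudience.Private, so the sketch below only restates the dispatch
visible in the listing (trust a cell's own HeapSize accounting when available,
otherwise estimate from its serialized components); it is not a public API
recommendation:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.io.HeapSize;

public class CellSizeSketch {
  static long estimateCellSize(Cell cell) {
    if (cell instanceof HeapSize) {
      return ((HeapSize) cell).heapSize();  // the cell knows its own heap footprint
    }
    // Fallback used by the listing when the cell does not implement HeapSize
    return PrivateCellUtil.estimatedSerializedSizeOf(cell);
  }
}
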

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
index 66f3dc6..44bd3a6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
@@ -24,486 +24,461 @@
 016package org.apache.hadoop.hbase.client;
 017
 018import static 
org.apache.hadoop.hbase.client.BufferedMutatorParams.UNSET;
-019import java.io.IOException;
-020import java.io.InterruptedIOException;
-021import java.util.Collections;
-022import java.util.Iterator;
-023import java.util.List;
-024import 
java.util.NoSuchElementException;
-025import java.util.Timer;
-026import java.util.TimerTask;
-027import 
java.util.concurrent.ConcurrentLinkedQueue;
-028import 
java.util.concurrent.ExecutorService;
-029import java.util.concurrent.TimeUnit;
-030import 
java.util.concurrent.atomic.AtomicInteger;
-031import 
java.util.concurrent.atomic.AtomicLong;
-032import 
org.apache.hadoop.conf.Configuration;
-033import 
org.apache.hadoop.hbase.TableName;
-034import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import 
org.apache.yetus.audience.InterfaceStability;
-037import org.slf4j.Logger;
-038import org.slf4j.LoggerFactory;
-039
-040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-041
-042/**
-043 * 

-044 * Used to communicate with a single HBase table similar to {@link Table}
-045 * but meant for batched, potentially asynchronous puts. Obtain an instance from
-046 * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
-047 * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
-048 * or by setting alternate classname via the key {} in Configuration.
-049 *
-050 *
-051 *
-052 * While this can be used across threads, great care should be used when doing so.
-053 * Errors are global to the buffered mutator and the Exceptions can be thrown on any
-054 * thread that causes the flush for requests.
-055 *
-056 *
-057 * @see ConnectionFactory
-058 * @see Connection
-059 * @since 1.0.0
-060 */
-061@InterfaceAudience.Private
-062@InterfaceStability.Evolving
-063public class BufferedMutatorImpl implements BufferedMutator {
-064
-065  private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
-066
-067  private final ExceptionListener listener;
-068
-069  private final TableName tableName;
-070
-071  private final Configuration conf;
-072  private final ConcurrentLinkedQueue writeAsyncBuffer = new ConcurrentLinkedQueue<>();
-073  private final AtomicLong currentWriteBufferSize = new AtomicLong(0);
-074  /**
-075   * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}.
-076   * The {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation.
-077   */
-078  private final AtomicInteger undealtMutationCount = new AtomicInteger(0);
-079  private final long writeBufferSize;
-080
-081  private final AtomicLong writeBufferPeriodicFlushTimeoutMs = new AtomicLong(0);
-082  private final AtomicLong writeBufferPeriodicFlushTimerTickMs =
-083      new AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
-084  private Timer writeBufferPeriodicFlushTimer = null;
-085
-086  private final int maxKeyValueSize;
-087  private final ExecutorService pool;
-088  private final AtomicInteger rpcTimeout;
-089  private final AtomicInteger operationTimeout;
-090  private final boolean cleanupPoolOnClose;
-091  private volatile boolean closed = false;
-092  private final AsyncProcess ap;
-093
-094  @VisibleForTesting
-095  BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) {
-096    if (conn == null || conn.isClosed()) {
-097      throw new IllegalArgumentException("Connection is null or closed.");
-098    }
-099    this.tableName = params.getTableName();
-100    this.conf = conn.getConfiguration();
-101    this.listener = params.getListener();
-102    if (params.getPool() == null) {
-103      this.pool = HTable.getDefaultExecutor(conf);
-104      cleanupPoolOnClose = true;
-105    } else {
-106      this.pool = params.getPool();
-107      cleanupPoolOnClose = false;
-108    }
-109    ConnectionConfiguration tableConf = new ConnectionConfiguration(conf);
-110    this.writeBufferSize =
-111        params.getWriteBufferSize() != UNSET ?
-112            params.getWriteBufferSize() : tableConf.getWriteBufferSize();
-113
-114    // Set via the setter because it does
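
The class javadoc above says to obtain the mutator from a Connection and close
it afterwards. A minimal end-to-end sketch of exactly that pattern; the table
name 't1' and family 'cf' are hypothetical:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      for (int i = 0; i < 10_000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(i));
        mutator.mutate(put);  // buffered; flushed in bulk as the write buffer fills
      }
      mutator.flush();  // close() also flushes, but an explicit flush makes intent clear
    }
  }
}
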

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
index 53d69df..597d1ba 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
@@ -355,24 +355,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-BufferedMutatorImpl.mutate(Mutation m) 
-
-
-void
 BufferedMutator.mutate(Mutation mutation)
 Sends a Mutation to 
the table.
 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
+AsyncBufferedMutatorImpl.mutate(Mutation mutation) 
+
 
+void
+BufferedMutatorImpl.mutate(Mutation m) 
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
 AsyncBufferedMutator.mutate(Mutation mutation)
 Sends a Mutation to 
the table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-AsyncBufferedMutatorImpl.mutate(Mutation mutation) 
-
 
 
 
@@ -390,24 +390,24 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-BufferedMutatorImpl.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List ms) 
-
-
-void
 BufferedMutator.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List mutations)
 Send some Mutations to 
the table.
 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListCompletableFutureVoid>>
+AsyncBufferedMutatorImpl.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List mutations) 
+
 
+void
+BufferedMutatorImpl.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List ms) 
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListCompletableFutureVoid>>
 AsyncBufferedMutator.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List mutations)
 Send some Mutations to 
the table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListCompletableFutureVoid>>
-AsyncBufferedMutatorImpl.mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List mutations) 
-
 
 static RowMutations
 RowMutations.of(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List mutations)
@@ -543,15 +543,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 MutationSerialization.getDeserializer(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
index 1d50582..7eb7661 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
@@ -104,14 +104,14 @@
 
 
 void
-HMaster.checkTableModifiable(TableName tableName) 
-
-
-void
 MasterServices.checkTableModifiable(TableName tableName)
 Check table is modifiable; i.e.
 
 
+
+void
+HMaster.checkTableModifiable(TableName tableName) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
index a32bb19..b1a475b 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
@@ -170,14 +170,14 @@
 
 
 void
-HMaster.checkTableModifiable(TableName tableName) 
-
-
-void
 MasterServices.checkTableModifiable(TableName tableName)
 Check table is modifiable; i.e.
 
 
+
+void
+HMaster.checkTableModifiable(TableName tableName) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
index 594e74b..90f6967 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Tag.html
@@ -243,6 +243,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+PrivateCellUtil.getTags(Cell cell) 
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 CellUtil.getTags(Cell cell)
 Deprecated. 
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
@@ -250,10 +254,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-PrivateCellUtil.getTags(Cell cell) 
-
 
 static http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 CellUtil.tagsIterator(byte[] tags,
@@ -395,6 +395,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Cell
+PrivateCellUtil.createCell(Cell cell,
+  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
+
+
+static Cell
 CellUtil.createCell(Cell cell,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags)
 Deprecated. 
@@ -402,11 +407,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
-static Cell
-PrivateCellUtil.createCell(Cell cell,
-  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
-
 
 static byte[]
 TagUtil.fromList(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags)
@@ -415,16 +415,16 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-RawCellBuilder
-RawCellBuilder.setTags(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
+ExtendedCellBuilder
+ExtendedCellBuilderImpl.setTags(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
 
 
-ExtendedCellBuilder
-ExtendedCellBuilder.setTags(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
+RawCellBuilder
+RawCellBuilder.setTags(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List tags) 
 
 
 ExtendedCellBuilder
-ExtendedCellBuilderImpl.setTags(http
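For orientation: as this hunk shows, tag access moved behind private/raw interfaces in 2.0 — CellUtil.getTags is deprecated in favor of PrivateCellUtil, with RawCellBuilder/RawCell as the sanctioned surface. A hedged sketch of reading tags server-side (for example in a coprocessor), assuming the RawCell cast is available there; these APIs are not for general client use:

import java.util.Iterator;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.RawCell;
import org.apache.hadoop.hbase.Tag;

class TagSketch {
  // Sketch only: Tag accessors are limited-private; client code should not rely on them.
  static void dumpTags(Cell cell) {
    if (cell instanceof RawCell) {
      Iterator<Tag> it = ((RawCell) cell).getTags();
      while (it.hasNext()) {
        Tag t = it.next();
        System.out.println("tag type=" + t.getType()); // the type byte identifies e.g. visibility tags
      }
    }
  }
}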

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
index c7fba62..eca6413 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactType.html
@@ -127,30 +127,28 @@ the order they are declared.
 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-RawAsyncHBaseAdmin.compact(TableName tableName,
+private void
+HBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
boolean major,
CompactType compactType)
-Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
+Compact a table.
 
 
 
-private void
-HBaseAdmin.compact(TableName tableName,
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
+RawAsyncHBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
boolean major,
CompactType compactType)
-Compact a table.
+Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-AsyncAdmin.compact(TableName tableName,
+AsyncHBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
-   CompactType compactType)
-Compact a column family within a table.
-
+   CompactType compactType) 
 
 
 void
@@ -161,14 +159,16 @@ the order they are declared.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-RawAsyncHBaseAdmin.compact(TableName tableName,
+void
+HBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
-   CompactType compactType) 
+   CompactType compactType)
+Compact a column family within a table.
+
 
 
-void
-HBaseAdmin.compact(TableName tableName,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
+AsyncAdmin.compact(TableName tableName,
byte[] columnFamily,
CompactType compactType)
 Compact a column family within a table.
@@ -176,16 +176,14 @@ the order they are declared.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-AsyncHBaseAdmin.compact(TableName tableName,
+RawAsyncHBaseAdmin.compact(TableName tableName,
byte[] columnFamily,
CompactType compactType) 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-AsyncAdmin.compact(TableName tableName,
-   CompactType compactType)
-Compact a table.
-
+AsyncHBaseAdmin.compact(TableName tableName,
+   CompactType compactType) 
 
 
 void
@@ -195,28 +193,28 @@ the order they are declared.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureVoid>
-RawAsyncHBaseAdmin.compact(TableName tableName,
-   CompactType compactType) 
-
-
 void
 HBaseAdmin.compact(TableName tableName,
CompactType compactType)
 Compact a tabl
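The hunk above reorders the sync (HBaseAdmin) and async (RawAsyncHBaseAdmin) compact overloads. The public entry point is Admin.compact, which takes an optional column family plus a CompactType; a minimal sketch (table and family names hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

class CompactSketch {
  static void requestCompaction(org.apache.hadoop.conf.Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queues a compaction of family "f" of table "t1"; returns once the request is
      // accepted, not when the compaction finishes.
      admin.compact(TableName.valueOf("t1"), Bytes.toBytes("f"), CompactType.NORMAL);
    }
  }
}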

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
index 6c0e3b8..20b7674 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
@@ -189,130 +189,130 @@
 
 
 Size
-RegionLoad.getBloomFilterSize()
-Deprecated. 
- 
-
-
-Size
 RegionMetrics.getBloomFilterSize() 
 
-
-Size
-RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize() 
-
 
 Size
-ServerLoad.getMaxHeapSize()
+RegionLoad.getBloomFilterSize()
 Deprecated. 
  
 
 
 Size
-ServerMetrics.getMaxHeapSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize() 
 
 
 Size
-ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize() 
+ServerMetrics.getMaxHeapSize() 
 
 
 Size
-RegionLoad.getMemStoreSize()
+ServerLoad.getMaxHeapSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getMemStoreSize() 
+ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize() 
+RegionMetrics.getMemStoreSize() 
 
 
 Size
-RegionLoad.getStoreFileIndexSize()
+RegionLoad.getMemStoreSize()
 Deprecated. 
  
 
 
 Size
+RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize() 
+
+
+Size
 RegionMetrics.getStoreFileIndexSize()
 TODO: why do we pass the same value to different counters? 
Currently, the value from
  getStoreFileIndexSize() is the same as getStoreFileRootLevelIndexSize();
  see HRegionServer#createRegionLoad.
 
 
-
-Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize() 
-
 
 Size
-RegionLoad.getStoreFileRootLevelIndexSize()
+RegionLoad.getStoreFileIndexSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getStoreFileRootLevelIndexSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize() 
+RegionMetrics.getStoreFileRootLevelIndexSize() 
 
 
 Size
-RegionLoad.getStoreFileSize()
+RegionLoad.getStoreFileRootLevelIndexSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getStoreFileSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize() 
+RegionMetrics.getStoreFileSize() 
 
 
 Size
-RegionLoad.getStoreFileUncompressedDataIndexSize()
+RegionLoad.getStoreFileSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getStoreFileUncompressedDataIndexSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize() 
+RegionMetrics.getStoreFileUncompressedDataIndexSize() 
 
 
 Size
-RegionLoad.getUncompressedStoreFileSize()
+RegionLoad.getStoreFileUncompressedDataIndexSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getUncompressedStoreFileSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize() 
+RegionMetrics.getUncompressedStoreFileSize() 
 
 
 Size
-ServerLoad.getUsedHeapSize()
+RegionLoad.getUncompressedStoreFileSize()
 Deprecated. 
  
 
 
 Size
+RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize() 
+
+
+Size
 ServerMetrics.getUsedHeapSize() 
 
+
+Size
+ServerLoad.getUsedHeapSize()
+Deprecated. 
+ 
+
 
 Size
 ServerMetricsBuilder.ServerMetricsImpl.getUsedHeapSize() 
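All of the reordered getters above return the 2.0 Size value type rather than raw longs, so callers convert explicitly. A hedged sketch of reading them through Admin.getRegionMetrics (assuming a release where that accessor exists, roughly 2.1+):

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.client.Admin;

class MetricsSketch {
  static void printRegionSizes(Admin admin, ServerName server) throws Exception {
    for (RegionMetrics rm : admin.getRegionMetrics(server)) {
      // Size carries its own unit; convert rather than guessing bytes vs MB.
      double storeMb = rm.getStoreFileSize().get(Size.Unit.MEGABYTE);
      double memMb = rm.getMemStoreSize().get(Size.Unit.MEGABYTE);
      System.out.printf("%s: %.1f MB store files, %.1f MB memstore%n",
          rm.getNameAsString(), storeMb, memMb);
    }
  }
}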

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
index bc99516..7f3e934 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
@@ -122,11 +122,11 @@
 
 
 TableDescriptors
-HMaster.getTableDescriptors() 
+MasterServices.getTableDescriptors() 
 
 
 TableDescriptors
-MasterServices.getTableDescriptors() 
+HMaster.getTableDescriptors() 
 
 
 
@@ -138,7 +138,7 @@
 
 
 
-static void
+private void
 TableStateManager.fixTableStates(TableDescriptors tableDescriptors,
   Connection connection) 
 



[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
index 7676dbd..dd70c74 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
@@ -399,10 +399,8 @@ service.
 
 
 
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureBoolean>
-AsyncTable.exists(Get get)
-Test for the existence of columns in the table, as 
specified by the Get.
-
+boolean
+HTable.exists(Get get) 
 
 
 boolean
@@ -411,32 +409,34 @@ service.
 
 
 
-boolean
-HTable.exists(Get get) 
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureBoolean>
+AsyncTable.exists(Get get)
+Test for the existence of columns in the table, as 
specified by the Get.
+
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncTable.get(Get get)
-Extracts certain cells from a given row.
-
+RawAsyncTableImpl.get(Get get) 
 
 
 Result
+HTable.get(Get get) 
+
+
+Result
 Table.get(Get get)
 Extracts certain cells from a given row.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncTableImpl.get(Get get) 
-
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-RawAsyncTableImpl.get(Get get) 
+AsyncTableImpl.get(Get get) 
 
 
-Result
-HTable.get(Get get) 
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+AsyncTable.get(Get get)
+Extracts certain cells from a given row.
+
 
 
 private Result
@@ -457,10 +457,8 @@ service.
 
 
 
-default http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListCompletableFutureBoolean>>
-AsyncTable.exists(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List gets)
-Test for the existence of columns in the table, as 
specified by the Gets.
-
+boolean[]
+HTable.exists(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List gets) 
 
 
 boolean[]
@@ -469,16 +467,12 @@ service.
 
 
 
-boolean[]
-HTable.exists(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List gets) 
-
-
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureListBoolean>>
-AsyncTable.existsAll(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List gets)
-A simple version for batch exists.
+default http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListCompletableFutureBoolean>>
+AsyncTable.exists(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List gets)

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
index ad601c4..53e455f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
@@ -1117,1183 +1117,1186 @@
 1109  @Nullable
 1110  public static TableState 
getTableState(Connection conn, TableName tableName)
   throws IOException {
-1112Table metaHTable = 
getMetaHTable(conn);
-1113Get get = new 
Get(tableName.getName()).addColumn(getTableFamily(), getTableStateColumn());
-1114long time = 
EnvironmentEdgeManager.currentTime();
-1115get.setTimeRange(0, time);
-1116Result result =
-1117metaHTable.get(get);
-1118return getTableState(result);
-1119  }
-1120
-1121  /**
-1122   * Fetch table states from META 
table
-1123   * @param conn connection to use
-1124   * @return map {tableName -> 
state}
-1125   * @throws IOException
-1126   */
-1127  public static Map getTableStates(Connection conn)
-1128  throws IOException {
-1129final Map states = new LinkedHashMap<>();
-1130Visitor collector = new Visitor() 
{
-1131  @Override
-1132  public boolean visit(Result r) 
throws IOException {
-1133TableState state = 
getTableState(r);
-1134if (state != null)
-1135  
states.put(state.getTableName(), state);
-1136return true;
-1137  }
-1138};
-1139fullScanTables(conn, collector);
-1140return states;
-1141  }
-1142
-1143  /**
-1144   * Updates state in META
-1145   * @param conn connection to use
-1146   * @param tableName table to look 
for
-1147   * @throws IOException
-1148   */
-1149  public static void 
updateTableState(Connection conn, TableName tableName,
-1150  TableState.State actual) throws 
IOException {
-1151updateTableState(conn, new 
TableState(tableName, actual));
-1152  }
-1153
-1154  /**
-1155   * Decode table state from META 
Result.
-1156   * Should contain cell from 
HConstants.TABLE_FAMILY
-1157   * @param r result
-1158   * @return null if not found
-1159   * @throws IOException
-1160   */
-1161  @Nullable
-1162  public static TableState 
getTableState(Result r)
-1163  throws IOException {
-1164Cell cell = 
r.getColumnLatestCell(getTableFamily(), getTableStateColumn());
-1165if (cell == null) return null;
-1166try {
-1167  return 
TableState.parseFrom(TableName.valueOf(r.getRow()),
-1168  
Arrays.copyOfRange(cell.getValueArray(),
-1169  cell.getValueOffset(), 
cell.getValueOffset() + cell.getValueLength()));
-1170} catch (DeserializationException e) 
{
-1171  throw new IOException(e);
-1172}
-1173
-1174  }
-1175
-1176  /**
-1177   * Implementations 'visit' a catalog 
table row.
-1178   */
-1179  public interface Visitor {
-1180/**
-1181 * Visit the catalog table row.
-1182 * @param r A row from catalog 
table
-1183 * @return True if we are to proceed 
scanning the table, else false if
-1184 * we are to stop now.
-1185 */
-1186boolean visit(final Result r) throws 
IOException;
-1187  }
-1188
-1189  /**
-1190   * Implementations 'visit' a catalog 
table row but with close() at the end.
-1191   */
-1192  public interface CloseableVisitor 
extends Visitor, Closeable {
-1193  }
-1194
-1195  /**
-1196   * A {@link Visitor} that collects 
content out of passed {@link Result}.
-1197   */
-1198  static abstract class 
CollectingVisitor implements Visitor {
-1199final List results = new 
ArrayList<>();
-1200@Override
-1201public boolean visit(Result r) 
throws IOException {
-1202  if (r ==  null || r.isEmpty()) 
return true;
-1203  add(r);
-1204  return true;
-1205}
-1206
-1207abstract void add(Result r);
-1208
-1209/**
-1210 * @return Collected results; wait 
till visits complete to collect all
-1211 * possible results
-1212 */
-1213List getResults() {
-1214  return this.results;
-1215}
-1216  }
-1217
-1218  /**
-1219   * Collects all returned.
-1220   */
-1221  static class CollectAllVisitor extends 
CollectingVisitor {
-1222@Override
-1223void add(Result r) {
-1224  this.results.add(r);
-1225}
-1226  }
-1227
-1228  /**
-1229   * A Visitor that skips offline 
regions and split parents
-1230   */
-1231  public static abstract class 
DefaultVisitorBase implements Visitor {
-1232
-1233public DefaultVisitorBase() {
-1234  super();
-1235}
-1236
-1237public abstract boolean 
visitInternal(Result rowResult) throws IOException;
-1238
-1239
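The removed lines above show the getTableState read path: a timestamp-bounded Get of the table-state column in hbase:meta, decoded by TableState.parseFrom. A hedged sketch of calling it — noting MetaTableAccessor is @InterfaceAudience.Private, so this is illustration only:

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableState;

class TableStateSketch {
  static boolean isEnabled(Connection conn, TableName table) throws Exception {
    TableState state = MetaTableAccessor.getTableState(conn, table); // null if not found
    return state != null && state.getState() == TableState.State.ENABLED;
  }
}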

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index fef1692..852f85c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":9,"i38":10,"i39":10,"i40":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ReplicationSourceManager
+public class ReplicationSourceManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ReplicationListener
 This class is responsible for managing all the replication 
sources. There are two classes of
@@ -135,14 +135,15 @@ implements walsById.
 There are four methods which modify it,
  addPeer(String),
 removePeer(String),
- cleanOldLogs(SortedSet,
 String, String) and preLogRoll(Path).
 walsById
- is a ConcurrentHashMap and there is a Lock for peer id in PeerProcedureHandlerImpl.
 So
- there is no race between addPeer(String)
 and removePeer(String).
- cleanOldLogs(SortedSet,
 String, String) is called by ReplicationSourceInterface.
- So no race with addPeer(String).
 removePeer(String)
 will terminate the
- ReplicationSourceInterface
 firstly, then remove the wals from walsById.
 So no
- race with removePeer(String).
 The only case need synchronized is
- cleanOldLogs(SortedSet,
 String, String) and preLogRoll(Path).
+ cleanOldLogs(SortedSet,
 String, String) and preLogRoll(Path).
+ walsById
 is a ConcurrentHashMap and there is a Lock for peer id in
+ PeerProcedureHandlerImpl.
 So there is no race between addPeer(String)
 and
+ removePeer(String).
 cleanOldLogs(SortedSet,
 String, String) is called by
+ ReplicationSourceInterface.
 So no race with addPeer(String).
+ removePeer(String)
 will terminate the ReplicationSourceInterface
 firstly, then
+ remove the wals from walsById.
 So no race with removePeer(String).
 The only
+ case need synchronized is cleanOldLogs(SortedSet,
 String, String) and
+ preLogRoll(Path).
  No need synchronized on walsByIdRecoveredQueues.
 There are three methods which
  modify it, removePeer(String)
 , cleanOldLogs(SortedSet,
 String, String) and
  ReplicationSourceManager.NodeFailoverWorker.run().
@@ -328,7 +329,7 @@ implements 
-All Methods Instance Methods Concrete Methods 
+All Methods Static Methods Instance Methods Concrete Methods 
 
 Modifier and Type
 Method and Description
@@ -498,11 +499,11 @@ implements 
-(package private) void
+void
 postLogRoll(org.apache.hadoop.fs.Path newLog) 
 
 
-(package private) void
+void
 preLogRoll(org.apache.hadoop.fs.Path newLog) 
 
 
@@ -537,16 +538,29 @@ implements 
+(package private) void
+scopeWALEdits(WALKey logKey,
+ WALEdit logEdit) 
+
+
+(package private) static void
+scopeWALEdits(WALKey logKey,
+ WALEdit logEdit,
+ org.apache.hadoop.conf.Configuration conf)
+Utility method used to set the correct scopes on each log 
key.
+
+
+
 private void
 throwIOExceptionWhenFail(ReplicationSourceManager.ReplicationQueueOperation op) 
 
-
+
 private void
 transferQueues(ServerName deadRS)
 Transfer all the queues of the specified dead region server to 
this region server.
 
 
-
+
 (package private) void
 waitUntilCanBePushed(byte[] encodedName,
 long seq,
@@ -582,7 +596,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -591,7 +605,7 @@ implements 
 
 sources
-private final http://docs.oracle.com/javase

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html 
b/testdevapidocs/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
index b5b0c71..89459e5 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
@@ -142,80 +142,84 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 CELL_HEAP_SIZE 
 
 
+static HBaseClassTestRule
+CLASS_RULE 
+
+
 private static byte[][]
 FAMILIES 
 
-
+
 private static byte[]
 FAMILY 
 
-
+
 private static org.slf4j.Logger
 LOG 
 
-
+
 private static int
 MINICLUSTER_SIZE 
 
-
+
 org.junit.rules.TestName
 name 
 
-
+
 private static int
 NUM_COLS 
 
-
+
 private static int
 NUM_FAMILIES 
 
-
+
 private static int
 NUM_QUALIFIERS 
 
-
+
 private static int
 NUM_ROWS 
 
-
+
 private static byte[]
 QUALIFIER 
 
-
+
 private static byte[][]
 QUALIFIERS 
 
-
+
 private static byte[]
 ROW 
 
-
+
 private static byte[][]
 ROWS 
 
-
+
 private static 
org.apache.hadoop.hbase.client.Table
 TABLE 
 
-
+
 private static 
org.apache.hadoop.hbase.TableName
 TABLE_NAME
 Table configuration
 
 
-
+
 private static HBaseTestingUtility
 TEST_UTIL 
 
-
+
 private static long
 timeout 
 
-
+
 private static byte[]
 VALUE 
 
-
+
 private static int
 VALUE_SIZE 
 
@@ -495,13 +499,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
 
 
 
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -510,7 +523,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -519,7 +532,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MINICLUSTER_SIZE
-private static final int MINICLUSTER_SIZE
+private static final int MINICLUSTER_SIZE
 
 See Also:
 Constant
 Field Values
@@ -532,7 +545,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE
-private static org.apache.hadoop.hbase.client.Table TABLE
+private static org.apache.hadoop.hbase.client.Table TABLE
 
 
 
@@ -541,7 +554,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_NAME
-private static org.apache.hadoop.hbase.TableName TABLE_NAME
+private static org.apache.hadoop.hbase.TableName TABLE_NAME
 Table configuration
 
 
@@ -551,7 +564,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_ROWS
-private static int NUM_ROWS
+private static int NUM_ROWS
 
 
 
@@ -560,7 +573,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ROW
-private static byte[] ROW
+private static byte[] ROW
 
 
 
@@ -569,7 +582,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ROWS
-private static byte[][] ROWS
+private static byte[][] ROWS
 
 
 
@@ -578,7 +591,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_FAMILIES
-private static int NUM_FAMILIES
+private static int NUM_FAMILIES
 
 
 
@@ -587,7 +600,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private static byte[] FAMILY
+private static byte[] FAMILY
 
 
 
@@ -596,7 +609,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILIES
-private static byte[][] FAMILIES
+private static byte[][] FAMILIES
 
 
 
@@ -605,7 +618,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_QUALIFIERS
-private static int NUM_QUALIFIERS
+private static int NUM_QUALIFIERS
 
 
 
@@ -614,7 +627,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 QUALIFIER
-private static byte[] QUALIFIER
+private static byte[] QUALIFIER
 
 
 
@@ -623,7 +636,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 QUALIFIERS
-private static byte[][] QUALIFIERS
+private static byte[][] QUALIFIERS
 
 
 
@@ -632,7 +645,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 VALUE_SIZE
-private static int VALUE_SIZE
+private static int VALUE_SIZE
 
 
 
@@ -641,7 +654,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 VALUE
-private static byte[] VALUE
+private static byte[] VALUE
 
 
 
@@ -650,7 +663,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_COLS
-private static int NUM_COLS
+private static int NUM_COLS
 
 
 
@@ -659,7 +672,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Obj
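The new CLASS_RULE field threaded through the diff above is the HBaseClassTestRule that HBase test classes declare; the declaration takes roughly this shape (the category annotation is assumed to match the test's tier):

import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;

@Category(MediumTests.class)
public class TestPartialResultsFromClientSide {
  // Enforces the per-class timeout (and related checks) tied to the declared category.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestPartialResultsFromClientSide.class);
  // ... test fields and methods as listed above ...
}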

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
index eb9e252..667152a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
@@ -28,22 +28,22 @@
 020
 021import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
 022import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
-023import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
-024import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
-025import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
-026import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
-027import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
-028import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
-029import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
-030import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
-031import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
-032import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
-033import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
-034import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
-035import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
-036import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
-037import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
-038import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+023import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
+024import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
+025import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+026import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+027import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+028import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+029import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+030import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+031import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+032import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+033import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+034import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+035import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+036import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+037import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
+038import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
 039
 040import java.io.IOException;
 041import java.net.URI;
@@ -70,194 +70,194 @@
 062import 
org.apache.hadoop.hbase.backup.util.BackupUtils;
 063import 
org.apache.hadoop.hbase.client.Connection;
 064import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-065import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-066import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import 
org.apache.yetus.audience.InterfaceAudience;
-068
-069/**
-070 * General backup commands, options and 
usage messages
-071 */
-072
+065import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+066import 
org.apache.yetus.audience.InterfaceAudience;
+067
+068import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+069
+070/**
+071 * General backup commands, options and 
usage messages
+072 */
 073@InterfaceAudience.Private
 074public final class BackupCommands {
-075
-076  public final static String 
INCORRECT_USAGE = "Incorrect usage";
-077
-078  public final static String 
TOP_LEVEL_NOT_ALLOWED =
-079  "Top level (root) folder is not 
allowed to be a backup destination";
-080
-081  public static final String USAGE = 
"Usage: hbase backup COMMAND [command-specific 

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index ca8be5e..b8e6dfa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -6398,514 +6398,514 @@
 6390  int initialBatchProgress = 
scannerContext.getBatchProgress();
 6391  long initialSizeProgress = 
scannerContext.getDataSizeProgress();
 6392  long initialHeapSizeProgress = 
scannerContext.getHeapSizeProgress();
-6393  long initialTimeProgress = 
scannerContext.getTimeProgress();
-6394
-6395  // The loop here is used only when 
at some point during the next we determine
-6396  // that due to effects of filters 
or otherwise, we have an empty row in the result.
-6397  // Then we loop and try again. 
Otherwise, we must get out on the first iteration via return,
-6398  // "true" if there's more data to 
read, "false" if there isn't (storeHeap is at a stop row,
-6399  // and joinedHeap has no more data 
to read for the last row (if set, joinedContinuationRow).
-6400  while (true) {
-6401// Starting to scan a new row. 
Reset the scanner progress according to whether or not
-6402// progress should be kept.
-6403if 
(scannerContext.getKeepProgress()) {
-6404  // Progress should be kept. 
Reset to initial values seen at start of method invocation.
-6405  
scannerContext.setProgress(initialBatchProgress, initialSizeProgress,
-6406  initialHeapSizeProgress, 
initialTimeProgress);
-6407} else {
-6408  
scannerContext.clearProgress();
-6409}
-6410if (rpcCall.isPresent()) {
-6411  // If a user specifies a 
too-restrictive or too-slow scanner, the
-6412  // client might time out and 
disconnect while the server side
-6413  // is still processing the 
request. We should abort aggressively
-6414  // in that case.
-6415  long afterTime = 
rpcCall.get().disconnectSince();
-6416  if (afterTime >= 0) {
-6417throw new 
CallerDisconnectedException(
-6418"Aborting on region " + 
getRegionInfo().getRegionNameAsString() + ", call " +
-6419this + " after " + 
afterTime + " ms, since " +
-6420"caller 
disconnected");
-6421  }
-6422}
-6423
-6424// Let's see what we have in the 
storeHeap.
-6425Cell current = 
this.storeHeap.peek();
-6426
-6427boolean shouldStop = 
shouldStop(current);
-6428// When has filter row is true 
it means that the all the cells for a particular row must be
-6429// read before a filtering 
decision can be made. This means that filters where hasFilterRow
-6430// run the risk of 
enLongAddering out of memory errors in the case that they are applied to a
-6431// table that has very large 
rows.
-6432boolean hasFilterRow = 
this.filter != null && this.filter.hasFilterRow();
-6433
-6434// If filter#hasFilterRow is 
true, partial results are not allowed since allowing them
-6435// would prevent the filters 
from being evaluated. Thus, if it is true, change the
-6436// scope of any limits that 
could potentially create partial results to
-6437// LimitScope.BETWEEN_ROWS so 
that those limits are not reached mid-row
-6438if (hasFilterRow) {
-6439  if (LOG.isTraceEnabled()) {
-6440
LOG.trace("filter#hasFilterRow is true which prevents partial results from 
being "
-6441+ " formed. Changing 
scope of limits that may create partials");
-6442  }
-6443  
scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS);
-6444  
scannerContext.setTimeLimitScope(LimitScope.BETWEEN_ROWS);
-6445}
-6446
-6447// Check if we were getting data 
from the joinedHeap and hit the limit.
-6448// If not, then it's main path - 
getting results from storeHeap.
-6449if (joinedContinuationRow == 
null) {
-6450  // First, check if we are at a 
stop row. If so, there are no more results.
-6451  if (shouldStop) {
-6452if (hasFilterRow) {
-6453  
filter.filterRowCells(results);
-6454}
-6455return 
scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
-6456  }
-6457
-6458  // Check if rowkey filter 
wants to exclude this row. If so, loop to next.
-6459  // Technically, if we hit 
limits before on this row, we don't need this call.
-6460  if (filterRowKey(current)) {
-6461  

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 9c7f010..9df0ee6 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static interface HFileBlock.FSReader
+static interface HFileBlock.FSReader
 An HFile block reader with iteration ability.
 
 
@@ -199,7 +199,7 @@ var activeTableTab = "activeTableTab";
 
 
 readBlockData
-HFileBlock readBlockData(long offset,
+HFileBlock readBlockData(long offset,
  long onDiskSize,
  boolean pread,
  boolean updateMetrics)
@@ -224,7 +224,7 @@ var activeTableTab = "activeTableTab";
 
 
 blockRange
-HFileBlock.BlockIterator blockRange(long startOffset,
+HFileBlock.BlockIterator blockRange(long startOffset,
 long endOffset)
 Creates a block iterator over the given portion of the HFile.
  The iterator returns blocks starting with offset such that offset <=
@@ -246,7 +246,7 @@ var activeTableTab = "activeTableTab";
 
 
 closeStreams
-void closeStreams()
+void closeStreams()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Closes the backing streams
 
@@ -261,7 +261,7 @@ var activeTableTab = "activeTableTab";
 
 
 getBlockDecodingContext
-HFileBlockDecodingContext getBlockDecodingContext()
+HFileBlockDecodingContext getBlockDecodingContext()
 Get a decoder for BlockType.ENCODED_DATA
 blocks from this file.
 
 
@@ -271,7 +271,7 @@ var activeTableTab = "activeTableTab";
 
 
 getDefaultBlockDecodingContext
-HFileBlockDecodingContext getDefaultBlockDecodingContext()
+HFileBlockDecodingContext getDefaultBlockDecodingContext()
 Get the default decoder for blocks from this file.
 
 
@@ -281,7 +281,7 @@ var activeTableTab = "activeTableTab";
 
 
 setIncludesMemStoreTS
-void setIncludesMemStoreTS(boolean includesMemstoreTS)
+void setIncludesMemStoreTS(boolean includesMemstoreTS)
 
 
 
@@ -290,7 +290,7 @@ var activeTableTab = "activeTableTab";
 
 
 setDataBlockEncoder
-void setDataBlockEncoder(HFileDataBlockEncoder encoder)
+void setDataBlockEncoder(HFileDataBlockEncoder encoder)
 
 
 
@@ -299,7 +299,7 @@ var activeTableTab = "activeTableTab";
 
 
 unbufferStream
-void unbufferStream()
+void unbufferStream()
 To close the stream's socket. Note: This can be 
concurrently called from multiple threads and
  implementation should take care of thread safety.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 82418c0..b19e81d 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HFileBlock.FSReaderImpl
+static class HFileBlock.FSReaderImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements HFileBlock.FSReader
 Reads version 2 HFile blocks from the filesystem.
@@ -376,7 +376,7 @@ implements 
 
 streamWrapper
-private FSDataInputStreamWrapper streamWrapper
+private FSDataInputStreamWrapper streamWrapper
 The file system stream of the underlying HFile that
  does or doesn't do checksum validations in the filesystem
 
@@ -387,7 +387,7 @@ implements 
 
 encodedBlockDecodingCtx
-private HFileBlockDecodingContext encodedBlockDecodingCtx
+private HFileBlockDecodingContext encodedBlockDecodingCtx
 
 
 
@@ -396,7 +396,7 @@ implements 
 
 defaultDecodingCtx
-private final HFileBlockDefaultDecodingContext defaultDecodingCtx
+private final HFileBlockDefaultDecodingContext defaultDecodingCtx
 Default context used when BlockType != BlockType.ENCODED_DATA.
 
 
@@ -406,7 +406,7 @@ implements 
 
 prefetchedHeader
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicReference prefetchedHeader
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true";
 title="class or interface in 
java.util.concurrent.a

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
index e05510e..2e114d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
@@ -54,891 +54,884 @@
 046import 
org.apache.hadoop.hbase.HTableDescriptor;
 047import 
org.apache.hadoop.hbase.MetaTableAccessor;
 048import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
-049import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.hadoop.hbase.client.ClusterConnection;
-053import 
org.apache.hadoop.hbase.client.Delete;
-054import 
org.apache.hadoop.hbase.client.Get;
-055import 
org.apache.hadoop.hbase.client.Mutation;
-056import 
org.apache.hadoop.hbase.client.Put;
-057import 
org.apache.hadoop.hbase.client.RegionInfo;
-058import 
org.apache.hadoop.hbase.client.Result;
-059import 
org.apache.hadoop.hbase.client.Scan;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableState;
-062import 
org.apache.hadoop.hbase.constraint.ConstraintException;
-063import 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
-064import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-065import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-066import 
org.apache.hadoop.hbase.master.MasterServices;
-067import 
org.apache.hadoop.hbase.master.ServerListener;
-068import 
org.apache.hadoop.hbase.master.TableStateManager;
-069import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-070import 
org.apache.hadoop.hbase.net.Address;
-071import 
org.apache.hadoop.hbase.procedure2.Procedure;
-072import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-073import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-074import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-075import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-076import 
org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
-077import 
org.apache.hadoop.hbase.security.access.AccessControlLists;
-078import 
org.apache.hadoop.hbase.util.Bytes;
-079import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-081import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-082import 
org.apache.yetus.audience.InterfaceAudience;
-083import 
org.apache.zookeeper.KeeperException;
-084import org.slf4j.Logger;
-085import org.slf4j.LoggerFactory;
-086
-087import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-088import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-089import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-092
-093/**
-094 * This is an implementation of {@link 
RSGroupInfoManager} which makes
-095 * use of an HBase table as the 
persistence store for the group information.
-096 * It also makes use of zookeeper to 
store group information needed
-097 * for bootstrapping during offline 
mode.
-098 *
-099 * 

Concurrency

-100 * RSGroup state is kept locally in Maps. There is a rsgroup name to cached -101 * RSGroupInfo Map at {@link #rsGroupMap} and a Map of tables to the name of the -102 * rsgroup they belong too (in {@link #tableMap}). These Maps are persisted to the -103 * hbase:rsgroup table (and cached in zk) on each modification. -104 * -105 *

Mutations on state are synchronized but reads can continue without having -106 * to wait on an instance monitor, mutations do wholesale replace of the Maps on -107 * update -- Copy-On-Write; the local Maps of state are read-only, just-in-case -108 * (see flushConfig). -109 * -110 *

Reads must not block else there is a danger we'll deadlock. -111 * -112 *

Clients of this class, the {@link RSGroupAdminEndpoint} for example, want to query and -113 * then act on the results of the query modifying cache in zookeeper without another thread -114 * making intermediate modifications. These clients synchronize on the 'this' instance so -115 * no other has access concurrently. Reads must be able to continue concurrently. -116 */ -117@InterfaceAudience.Private -118final class RSGroupInfoManagerImpl implements RSGroupInfoManager { -119 private static final Logger LOG = LoggerFactory.getLogger(RSGr


[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
index 5b66298..ea864e9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
@@ -39,319 +39,329 @@
 031import java.util.concurrent.atomic.AtomicBoolean;
 032
 033import org.apache.hadoop.conf.Configuration;
-034import org.apache.yetus.audience.InterfaceAudience;
-035import org.apache.zookeeper.KeeperException;
-036import org.apache.zookeeper.KeeperException.Code;
-037import org.apache.zookeeper.ZooKeeper;
-038import org.apache.zookeeper.data.Stat;
-039import org.slf4j.Logger;
-040import org.slf4j.LoggerFactory;
-041import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-042
-043/**
-044 * A very simple read only zookeeper implementation without watcher support.
-045 */
-046@InterfaceAudience.Private
-047public final class ReadOnlyZKClient implements Closeable {
-048
-049  private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class);
-050
-051  public static final String RECOVERY_RETRY = "zookeeper.recovery.retry";
-052
-053  private static final int DEFAULT_RECOVERY_RETRY = 30;
-054
-055  public static final String RECOVERY_RETRY_INTERVAL_MILLIS =
-056      "zookeeper.recovery.retry.intervalmill";
-057
-058  private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000;
-059
-060  public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time";
-061
-062  private static final int DEFAULT_KEEPALIVE_MILLIS = 60000;
-063
-064  private static final EnumSet<Code> FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED);
-065
-066  private final String connectString;
-067
-068  private final int sessionTimeoutMs;
-069
-070  private final int maxRetries;
-071
-072  private final int retryIntervalMs;
-073
-074  private final int keepAliveTimeMs;
-075
-076  private static abstract class Task implements Delayed {
-077
-078    protected long time = System.nanoTime();
-079
-080    public boolean needZk() {
-081      return false;
-082    }
-083
-084    public void exec(ZooKeeper zk) {
-085    }
-086
-087    public void connectFailed(IOException e) {
-088    }
-089
-090    public void closed(IOException e) {
-091    }
-092
-093    @Override
-094    public int compareTo(Delayed o) {
-095      Task that = (Task) o;
-096      int c = Long.compare(time, that.time);
-097      if (c != 0) {
-098        return c;
-099      }
-100      return Integer.compare(System.identityHashCode(this), System.identityHashCode(that));
-101    }
-102
-103    @Override
-104    public long getDelay(TimeUnit unit) {
-105      return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS);
-106    }
-107  }
-108
-109  private static final Task CLOSE = new Task() {
-110  };
-111
-112  private final DelayQueue<Task> tasks = new DelayQueue<>();
-113
-114  private final AtomicBoolean closed = new AtomicBoolean(false);
-115
-116  private ZooKeeper zookeeper;
-117
-118  private String getId() {
-119    return String.format("0x%08x", System.identityHashCode(this));
-120  }
-121
-122  public ReadOnlyZKClient(Configuration conf) {
-123    this.connectString = ZKConfig.getZKQuorumServersString(conf);
-124    this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT);
-125    this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY);
-126    this.retryIntervalMs =
-127        conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS);
-128    this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS);
-129    LOG.info("Start read only zookeeper connection " + getId() + " to " + connectString +
-130        ", session timeout " + sessionTimeoutMs + " ms, retries " + maxRetries +
-131        ", retry interval " + retryIntervalMs + " ms, keep alive " + keepAliveTimeMs + " ms");
-132    Thread t = new Thread(this::run, "ReadOnlyZKClient");
-133    t.setDaemon(true);
-134    t.start();
-135  }
-136
-137  private abstract class ZKTask<T> extends Task {
-138
-139    protected final String path;
-140
-141    private final CompletableFuture<T> future;
-142
-143    private final String operationType;
-144
-145    private int retries;
-146
-147    protected ZKTask(String path, CompletableFuture<T> future, String operationType) {
-148      this.path = path;
-149      this.future = future;
-150      this.operationType = operationType;
-151    }
-152
-153    protected final void onComplete(ZooKeeper zk, int rc, T ret, boolean errorIfNoNode) {
-154      tasks.add(new Task() {
-155
-1
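As a usage note, the constructor above reads its retry and keep-alive knobs straight from the Configuration. A hedged sketch of tuning them, assuming the HBase client libraries are on the classpath; the key strings are the constants defined in the class:

import org.apache.hadoop.conf.Configuration;

public class ReadOnlyZKClientConfig {
  public static Configuration tuned() {
    Configuration conf = new Configuration();
    conf.setInt("zookeeper.recovery.retry", 10);               // maxRetries
    conf.setInt("zookeeper.recovery.retry.intervalmill", 500); // retryIntervalMs
    conf.setInt("zookeeper.keep-alive.time", 30_000);          // keepAliveTimeMs
    // new ReadOnlyZKClient(conf) would log these values and start its single
    // background task thread, as the constructor above shows.
    return conf;
  }
}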

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
index 972d795..d4f4a3d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.Reader.html
@@ -39,489 +39,490 @@
 031import org.apache.hadoop.fs.Path;
 032import org.apache.hadoop.hbase.HConstants;
 033import org.apache.hadoop.hbase.ServerName;
-034import org.apache.yetus.audience.InterfaceAudience;
-035import org.apache.yetus.audience.InterfaceStability;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
-039import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-040import org.apache.hadoop.hbase.util.CancelableProgressable;
-041import org.apache.hadoop.hbase.util.FSUtils;
-042import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
-043import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-044
-045/**
-046 * Base class of a WAL Provider that returns a single thread safe WAL that writes to Hadoop FS. By
-047 * default, this implementation picks a directory in Hadoop FS based on a combination of
-048 *   - the HBase root directory
-049 *   - HConstants.HREGION_LOGDIR_NAME
-050 *   - the given factory's factoryId (usually identifying the regionserver by host:port)
-051 * It also uses the providerId to differentiate among files.
-052 */
-055@InterfaceAudience.Private
-056@InterfaceStability.Evolving
-057public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implements WALProvider {
-058
-059  private static final Logger LOG = LoggerFactory.getLogger(AbstractFSWALProvider.class);
-060
-061  /** Separate old log into different dir by regionserver name **/
-062  public static final String SEPARATE_OLDLOGDIR = "hbase.separate.oldlogdir.by.regionserver";
-063  public static final boolean DEFAULT_SEPARATE_OLDLOGDIR = false;
-064
-065  // Only public so classes back in regionserver.wal can access
-066  public interface Reader extends WAL.Reader {
-067    /**
-068     * @param fs File system.
-069     * @param path Path.
-070     * @param c Configuration.
-071     * @param s Input stream that may have been pre-opened by the caller; may be null.
-072     */
-073    void init(FileSystem fs, Path path, Configuration c, FSDataInputStream s) throws IOException;
-074  }
-075
-076  protected volatile T wal;
-077  protected WALFactory factory = null;
-078  protected Configuration conf = null;
-079  protected List<WALActionsListener> listeners = null;
-080  protected String providerId = null;
-081  protected AtomicBoolean initialized = new AtomicBoolean(false);
-082  // for default wal provider, logPrefix won't change
-083  protected String logPrefix = null;
-084
-085  /**
-086   * we synchronized on walCreateLock to prevent wal recreation in different threads
-087   */
-088  private final Object walCreateLock = new Object();
-089
-090  /**
-091   * @param factory factory that made us, identity used for FS layout. may not be null
-092   * @param conf may not be null
-093   * @param listeners may be null
-094   * @param providerId differentiate between providers from one factory, used for FS layout. may be
-095   *          null
-096   */
-097  @Override
-098  public void init(WALFactory factory, Configuration conf, List<WALActionsListener> listeners,
-099      String providerId) throws IOException {
-100    if (!initialized.compareAndSet(false, true)) {
-101      throw new IllegalStateException("WALProvider.init should only be called once.");
-102    }
-103    this.factory = factory;
-104    this.conf = conf;
-105    this.listeners = listeners;
-106    this.providerId = providerId;
-107    // get log prefix
-108    StringBuilder sb = new StringBuilder().append(factory.factoryId);
-109    if (providerId != null) {
-110      if (providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-111        sb.append(providerId);
-112      } else {
-113        sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-114      }
-115    }
-116    logPrefix = sb.toString();
-117    doInit(conf);
-118  }
-119
-120  @Override
-121  public List<WAL> getWALs() {
-122    if (wal == null) {
-123      return Collections.emptyList();
-124    }
-125    List<WAL> wals = new ArrayList<>(1);
-126    wals.add(wal);
-127    return wals;
-128  }
-129
-130  @Override
-131  public T getWAL(byte[] identifier, byte[] namespace) throws IOException {
-132    T walCopy = wal;
-133    if (walCopy == null) {
-134      // only lock when need to create wal,
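The getWAL body is cut off above, but the pattern it starts (read the volatile wal field, and only synchronize on walCreateLock when creation is needed) is classic double-checked lazy initialization. A minimal, self-contained sketch of that pattern, with invented names rather than the HBase types:

import java.util.function.Supplier;

class LazySingleWal<T> {
  private volatile T wal;                            // mirrors 'protected volatile T wal'
  private final Object walCreateLock = new Object(); // prevents duplicate creation

  T get(Supplier<T> creator) {
    T copy = wal;            // one volatile read; the fast path takes no lock
    if (copy == null) {
      synchronized (walCreateLock) {
        copy = wal;          // re-check under the lock
        if (copy == null) {
          copy = creator.get();
          wal = copy;        // publish for later lock-free readers
        }
      }
    }
    return copy;
  }
}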

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 616ee36..a9ecb58 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":18,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":18,"i47":6,"i48":6,"i49":18,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":18,"i57":18,"i58":18,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":18,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":18,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":18,"i91":6,"i92":6,"i93":6,"i94":18,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":18,"i101":18,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":18,"i129":18,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6};
+var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":18,"i48":6,"i49":6,"i50":18,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":18,"i58":18,"i59":18,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":18,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":18,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":6,"i94":6,"i95":18,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":18,"i102":18,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":18,"i130":18,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
 
 Since:
@@ -201,39 +201,45 @@ public interface AsyncAdmin
+CompletableFuture<CacheEvictionStats>
+clearBlockCache(TableName tableName)
+Clear all the blocks corresponding to this table from BlockCache.
+
 CompletableFuture<Void>
 clearCompactionQueues(ServerName serverName,
                       Set<String> queues)
 Clear compacting queues on a region server.

 CompletableFuture<List<ServerName>>
 clearDeadServers(List<ServerName> servers)
 Clear dead region servers from master.

 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFu
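A hedged sketch of calling the newly listed method from client code; it assumes an AsyncAdmin obtained from an AsyncConnection, and the table name is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

class ClearBlockCacheExample {
  static void clear(AsyncAdmin admin) {
    // Evict the table's blocks; the future completes with eviction statistics.
    admin.clearBlockCache(TableName.valueOf("t1"))
        .thenAccept(stats -> System.out.println("eviction stats: " + stats));
  }
}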

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
index 9dbe9f6..41c2a3b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":42,"i28":42,"i29":42,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":42,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":9,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":42,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":9,"i115":10,"i116":10,"i117":10,"i118":42,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":42,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":42,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2
 
09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":42,"i28":42,"i29":42,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":42,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":9,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":41,"i93":10,"i94":10,"i95":10,"i96":10,"i97":42,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":9,"i113":10,"i114":10,"i115":10,"i116":42,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":42,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":42,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2
 
09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134impo

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index 1d28d02..f930336 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -432,6 +432,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index 6224aa4..011c1ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -343,6 +343,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index 85dc7a0..9037686 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -397,6 +397,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index 68112c8..d5ae071 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
@@ -362,6 +362,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
index 1ca323c..2837bc8 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
@@ -362,6 +362,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index 66ec9ea..c6f1393 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -347,6 +347,6 @@ extends
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbas

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index d405629..3ec93bb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -371,1638 +371,1646 @@
 363    if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
 364      params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365    }
-366    if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
-367      params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368    }
-369    // Look to see if an alternate BufferedMutation implementation is wanted.
-370    // Look in params and in config. If null, use default.
-371    String implementationClassName = params.getImplementationClassName();
-372    if (implementationClassName == null) {
-373      implementationClassName = this.alternateBufferedMutatorClassName;
-374    }
-375    if (implementationClassName == null) {
-376      return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377    }
-378    try {
-379      return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380          this, rpcCallerFactory, rpcControllerFactory, params);
-381    } catch (ClassNotFoundException e) {
-382      throw new RuntimeException(e);
-383    }
-384  }
-385
-386  @Override
-387  public BufferedMutator getBufferedMutator(TableName tableName) {
-388    return getBufferedMutator(new BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-393    return new HRegionLocator(tableName, this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws IOException {
-398    return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection getConnectionMetrics() {
-403    return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() {
-407    if (batchPool == null) {
-408      synchronized (this) {
-409        if (batchPool == null) {
-410          int threads = conf.getInt("hbase.hconnection.threads.max", 256);
-411          this.batchPool = getThreadPool(threads, threads, "-shared", null);
-412          this.cleanupPool = true;
-413        }
-414      }
-415    }
-416    return this.batchPool;
-417  }
-418
-419  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420      BlockingQueue<Runnable> passedWorkQueue) {
-421    // shared HTable thread executor not yet initialized
-422    if (maxThreads == 0) {
-423      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-424    }
-425    if (coreThreads == 0) {
-426      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-427    }
-428    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429    BlockingQueue<Runnable> workQueue = passedWorkQueue;
-430    if (workQueue == null) {
-431      workQueue =
-432        new LinkedBlockingQueue<>(maxThreads *
-433            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435      coreThreads = maxThreads;
-436    }
-437    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-438        coreThreads,
-439        maxThreads,
-440        keepAliveTime,
-441        TimeUnit.SECONDS,
-442        workQueue,
-443        Threads.newDaemonThreadFactory(toString() + nameHint));
-444    tpe.allowCoreThreadTimeOut(true);
-445    return tpe;
-446  }
-447
-448  private ExecutorService getMetaLookupPool() {
-449    if (this.metaLookupPool == null) {
-450      synchronized (this) {
-451        if (this.metaLookupPool == null) {
-452          //Some of the threads would be used for meta replicas
-453          //To start with, threads.max.core threads can hit the meta (including replicas).
-454          //After that, requests will get queued up in the passed queue, and only after
-455          //the queue is full, a new thread will be started
-456          int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457          this.metaLookupPool = getThreadPool(
-458             threads,
-459             threads,
-460             "-metaLookup-shared-", new LinkedBlockingQueue<>());
-461        }
-462      }
-463    }
-464    return this.metaLookupPool;
-465  }
-466
-467  protected ExecutorService getCurrentMetaLookupPool() {
-468    return metaLookupPool;
-469  }
-470
-471  protected ExecutorService getCurrentBatchPool() {
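The getBufferedMutator plumbing above resolves buffer sizes from the connection configuration and can load an alternate implementation class by name. From the caller's side it looks like the following hedged sketch, using the standard public client API; the table name and buffer size are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

class BufferedMutatorExample {
  static void writeBatch(Connection conn) throws java.io.IOException {
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("t1"))
        .writeBufferSize(4 * 1024 * 1024); // unset values fall back to connection config
    try (BufferedMutator mutator = conn.getBufferedMutator(params)) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put); // buffered client side
      mutator.flush();     // forces the RPC
    }
  }
}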

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Mutation.html b/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
index 44fddac..0dbc915 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Mutation.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":9,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -212,6 +212,13 @@ implements 
 Fields inherited from class org.apache.hadoop.hbase.client.OperationWithAttributes
 ID_ATRIBUTE
+
+Fields inherited from interface org.apache.hadoop.hbase.client.Row
+COMPARATOR
+
 
 
 
@@ -287,7 +294,12 @@ implements 
 int
-compareTo(Row d) 
+compareTo(Row d)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use Row.COMPARATOR instead
+
 (package private) KeyValue
@@ -919,10 +931,15 @@ public 
 compareTo
-public int compareTo(Row d)
+@Deprecated
+public int compareTo(Row d)
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use Row.COMPARATOR instead

 Specified by:
 compareTo in interface Comparable<Row>
+Specified by:
+compareTo in interface Row
 
 
 
@@ -932,7 +949,7 @@ public 
 getTimeStamp
-public long getTimeStamp()
+public long getTimeStamp()
 Method for retrieving the timestamp

 Returns:
@@ -946,7 +963,7 @@ public 
 setClusterIds
-public Mutation setClusterIds(List<UUID> clusterIds)
+public Mutation setClusterIds(List<UUID> clusterIds)
 Marks that the clusters with the given clusterIds have consumed the mutation

 Parameters:
@@ -960,7 +977,7 @@ public 
 getClusterIds
-public List<UUID> getClusterIds()
+public List<UUID> getClusterIds()

 Returns:
 the set of clusterIds that have consumed the mutation
@@ -973,7 +990,7 @@ public 
 setCellVisibility
-public Mutation setCellVisibility(CellVisibility expression)
+public Mutation setCellVisibility(CellVisibility expression)
 Sets the visibility expression associated with cells in this Mutation.

 Parameters:
@@ -987,7 +1004,7 @@ public 
 getCellVisibility
-public CellVisibility getCellVisibility()
+public CellVisibility getCellVisibility()
                                  throws DeserializationException

 Returns:
@@ -1003,7 +1020,7 @@ public 
 toCellVisibility
-static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility)
+static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Ce
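The deprecation notes above all point at the same replacement; a hedged sketch of what "Use Row.COMPARATOR instead" looks like in client code:

import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;

class RowSortExample {
  static void sortByRowKey(List<Put> puts) {
    puts.sort(Row.COMPARATOR); // orders operations by row key, replacing compareTo
  }
}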

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferExtendedCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferExtendedCell.html b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferExtendedCell.html
new file mode 100644
index 0000000..5cdfa16
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowColByteBufferExtendedCell.html
@@ -0,0 +1,609 @@
+PrivateCellUtil.LastOnRowColByteBufferExtendedCell (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase
+Class PrivateCellUtil.LastOnRowColByteBufferExtendedCell
+
+java.lang.Object
+  org.apache.hadoop.hbase.ByteBufferExtendedCell
+    org.apache.hadoop.hbase.PrivateCellUtil.EmptyByteBufferExtendedCell
+      org.apache.hadoop.hbase.PrivateCellUtil.LastOnRowByteBufferExtendedCell
+        org.apache.hadoop.hbase.PrivateCellUtil.LastOnRowColByteBufferExtendedCell
+
+All Implemented Interfaces:
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
+
+Enclosing class:
+PrivateCellUtil
+
+private static class PrivateCellUtil.LastOnRowColByteBufferExtendedCell
+extends PrivateCellUtil.LastOnRowByteBufferExtendedCell
+
+Nested Class Summary
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.Cell
+Cell.Type
+
+Field Summary
+
+Modifier and Type       Field and Description
+private ByteBuffer      fBuffer
+private static int      FIXED_OVERHEAD
+private byte            flength
+private int             foffset
+private ByteBuffer      qBuffer
+private int             qlength
+private int             qoffset
+
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+Fields inherited from interface org.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
+
+Constructor Summary
+
+Constructor and Description
+LastOnRowColByteBufferExtendedCell(ByteBuffer rBuffer, int roffset, short rlength,
+                                   ByteBuffer fBuffer, int foffset, byte flength,
+                                   ByteBuffer qBuffer, int qoffset, int qlength)
+
+Method Summary
+
+All Methods  Instance Methods  Concrete Methods
+
+Modifier and Type   Method and Description
+ByteBuffer          getFamilyByteBuffer()
+byte                getFamilyLength()
+int                 getFamilyPosition()
+http://docs.oracle.com/javase/8/docs/api/java

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
index b66bab7..4ee0640 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class HBaseAdmin
+public class HBaseAdmin
 extends Object
 implements Admin
 HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
@@ -1839,7 +1839,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1848,7 +1848,7 @@ implements 
 
 connection
-private ClusterConnection connection
+private ClusterConnection connection
 
 
 
@@ -1857,7 +1857,7 @@ implements 
 
 conf
-private final org.apache.hadoop.conf.Configuration conf
+private final org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -1866,7 +1866,7 @@ implements 
 
 pause
-private final long pause
+private final long pause
 
 
 
@@ -1875,7 +1875,7 @@ implements 
 
 numRetries
-private final int numRetries
+private final int numRetries
 
 
 
@@ -1884,7 +1884,7 @@ implements 
 
 syncWaitTimeout
-private final int syncWaitTimeout
+private final int syncWaitTimeout
 
 
 
@@ -1893,7 +1893,7 @@ implements 
 
 aborted
-private boolean aborted
+private boolean aborted
 
 
 
@@ -1902,7 +1902,7 @@ implements 
 
 operationTimeout
-private int operationTimeout
+private int operationTimeout
 
 
 
@@ -1911,7 +1911,7 @@ implements 
 
 rpcTimeout
-private int rpcTimeout
+private int rpcTimeout
 
 
 
@@ -1920,7 +1920,7 @@ implements 
 
 rpcCallerFactory
-private RpcRetryingCallerFactory rpcCallerFactory
+private RpcRetryingCallerFactory rpcCallerFactory
 
 
 
@@ -1929,7 +1929,7 @@ implements 
 
 rpcControllerFactory
-private RpcControllerFactory rpcControllerFactory
+private RpcControllerFactory rpcControllerFactory
 
 
 
@@ -1938,7 +1938,7 @@ implements 
 
 ng
-private NonceGenerator ng
+private NonceGenerator ng
 
 
 
@@ -1955,7 +1955,7 @@ implements 
 
 HBaseAdmin
-HBaseAdmin(ClusterConnection connection)
+HBaseAdmin(ClusterConnection connection)
                     throws IOException
 
 Throws:
@@ -1977,7 +1977,7 @@ implements 
 
 getOperationTimeout
-public int getOperationTimeout()
+public int getOperationTimeout()
 
 Specified by:
 getOperationTimeout in
 interface Admin
@@ -1990,7 +1990,7 @@ implements 
 
 abort
-public void abort(String why,
+public void abort(String why,
                   Throwable e)
 Description copied from interface: Abortable
 Abort the server or client.
@@ -2011,7 +2011,7 @@ implements 
 
 isAborted
-public boolean isAborted()
+public boolean isAborted()
 Description copied from 
interface: Abortable
 Check if the server or client was aborted.
 
@@ -2030,7 +2030,7 @@ implements 
 
 abortProcedure
-public boolean abortProcedure(long procId,
+public boolean abortProcedure(long procId,
   boolean mayInterruptIfRunning)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface: Admin
@@ -2054,7 +2054,7 @@ implements 
 
 abortProcedureAsync
-public Future<Boolean> abortProcedureAsync(long procId,
+public Future<Boolean> abortProcedureAsync(long procId,
                                            boolean mayInterruptIfRunning)
                                     throws IOException
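A hedged sketch of driving the two entry points above; it assumes an Admin handle and a known procedure id, and blocks on the future only for demonstration:

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;

class AbortProcedureExample {
  static boolean abort(Admin admin, long procId) throws Exception {
    Future<Boolean> pending = admin.abortProcedureAsync(procId, true);
    return pending.get(); // true if the abort request was accepted
  }
}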

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
index eb95695..24f54ef 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -1064,88 +1064,92 @@
  
 
 
+Mutation.CellWrapper
+ 
+
+
 NoncedRegionServerCallable
 
 Implementations make an rpc call against a RegionService 
via a protobuf Service.
 
 
-
+
 NoOpRetryableCallerInterceptor
 
 Class that acts as a NoOpInterceptor.
 
 
-
+
 NoOpRetryingInterceptorContext
  
 
-
+
 Operation
 
 Superclass for any type that maps to a potentially 
application-level query.
 
 
-
+
 OperationWithAttributes
  
 
-
+
 PackagePrivateFieldAccessor
 
 A helper class used to access the package private field in 
o.a.h.h.client package.
 
 
-
+
 PerClientRandomNonceGenerator
 
 NonceGenerator implementation that uses client ID hash + 
random int as nonce group, and random
  numbers as nonces.
 
 
-
+
 PreemptiveFastFailInterceptor
 
 The concrete RetryingCallerInterceptor 
class that implements the preemptive fast fail
  feature.
 
 
-
+
 Put
 
 Used to perform Put operations for a single row.
 
 
-
+
 Query
 
 Base class for HBase read operations; e.g.
 
 
-
+
 QuotaStatusCalls
 
 Client class to wrap RPCs to HBase servers for space quota 
status information.
 
 
-
+
 RawAsyncHBaseAdmin
 
 The implementation of AsyncAdmin.
 
 
-
+
 RawAsyncTableImpl
 
 The implementation of RawAsyncTable.
 
 
-
+
 RegionAdminServiceCallable
 
 Similar to RegionServerCallable but for the AdminService 
interface.
 
 
-
+
 RegionCoprocessorRpcChannel
 
 Provides clients with an RPC connection to call Coprocessor 
Endpoint
@@ -1153,103 +1157,103 @@
  against a given table region.
 
 
-
+
 RegionCoprocessorRpcChannelImpl
 
 The implementation of a region based coprocessor rpc 
channel.
 
 
-
+
 RegionCoprocessorServiceExec
 
 Represents a coprocessor service method execution against a 
single region.
 
 
-
+
 RegionInfoBuilder
  
 
-
+
 RegionInfoBuilder.MutableRegionInfo
 
 An implementation of RegionInfo that adds mutable methods 
so can build a RegionInfo instance.
 
 
-
+
 RegionInfoDisplay
 
 Utility used composing RegionInfo for 'display'; e.g.
 
 
-
+
 RegionLoadStats
 
 POJO representing region server load
 
 
-
+
 RegionReplicaUtil
 
 Utility methods which contain the logic for regions and 
replicas.
 
 
-
+
 RegionServerCallable
 
 Implementations make a RPC call against a RegionService via 
a protobuf Service.
 
 
-
+
 RegionServerCoprocessorRpcChannelImpl
 
 The implementation of a region server based coprocessor rpc 
channel.
 
 
-
+
 RequestControllerFactory
 
 A factory class that constructs an RequestController.
 
 
-
+
 Result
 
 Single row result of a Get or Scan query.
 
 
-
+
 ResultBoundedCompletionService
 
 A completion service for the RpcRetryingCallerFactory.
 
 
-
+
 ResultStatsUtil
 
 A Result with some statistics 
about the server/region status
 
 
-
+
 RetriesExhaustedException.ThrowableWithExtraContext
 
 Datastructure that allows adding more info around Throwable 
incident.
 
 
-
+
 RetryingCallerInterceptor
 
 This class is designed to fit into the RetryingCaller class 
which forms the
  central piece of intelligence for the client side retries for most 
calls.
 
 
-
+
 RetryingCallerInterceptorContext
 
 The context object used in the RpcRetryingCaller to enable
  RetryingCallerInterceptor to 
intercept calls.
 
 
-
+
 RetryingCallerInterceptorFactory
 
 Factory implementation to provide the ConnectionImplementation with
@@ -1257,186 +1261,186 @@
  to intercept the RpcRetryingCaller during the 
course of their calls.
 
 
-
+
 RetryingTimeTracker
 
 Tracks the amount of time remaining for an operation.
 
 
-
+
 ReversedClientScanner
 
 A reversed client scanner which support backward 
scanning
 
 
-
+
 ReversedScannerCallable
 
 A reversed ScannerCallable which supports backward 
scanning.
 
 
-
+
 RowMutations
 
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 RpcRetryingCallable
 
 A RetryingCallable for RPC connection operations.
 
 
-
+
 RpcRetryingCallerFactory
 
 Factory to create an RpcRetryingCaller
 
 
-
+
 RpcRetryingCallerImpl
 
 Runs an rpc'ing RetryingCallable.
 
 
-
+
 RpcRetryingCallerWithReadReplicas
 
 Caller that goes to replica if the primary region does no 
answer within a configurable
  timeout.
 
 
-
+
 Scan
 
 Used to perform Scan operations.
 
 
-
+
 ScannerCallable
 
 Scanner operations such as create, next, etc.
 
 
-
+
 ScannerCallableWithReplicas
 
 This class has the logic for handling scanners for regions 
with and without replicas.
 
 
-
+
 SecureBulkLoadClient
 
 Client proxy for SecureBulkLoadProtocol
 
 
-
+
 ServerSt

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
new file mode 100644
index 0000000..71630ed
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.html
@@ -0,0 +1,370 @@
+ReplicationPeerConfigBuilder (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.replication
+Interface ReplicationPeerConfigBuilder
+
+All Known Implementing Classes:
+ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl
+
+@InterfaceAudience.Public
+public interface ReplicationPeerConfigBuilder
+For creating ReplicationPeerConfig.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods  Instance Methods  Abstract Methods
+
+Modifier and Type              Method and Description
+ReplicationPeerConfig          build()
+ReplicationPeerConfigBuilder   setBandwidth(long bandwidth)
+ReplicationPeerConfigBuilder   setClusterKey(String clusterKey)
+    Set the clusterKey which is the concatenation of the slave cluster's:
+    hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
+ReplicationPeerConfigBuilder   setConfiguration(Map<String,String> configuration)
+ReplicationPeerConfigBuilder   setExcludeNamespaces(Set<String> namespaces)
+ReplicationPeerConfigBuilder   setExcludeTableCFsMap(Map<TableName,List<String>> tableCFsMap)
+ReplicationPeerConfigBuilder   setNamespaces(Set<String> namespaces)
+ReplicationPeerConfigBuilder   setPeerData(Map<byte[],byte[]> peerData)
+ReplicationPeerConfigBuilder   setReplicateAllUserTables(boolean replicateAllUserTables)
+ReplicationPeerConfigBuilder   setReplicationEndpointImpl(String replicationEndpointImpl)
+    Sets the ReplicationEndpoint plugin class for this peer.
+ReplicationPeerConfigBuilder   setTableCFsMap(Map

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html b/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
index 093e878..df42ef7 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
@@ -32,375 +32,553 @@
 024import java.util.List;
 025import java.util.Map;
 026import java.util.TreeMap;
-027import java.util.TreeSet;
-028
-029import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
-030import org.apache.yetus.audience.InterfaceAudience;
-031import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-032import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-033import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.Coprocessor;
-034import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
-035import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
-036import org.apache.hadoop.hbase.util.Bytes;
-037import org.apache.hadoop.hbase.util.Strings;
-038
-039/**
-040 * This class is used for exporting current state of load on a RegionServer.
-041 */
-042@InterfaceAudience.Public
-043public class ServerLoad {
-044  private int stores = 0;
-045  private int storefiles = 0;
-046  private int storeUncompressedSizeMB = 0;
-047  private int storefileSizeMB = 0;
-048  private int memstoreSizeMB = 0;
-049  private long storefileIndexSizeKB = 0;
-050  private long readRequestsCount = 0;
-051  private long filteredReadRequestsCount = 0;
-052  private long writeRequestsCount = 0;
-053  private int rootIndexSizeKB = 0;
-054  private int totalStaticIndexSizeKB = 0;
-055  private int totalStaticBloomSizeKB = 0;
-056  private long totalCompactingKVs = 0;
-057  private long currentCompactedKVs = 0;
-058  private long reportTime = 0;
-059
-060  @InterfaceAudience.Private
-061  public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
-062    this.serverLoad = serverLoad;
-063    this.reportTime = System.currentTimeMillis();
-064    for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
-065      stores += rl.getStores();
-066      storefiles += rl.getStorefiles();
-067      storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
-068      storefileSizeMB += rl.getStorefileSizeMB();
-069      memstoreSizeMB += rl.getMemStoreSizeMB();
-070      storefileIndexSizeKB += rl.getStorefileIndexSizeKB();
-071      readRequestsCount += rl.getReadRequestsCount();
-072      filteredReadRequestsCount += rl.getFilteredReadRequestsCount();
-073      writeRequestsCount += rl.getWriteRequestsCount();
-074      rootIndexSizeKB += rl.getRootIndexSizeKB();
-075      totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
-076      totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
-077      totalCompactingKVs += rl.getTotalCompactingKVs();
-078      currentCompactedKVs += rl.getCurrentCompactedKVs();
-079    }
-080  }
-081
-082  // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
-083  // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
-084  /* @return the underlying ServerLoad protobuf object */
-085  @InterfaceAudience.Private
-086  public ClusterStatusProtos.ServerLoad obtainServerLoadPB() {
-087    return serverLoad;
-088  }
-089
-090  protected ClusterStatusProtos.ServerLoad serverLoad;
-091
-092  /* @return number of requests  since last report. */
-093  public long getNumberOfRequests() {
-094    return serverLoad.getNumberOfRequests();
-095  }
-096  public boolean hasNumberOfRequests() {
-097    return serverLoad.hasNumberOfRequests();
-098  }
-099
-100  /* @return total Number of requests from the start of the region server. */
-101  public long getTotalNumberOfRequests() {
-102    return serverLoad.getTotalNumberOfRequests();
-103  }
-104  public boolean hasTotalNumberOfRequests() {
-105    return serverLoad.hasTotalNumberOfRequests();
-106  }
-107
-108  /* @return the amount of used heap, in MB. */
-109  public int getUsedHeapMB() {
-110    return serverLoad.getUsedHeapMB();
-111  }
-112  public boolean hasUsedHeapMB() {
-113    return serverLoad.hasUsedHeapMB();
-114  }
-115
-116  /* @return the maximum allowable size of the heap, in MB. */
-117  public int getMaxHeapMB() {
-118    return serverLoad.getMaxHeapMB();
-119  }
-120  public boolean hasMaxHeapMB() {
-121    return serverLoad.hasMaxHeapMB();
-122  }
-123
-124  public int getStores() {
-125    return stores;
-126  }
-127
-128  public int getStorefiles() {
-129    return storefiles;
+027import java.util.stream.Collectors;
+028import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
+029import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+030im

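The ServerLoad constructor above folds every RegionLoad into server-wide totals. A client-side sketch of reading those totals through the pre-3.0 ClusterStatus surface diffed here (assumes an already-open Connection named conn; error handling elided):

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ServerLoad;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    try (Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName sn : status.getServers()) {
        ServerLoad load = status.getLoad(sn);   // the aggregated object built above
        System.out.println(sn + " stores=" + load.getStores()
            + " storefiles=" + load.getStorefiles()
            + " usedHeapMB=" + load.getUsedHeapMB());
      }
    }
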
[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/rest/client/Response.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/Response.html 
b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/Response.html
index 4eb0b84..7b7da4d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/Response.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/Response.html
@@ -30,9 +30,9 @@
 022import java.io.IOException;
 023import java.io.InputStream;
 024
-025import org.apache.commons.logging.Log;
-026import 
org.apache.commons.logging.LogFactory;
-027import 
org.apache.yetus.audience.InterfaceAudience;
+025import 
org.apache.yetus.audience.InterfaceAudience;
+026import org.slf4j.Logger;
+027import org.slf4j.LoggerFactory;
 028import org.apache.http.Header;
 029import org.apache.http.HttpResponse;
 030
@@ -41,7 +41,7 @@
 033 */
 034@InterfaceAudience.Public
 035public class Response {
-036  private static final Log LOG = 
LogFactory.getLog(Response.class);
+036  private static final Logger LOG = 
LoggerFactory.getLogger(Response.class);
 037
 038  private int code;
 039  private Header[] headers;

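The hunk above is the whole migration pattern: commons-logging's Log/LogFactory pair is swapped one-for-one for slf4j's Logger/LoggerFactory, with call sites left untouched. In sketch form (class name as in the file):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Response {
      // before: private static final Log LOG = LogFactory.getLog(Response.class);
      private static final Logger LOG = LoggerFactory.getLogger(Response.class);
    }
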


[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
index cb2d47f..8ffd84c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
@@ -184,1798 +184,1797 @@
 176  }
 177
 178  private void checkSystemTable() throws 
IOException {
-179try (Admin admin = 
connection.getAdmin();) {
-180
-181  verifyNamespaceExists(admin);
-182
-183  if (!admin.tableExists(tableName)) 
{
-184HTableDescriptor backupHTD =
-185
BackupSystemTable.getSystemTableDescriptor(connection.getConfiguration());
-186admin.createTable(backupHTD);
-187  }
-188  waitForSystemTable(admin);
-189}
-190  }
-191
-192  private void 
verifyNamespaceExists(Admin admin) throws IOException {
-193String namespaceName = 
tableName.getNamespaceAsString();
-194NamespaceDescriptor ns = 
NamespaceDescriptor.create(namespaceName).build();
-195NamespaceDescriptor[] list = 
admin.listNamespaceDescriptors();
-196boolean exists = false;
-197for (NamespaceDescriptor nsd : list) 
{
-198  if 
(nsd.getName().equals(ns.getName())) {
-199exists = true;
-200break;
-201  }
-202}
-203if (!exists) {
-204  admin.createNamespace(ns);
-205}
-206  }
-207
-208  private void waitForSystemTable(Admin 
admin) throws IOException {
-209long TIMEOUT = 6;
-210long startTime = 
EnvironmentEdgeManager.currentTime();
-211while (!admin.tableExists(tableName) 
|| !admin.isTableAvailable(tableName)) {
-212  try {
-213Thread.sleep(100);
-214  } catch (InterruptedException e) 
{
-215  }
-216  if 
(EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) {
-217throw new IOException("Failed to 
create backup system table after " + TIMEOUT + "ms");
-218  }
-219}
-220LOG.debug("Backup table exists and 
available");
-221
-222  }
-223
-224  @Override
-225  public void close() {
-226// do nothing
-227  }
-228
-229  /**
-230   * Updates status (state) of a backup session in backup system table
-231   * @param info backup info
-232   * @throws IOException exception
-233   */
-234  public void updateBackupInfo(BackupInfo 
info) throws IOException {
-235
-236if (LOG.isTraceEnabled()) {
-237  LOG.trace("update backup status in 
backup system table for: " + info.getBackupId()
-238  + " set status=" + 
info.getState());
-239}
-240try (Table table = 
connection.getTable(tableName)) {
-241  Put put = 
createPutForBackupInfo(info);
-242  table.put(put);
-243}
-244  }
-245
-246  /*
-247   * @param backupId the backup Id
-248   * @return Map of rows to path of bulk 
loaded hfile
-249   */
-250  Map 
readBulkLoadedFiles(String backupId) throws IOException {
-251Scan scan = 
BackupSystemTable.createScanForBulkLoadedFiles(backupId);
-252try (Table table = 
connection.getTable(tableName);
-253ResultScanner scanner = 
table.getScanner(scan)) {
-254  Result res = null;
-255  Map map = new 
TreeMap<>(Bytes.BYTES_COMPARATOR);
-256  while ((res = scanner.next()) != 
null) {
-257res.advance();
-258byte[] row = 
CellUtil.cloneRow(res.listCells().get(0));
-259for (Cell cell : res.listCells()) 
{
-260  if 
(CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0,
-261
BackupSystemTable.PATH_COL.length) == 0) {
-262map.put(row, 
Bytes.toString(CellUtil.cloneValue(cell)));
-263  }
-264}
-265  }
-266  return map;
-267}
-268  }
-269
-270  /*
-271   * Used during restore
-272   * @param backupId the backup Id
-273   * @param sTableList List of tables
-274   * @return array of Map of family to 
List of Paths
-275   */
-276  public Map>[] readBulkLoadedFiles(String backupId, 
List sTableList)
-277  throws IOException {
-278Scan scan = 
BackupSystemTable.createScanForBulkLoadedFiles(backupId);
-279Map>[] 
mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()];
-280try (Table table = 
connection.getTable(tableName);
-281ResultScanner scanner = 
table.getScanner(scan)) {
-282  Result res = null;
-283  while ((res = scanner.next()) != 
null) {
-284res.advance();
-285TableName tbl = null;
-286byte[] fam = null;
-287String path = null;
-288for (Cell cell : res.listCells()) 
{
-289  if 
(CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL

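Both readBulkLoadedFiles variants above follow the same resource pattern: the Table and its ResultScanner are opened in a single try-with-resources so both close even when the scan throws. A stripped-down sketch of that pattern (assumes an existing Connection conn; the table name is illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    Scan scan = new Scan();
    try (Table table = conn.getTable(TableName.valueOf("backup:system"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result res : scanner) {          // ResultScanner is Iterable<Result>
        // one Result per row; res.listCells() yields its cells
      }
    }
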
[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dad9a249/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
 
b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
index 4a2da54..6aaf50a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteByteBufferCell.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PrivateCellUtil.TagRewriteByteBufferCell
+static class PrivateCellUtil.TagRewriteByteBufferCell
 extends ByteBufferCell
 implements ExtendedCell
 
@@ -135,6 +135,21 @@ implements 
 
 
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.Cell
+Cell.DataType
+
+
+
 
 
 
@@ -290,89 +305,101 @@ implements getSerializedSize(boolean withTags) 
 
 
+http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
+getTag(byte type)
+Returns the specific tag of the given type
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+getTags()
+Creates a list of tags in the current cell
+
+
+
 byte[]
 getTagsArray()
 Contiguous raw bytes representing tags that may start at 
any index in the containing array.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer
 getTagsByteBuffer() 
 
-
+
 int
 getTagsLength()
 HBase internally uses 2 bytes to store tags length in 
Cell.
 
 
-
+
 int
 getTagsOffset() 
 
-
+
 int
 getTagsPosition() 
 
-
+
 long
 getTimestamp() 
 
-
+
 byte
 getTypeByte() 
 
-
+
 byte[]
 getValueArray()
 Contiguous raw bytes that may start at any index in the 
containing array.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer
 getValueByteBuffer() 
 
-
+
 int
 getValueLength() 
 
-
+
 int
 getValueOffset() 
 
-
+
 int
 getValuePosition() 
 
-
+
 long
 heapSize() 
 
-
+
 void
 setSequenceId(long seqId)
 Sets with the given seqId.
 
 
-
+
 void
 setTimestamp(byte[] ts)
 Sets with the given timestamp.
 
 
-
+
 void
 setTimestamp(long ts)
 Sets with the given timestamp.
 
 
-
+
 void
 write(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer buf,
  int offset)
 Write this Cell into the given buf's offset in a KeyValue format.
 
 
-
+
 int
 write(http://docs.oracle.com/javase/8/docs/api/java/io/OutputStream.html?is-external=true";
 title="class or interface in java.io">OutputStream out,
  boolean withTags)
@@ -392,14 +419,14 @@ implements 
 
 Methods inherited from interface org.apache.hadoop.hbase.ExtendedCell
-getChunkId
+getChunkId,
 getType
 
 
 
 
 
 Methods inherited from interface org.apache.hadoop.hbase.RawCell
-checkForTagsLength,
 cloneTags,
 getTag,
 getTags
+checkForTagsLength,
 cloneTags
 
 
 
@@ -421,7 +448,7 @@ implements 
 
 cell
-protected ByteBufferCell cell
+protected ByteBufferCell cell
 
 
 
@@ -430,7 +457,7 @@ implements 
 
 tags
-protected byte[] tags
+protected byte[] tags
 
 
 
@@ -439,7 +466,7 @@ implements 
 
 HEAP_SIZE_OVERHEAD
-private static final int HEAP_SIZE_OVERHEAD
+private static final int HEAP_SIZE_OVERHEAD
 
 
 
@@ -456,7 +483,7 @@ implements 
 
 TagRewriteByteBufferCell
-public TagRewriteByteBufferCell(ByteBufferCell cell,
+public TagRewriteByteBufferCell(ByteBufferCell cell,
 byte[] tags)
 
 Parameters:
@@ -479,7 +506,7 @@ implements 
 
 getRowArray
-public byte[] getRowArray()
+public byte[] getRowArray()
 Description copied from 
interface: Cell
 Contiguous raw bytes that may start at any index in the 
co

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
index ecc4759..79f73a0 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
@@ -304,285 +304,286 @@
 296return this;
 297  }
 298
-299  public Get 
setLoadColumnFamiliesOnDemand(boolean value) {
-300return (Get) 
super.setLoadColumnFamiliesOnDemand(value);
-301  }
-302
-303  /**
-304   * Set the maximum number of values to 
return per row per Column Family
-305   * @param limit the maximum number of 
values returned / row / CF
-306   * @return this for invocation 
chaining
-307   */
-308  public Get 
setMaxResultsPerColumnFamily(int limit) {
-309this.storeLimit = limit;
-310return this;
-311  }
-312
-313  /**
-314   * Set offset for the row per Column 
Family. This offset is only within a particular row/CF
-315   * combination. It gets reset back to 
zero when we move to the next row or CF.
-316   * @param offset is the number of kvs 
that will be skipped.
-317   * @return this for invocation 
chaining
-318   */
-319  public Get 
setRowOffsetPerColumnFamily(int offset) {
-320this.storeOffset = offset;
-321return this;
-322  }
-323
-324  @Override
-325  public Get setFilter(Filter filter) {
-326super.setFilter(filter);
-327return this;
-328  }
-329
-330  /* Accessors */
-331
-332  /**
-333   * Set whether blocks should be cached 
for this Get.
-334   * 

-335 * This is true by default. When true, default settings of the table and -336 * family are used (this will never override caching blocks if the block -337 * cache is disabled for that family or entirely). -338 * -339 * @param cacheBlocks if false, default settings are overridden and blocks -340 * will not be cached -341 */ -342 public Get setCacheBlocks(boolean cacheBlocks) { -343this.cacheBlocks = cacheBlocks; -344return this; -345 } -346 -347 /** -348 * Get whether blocks should be cached for this Get. -349 * @return true if default caching should be used, false if blocks should not -350 * be cached -351 */ -352 public boolean getCacheBlocks() { -353return cacheBlocks; -354 } -355 -356 /** -357 * Method for retrieving the get's row -358 * @return row -359 */ -360 @Override -361 public byte [] getRow() { -362return this.row; -363 } -364 -365 /** -366 * Method for retrieving the get's maximum number of version -367 * @return the maximum number of version to fetch for this get -368 */ -369 public int getMaxVersions() { -370return this.maxVersions; -371 } -372 -373 /** -374 * Method for retrieving the get's maximum number of values -375 * to return per Column Family -376 * @return the maximum number of values to fetch per CF -377 */ -378 public int getMaxResultsPerColumnFamily() { -379return this.storeLimit; -380 } -381 -382 /** -383 * Method for retrieving the get's offset per row per column -384 * family (#kvs to be skipped) -385 * @return the row offset -386 */ -387 public int getRowOffsetPerColumnFamily() { -388return this.storeOffset; -389 } -390 -391 /** -392 * Method for retrieving the get's TimeRange -393 * @return timeRange -394 */ -395 public TimeRange getTimeRange() { -396return this.tr; -397 } -398 -399 /** -400 * Method for retrieving the keys in the familyMap -401 * @return keys in the current familyMap -402 */ -403 public Set familySet() { -404return this.familyMap.keySet(); -405 } -406 -407 /** -408 * Method for retrieving the number of families to get from -409 * @return number of families -410 */ -411 public int numFamilies() { -412return this.familyMap.size(); -413 } -414 -415 /** -416 * Method for checking if any families have been inserted into this Get -417 * @return true if familyMap is non empty false otherwise -418 */ -419 public boolean hasFamilies() { -420return !this.familyMap.isEmpty(); -421 } -422 -423 /** -424 * Method for retrieving the get's familyMap -425 * @return familyMap -426 */ -427 public Map> getFamilyMap() { -428return this.familyMap; -429 } -430 -431 /** -432 * Compile the table and column family (i.e. schema) information -433 * into a String. Useful for parsing and aggregation by debugging, -434 * logging, and administration tools. -435 * @return Map -436 */ -437 @Override -438 public Map getFingerprint() { -439Map map = new HashMap<>(); -440List families = new ArrayList<>(this.familyMap.entrySet().size()); -441map.put("families", families); -442for (Map.Entry> en

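Since each setter above returns this, a Get is normally built by chaining, exactly as the "this for invocation chaining" javadoc intends. A short sketch (row, family, and the Table variable are made up):

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    Get get = new Get(Bytes.toBytes("row-1"))
        .setMaxResultsPerColumnFamily(10)   // at most 10 values per row/CF
        .setRowOffsetPerColumnFamily(5)     // skip the first 5 kvs in each row/CF
        .setCacheBlocks(false);             // do not populate the block cache
    Result result = table.get(get);         // table is an open Table instance
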

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index db9a1c3..9489ebe 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AccessControlLists
+public class AccessControlLists
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Maintains lists of permission grants to users and groups to 
allow for
  authorization checks by AccessController.
@@ -464,7 +464,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_TABLE_NAME
-public static final TableName ACL_TABLE_NAME
+public static final TableName ACL_TABLE_NAME
 Internal storage table for access control lists
 
 
@@ -474,7 +474,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_GLOBAL_NAME
-public static final byte[] ACL_GLOBAL_NAME
+public static final byte[] ACL_GLOBAL_NAME
 
 
 
@@ -483,7 +483,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_LIST_FAMILY_STR
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ACL_LIST_FAMILY_STR
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String ACL_LIST_FAMILY_STR
 Column family used to store ACL grants
 
 See Also:
@@ -497,7 +497,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_LIST_FAMILY
-public static final byte[] ACL_LIST_FAMILY
+public static final byte[] ACL_LIST_FAMILY
 
 
 
@@ -506,7 +506,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_TAG_TYPE
-public static final byte ACL_TAG_TYPE
+public static final byte ACL_TAG_TYPE
 KV tag to store per cell access control lists
 
 See Also:
@@ -520,7 +520,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NAMESPACE_PREFIX
-public static final char NAMESPACE_PREFIX
+public static final char NAMESPACE_PREFIX
 
 See Also:
 Constant
 Field Values
@@ -533,7 +533,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ACL_KEY_DELIMITER
-public static final char ACL_KEY_DELIMITER
+public static final char ACL_KEY_DELIMITER
 Delimiter to separate user, column family, and qualifier in
  _acl_ table info: column keys
 
@@ -548,7 +548,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -557,7 +557,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LIST_CODE
-private static final int LIST_CODE
+private static final int LIST_CODE
 
 See Also:
 Constant
 Field Values
@@ -570,7 +570,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WRITABLE_CODE
-private static final int WRITABLE_CODE
+private static final int WRITABLE_CODE
 
 See Also:
 Constant
 Field Values
@@ -583,7 +583,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 WRITABLE_NOT_ENCODED
-private static final int WRITABLE_NOT_ENCODED
+private static final int WRITABLE_NOT_ENCODED
 
 See Also:
 Constant
 Field Values
@@ -604,7 +604,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 AccessControlLists
-public AccessControlLists()
+public AccessControlLists()
 
 
 
@@ -621,7 +621,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 addUserPermission
-static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
+static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
   UserPermission userPerm,
   Table t,
   boolean mergeExistingPermissions)
@@ -643,7 +643,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 addUserPermission
-static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
+static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
   UserPermission userPerm,
   Table t)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -659,

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
-156import 

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index b4eba88..d584846 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -329,7 +329,7 @@
 
 org.apache.hadoop.hbase.HConstants.OLDEST_TIMESTAMP
 Should not be public since 
hbase-1.3.0. For internal use only. Move internal to
- Scanners flagged as special timestamp value never to be returned as timestamp 
on a Cell.
+   Scanners flagged as special timestamp value never to be returned as 
timestamp on a Cell.
 
 
 
@@ -489,9 +489,16 @@
 
 
 
+org.apache.hadoop.hbase.client.Table.checkAndDelete(byte[],
 byte[], byte[], byte[], Delete)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
+
+
+org.apache.hadoop.hbase.client.HTable.checkAndDelete(byte[],
 byte[], byte[], byte[], Delete)
+
+
 org.apache.hadoop.hbase.client.Table.checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete)
-Since 2.0.0. Will be 
removed in 3.0.0. Use
-  Table.checkAndDelete(byte[],
 byte[], byte[], byte[], Delete)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
 
 
 
@@ -501,29 +508,71 @@
 org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete)
 
 
-org.apache.hadoop.hbase.client.Table.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
-Since 2.0.0. Will be 
removed in 3.0.0. Use
- Table.checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations)
+org.apache.hadoop.hbase.client.Table.checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
 
 
 
-org.apache.hadoop.hbase.client.HTable.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
+org.apache.hadoop.hbase.client.HTable.checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete)
+
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete)
+
+
+org.apache.hadoop.hbase.client.Table.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
 
 
+org.apache.hadoop.hbase.client.HTable.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
+
+
 org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
 
+
+org.apache.hadoop.hbase.client.Table.checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
+
 
-org.apache.hadoop.hbase.client.Table.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
-Since 2.0.0. Will be 
removed in 3.0.0. Use
-  Table.checkAndPut(byte[],
 byte[], byte[], CompareOperator, byte[], Put)}
+org.apache.hadoop.hbase.client.HTable.checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations)
+
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations)
+
+
+org.apache.hadoop.hbase.client.Table.checkAndPut(byte[],
 byte[], byte[], byte[], Put)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
 
 
 
-org.apache.hadoop.hbase.client.HTable.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
+org.apache.hadoop.hbase.client.HTable.checkAndPut(byte[],
 byte[], byte[], byte[], Put)
+
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndPut(byte[],
 byte[], byte[], byte[], Put)
+
+
+org.apache.hadoop.hbase.client.Table.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
 
 
+org.apache.hadoop.hbase.client.HTable.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
+
+
 org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
 
+
+org.apache.hadoop.hbase.client.Table.checkAndPut(byte[],
 byte[], byte[], CompareOperator, byte[], Put)
+Since 2.0.0. Will be 
removed in 3.0.0. Use Table.checkAndMutate(byte[],
 byte[])
+
+
+
+org.apache.hadoop.hbase.client.HTable.checkAndPut(byte[],
 byte[], byte[], CompareOperator, byte[], Put)
+
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndPut(byte[],
 byte[], byte[], CompareOperator, byte[], Put)
+
 
 org.apache.hadoop.hbase.security.visibility.VisibilityClient.clearAuths(Configuration,
 String[], String)
 Use VisibilityClient.clearAuths(Conne

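All of the checkAndPut/checkAndDelete/checkAndMutate deprecations above point to the same replacement: the builder returned by Table.checkAndMutate(row, family). A hedged before/after sketch (row, family, qualifier, and values are made up):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    byte[] row = Bytes.toBytes("row-1");
    byte[] cf  = Bytes.toBytes("cf");
    byte[] q   = Bytes.toBytes("q");
    Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new"));

    // deprecated since 2.0.0:
    //   table.checkAndPut(row, cf, q, Bytes.toBytes("old"), put);
    // replacement named in the list above:
    boolean ok = table.checkAndMutate(row, cf)
        .qualifier(q)
        .ifEquals(Bytes.toBytes("old"))
        .thenPut(put);
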
[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/Abortable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/Abortable.html 
b/devapidocs/org/apache/hadoop/hbase/Abortable.html
index 2a8957c..3d8a5d0 100644
--- a/devapidocs/org/apache/hadoop/hbase/Abortable.html
+++ b/devapidocs/org/apache/hadoop/hbase/Abortable.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-BackupHFileCleaner, ConnectionImplementation, 
ConnectionUtils.MasterlessConnection, ConnectionUtils.ShortCircuitingClusterConnection,
 DumpReplicationQueues.WarnOnlyAbortable,
 HBaseAdmin, HBaseAdmin.ThrowableAbortable, HBaseInterClusterReplicationEndpoint,
 HBaseReplicationEndpoint, HMaster, HMasterCommandLine.LocalHMaster, HRegionServer, LogRollBackupSubprocedurePool, 
RegionReplicaReplicationEndpoint,
 ReplicationHFileCleaner.WarnOnlyAbortable,
 ReplicationLogCleaner.WarnOnlyAbortable,
 ReplicationPeerZKImpl, ReplicationSyncUp.DummyServer,
 ZKWatcher
+BackupHFileCleaner, ConnectionImplementation, 
ConnectionUtils.MasterlessConnection, ConnectionUtils.ShortCircuitingClusterConnection,
 DumpReplicationQueues.WarnOnlyAbortable,
 HBaseAdmin, HBaseAdmin.ThrowableAbortable, HBaseInterClusterReplicationEndpoint,
 HBaseReplicationEndpoint, HMaster, HMasterCommandLine.LocalHMaster, HRegionServer, LogRollBackupSubprocedurePool, 
RegionReplicaReplicationEndpoint,
 ReplicationHFileCleaner.WarnOnlyAbortable,
 ReplicationLogCleaner.WarnOnlyAbortable,
 ReplicationPeerZKImpl, ReplicationSyncUp.DummyServer,
 SharedConnection, ZKWatcher
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/ByteBufferKeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ByteBufferKeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/ByteBufferKeyValue.html
index 14308be..e1ea5a0 100644
--- a/devapidocs/org/apache/hadoop/hbase/ByteBufferKeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/ByteBufferKeyValue.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
 
 
 Direct Known Subclasses:
@@ -422,8 +422,7 @@ implements 
 
 void
-setTimestamp(byte[] ts,
-int tsOffset)
+setTimestamp(byte[] ts)
 Sets with the given timestamp.
 
 
@@ -845,11 +844,13 @@ implements 
 setSequenceId
 public void setSequenceId(long seqId)
-Description copied from 
interface: SettableSequenceId
+Description copied from 
interface: ExtendedCell
 Sets with the given seqId.
 
 Specified by:
-setSequenceId in
 interface SettableSequenceId
+setSequenceId in
 interface ExtendedCell
+Parameters:
+seqId - sequence ID
 
 
 
@@ -1212,11 +1213,13 @@ implements setTimestamp
 public void setTimestamp(long ts)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
-Description copied from 
interface: SettableTimestamp
+Description copied from 
interface: ExtendedCell
 Sets with the given timestamp.
 
 Specified by:
-setTimestamp in
 interface SettableTimestamp
+setTimestamp in
 interface ExtendedCell
+Parameters:
+ts - timestamp
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -1231,23 +1234,21 @@ implements private int getTimestampOffset()
 
 
-
+
 
 
 
 
 setTimestamp
-public void setTimestamp(byte[] ts,
- int tsOffset)
+public void setTimestamp(byte[] ts)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
-Description copied from 
interface: SettableTimestamp
+Description copied from 
interface: ExtendedCell
 Sets with the given timestamp.
 
 Specified by:
-setTimestamp in
 interface SettableTimestamp
+setTimestamp in
 interface ExtendedCell
 Parameters:
 ts - buffer containing the timestamp value
-tsOffset - offset to the new timestamp
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 

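The net effect of the hunk above: the two-argument setTimestamp(byte[], int) is gone, and the single-argument form (now specified by ExtendedCell rather than SettableTimestamp) expects a buffer holding just the timestamp bytes. A sketch of the adjusted call site (the cell variable stands in for any ExtendedCell, e.g. a ByteBufferKeyValue):

    import org.apache.hadoop.hbase.util.Bytes;

    // before: cell.setTimestamp(buf, tsOffset);
    byte[] tsBytes = Bytes.toBytes(System.currentTimeMillis()); // 8-byte timestamp
    cell.setTimestamp(tsBytes);
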
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/CellUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/CellUtil.html 
b/devapidocs/org/

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index 686fe97..e2e01c7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -32,348 +32,331 @@
 024import java.util.Map;
 025import java.util.NavigableMap;
 026import java.util.UUID;
-027
-028import org.apache.hadoop.hbase.Cell;
-029import 
org.apache.hadoop.hbase.CellUtil;
-030import 
org.apache.hadoop.hbase.HConstants;
-031import 
org.apache.hadoop.hbase.KeyValue;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import 
org.apache.hadoop.hbase.security.access.Permission;
-034import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-035import 
org.apache.hadoop.hbase.util.Bytes;
-036
-037/**
-038 * Used to perform Delete operations on a 
single row.
-039 * 

-040 * To delete an entire row, instantiate a Delete object with the row -041 * to delete. To further define the scope of what to delete, perform -042 * additional methods as outlined below. -043 *

-044 * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} -045 * for each family to delete. -046 *

-047 * To delete multiple versions of specific columns, execute -048 * {@link #addColumns(byte[], byte[]) deleteColumns} -049 * for each column to delete. -050 *

-051 * To delete specific versions of specific columns, execute -052 * {@link #addColumn(byte[], byte[], long) deleteColumn} -053 * for each column version to delete. -054 *

-055 * Specifying timestamps, deleteFamily and deleteColumns will delete all -056 * versions with a timestamp less than or equal to that passed. If no -057 * timestamp is specified, an entry is added with a timestamp of 'now' -058 * where 'now' is the server's System.currentTimeMillis(). -059 * Specifying a timestamp to the deleteColumn method will -060 * delete versions only with a timestamp equal to that specified. -061 * If no timestamp is passed to deleteColumn, internally, it figures the -062 * most recent cell's timestamp and adds a delete at that timestamp; i.e. -063 * it deletes the most recently added cell. -064 *

The timestamp passed to the constructor is used ONLY for delete of -065 * rows. For anything less -- a deleteColumn, deleteColumns or -066 * deleteFamily -- then you need to use the method overrides that take a -067 * timestamp. The constructor timestamp is not referenced. -068 */ -069@InterfaceAudience.Public -070public class Delete extends Mutation implements Comparable { -071 /** -072 * Create a Delete operation for the specified row. -073 *

-074 * If no further operations are done, this will delete everything -075 * associated with the specified row (all versions of all columns in all -076 * families), with timestamp from current point in time to the past. -077 * Cells defining timestamp for a future point in time -078 * (timestamp > current time) will not be deleted. -079 * @param row row key -080 */ -081 public Delete(byte [] row) { -082this(row, HConstants.LATEST_TIMESTAMP); -083 } -084 -085 /** -086 * Create a Delete operation for the specified row and timestamp.

-087 * -088 * If no further operations are done, this will delete all columns in all -089 * families of the specified row with a timestamp less than or equal to the -090 * specified timestamp.

-091 * -092 * This timestamp is ONLY used for a delete row operation. If specifying -093 * families or columns, you must specify each timestamp individually. -094 * @param row row key -095 * @param timestamp maximum version timestamp (only for delete row) -096 */ -097 public Delete(byte [] row, long timestamp) { -098this(row, 0, row.length, timestamp); -099 } -100 -101 /** -102 * Create a Delete operation for the specified row and timestamp.

-103 * -104 * If no further operations are done, this will delete all columns in all -105 * families of the specified row with a timestamp less than or equal to the -106 * specified timestamp.

-107 * -108 * This timestamp is ONLY used for a delete row operation. If specifying -109 * families or columns, you must specify each timestamp individually. -110 * @param row We make a local copy of this passed in row. -111 * @param rowOffset -112 * @param rowLength -113 */ -114 public Delete(final byte[] row, final int rowOffset, final int rowLength) { -115this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); -116 } -117 -118 /** -119 * Create a Delete operation for the specified row and timestamp.

-120 * -121 * If no further operations are done, this will d

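A compact sketch of the four delete scopes the javadoc above distinguishes (names made up; note that addColumn with a timestamp removes only that exact version, while the other calls remove versions less than or equal to the timestamp):

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.util.Bytes;

    byte[] row = Bytes.toBytes("row-1");
    byte[] cf  = Bytes.toBytes("cf");
    byte[] q   = Bytes.toBytes("q");

    Delete d = new Delete(row);    // whole row: all families, all versions
    d.addFamily(cf);               // or: everything in one family
    d.addColumns(cf, q);           // or: all versions of one column
    d.addColumn(cf, q, 1234L);     // or: exactly the version at timestamp 1234
    table.delete(d);               // table is an open Table instance
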

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index 3edfbef..9707b2c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -2459,5936 +2459,5935 @@
 2451  }
 2452
 2453  for (HStore s : storesToFlush) {
-2454MemStoreSize flushableSize = 
s.getFlushableSize();
-2455
totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456
storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457  
s.createFlushContext(flushOpSeqId, tracker));
-2458// for writing stores to WAL
-2459
committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460
storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), 
flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to 
WAL
-2464  if (wal != null && 
!writestate.readOnly) {
-2465FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466getRegionInfo(), 
flushOpSeqId, committedFiles);
-2467// No sync. Sync is below where 
no updates lock and we do FlushAction.COMMIT_FLUSH
-2468WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2469mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a 
snapshot)
-2473  for (StoreFlushContext flush : 
storeFlushCtxs.values()) {
-2474flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, 
flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  
this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore 
snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483"flushsize=" + 
totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, 
getRegionInfo());
-2486return new 
PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, 
startTime,
-2487flushOpSeqId, flushedSeqId, 
totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of 
internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void 
logFatLineOnFlush(Collection storesToFlush, long sequenceId) {
-2494if (!LOG.isInfoEnabled()) {
-2495  return;
-2496}
-2497// Log a fat line detailing what is 
being flushed.
-2498StringBuilder perCfExtras = null;
-2499if (!isAllFamilies(storesToFlush)) 
{
-2500  perCfExtras = new 
StringBuilder();
-2501  for (HStore store: storesToFlush) 
{
-2502perCfExtras.append("; 
").append(store.getColumnFamilyName());
-2503perCfExtras.append("=")
-2504
.append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505  }
-2506}
-2507LOG.info("Flushing " + + 
storesToFlush.size() + "/" + stores.size() +
-2508" column families, memstore=" + 
StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509((perCfExtras != null && 
perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510((wal != null) ? "" : "; WAL is 
null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final 
WAL wal, final long flushOpSeqId,
-2514  final Map> committedFiles) {
-2515if (wal == null) return;
-2516try {
-2517  FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518  getRegionInfo(), flushOpSeqId, 
committedFiles);
-2519  WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2520  mvcc);
-2521} catch (Throwable t) {
-2522  LOG.warn("Received unexpected 
exception trying to write ABORT_FLUSH marker to WAL:" +
-2523  
StringUtils.stringifyException(t));
-2524  // ignore this since we will be 
aborting the RS with DSE.
-2525}
-2526// we have called 
wal.startCacheFlush(), now we have to abort it
-2527
wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See 
HBASE-8208 for details
-2532   */
-2533  private static void 
doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534  throws IOException {
-2535if (wal == null) {
-2536  return;
-2537}
-2538try {
-2539  wal.sync(); // ensure that flush 
marker is sync'ed
-2540} catch (IOException ioe) {
-2541  
wal.abortCacheFlush(hri.getEncodedN

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
index e017759..377d410 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
@@ -122,8 +122,10 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class RSProcedureDispatcher.CloseRegionRemoteCall
+private final class RSProcedureDispatcher.CloseRegionRemoteCall
 extends RSProcedureDispatcher.AbstractRSRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to close regions using old
+ AdminService#closeRegion(RpcController, CloseRegionRequest, 
RpcCallback) rpc.
 
 
 
@@ -233,7 +235,7 @@ extends 
 
 operation
-private final RSProcedureDispatcher.RegionCloseOperation
 operation
+private final RSProcedureDispatcher.RegionCloseOperation
 operation
 
 
 
@@ -250,7 +252,7 @@ extends 
 
 CloseRegionRemoteCall
-public CloseRegionRemoteCall(ServerName serverName,
+public CloseRegionRemoteCall(ServerName serverName,
  RSProcedureDispatcher.RegionCloseOperation operation)
 
 
@@ -268,7 +270,7 @@ extends 
 
 call
-public http://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true#call--";
 title="class or interface in java.util.concurrent">call in 
interface http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid>
@@ -283,7 +285,7 @@ extends 
 
 sendRequest
-private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse sendRequest(ServerName serverName,
+private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse sendRequest(ServerName serverName,

   
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request)

throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -298,7 +300,7 @@ extends 
 
 remoteCallCompleted
-private void remoteCallCompleted(MasterProcedureEnv env,
+private void remoteCallCompleted(MasterProcedureEnv env,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse response)
 
 
@@ -308,7 +310,7 @@ extends 
 
 remoteCallFailed
-private void remoteCallFailed(MasterProcedureEnv env,
+private void remoteCallFailed(MasterProcedureEnv env,
   http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException e)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
index 2eb3914..fc30b4a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
@@ -117,9 +117,11 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected class RSProcedureDispatcher.CompatRemoteProcedureResolver
+protected class RSProcedureDispatcher.CompatRemoteProcedureResolver
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">Callable

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
index 1a8ad60..df460a8 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
@@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class MultiServerCallable
+class MultiServerCallable
 extends CancellableRegionServerCallable
 Callable that handles the multi method call 
going against a single
  regionserver; i.e. A RegionServerCallable for the multi call (It is NOT a
@@ -296,7 +296,7 @@ extends 
 
 multiAction
-private MultiAction multiAction
+private MultiAction multiAction
 
 
 
@@ -305,7 +305,7 @@ extends 
 
 cellBlock
-private boolean cellBlock
+private boolean cellBlock
 
 
 
@@ -322,7 +322,7 @@ extends 
 
 MultiServerCallable
-MultiServerCallable(ClusterConnection connection,
+MultiServerCallable(ClusterConnection connection,
 TableName tableName,
 ServerName location,
 MultiAction multi,
@@ -346,7 +346,7 @@ extends 
 
 reset
-public void reset(ServerName location,
+public void reset(ServerName location,
   MultiAction multiAction)
 
 
@@ -356,7 +356,7 @@ extends 
 
 getLocation
-protected HRegionLocation getLocation()
+protected HRegionLocation getLocation()
 
 Overrides:
 getLocation in
 class RegionServerCallable
@@ -369,7 +369,7 @@ extends 
 
 getHRegionInfo
-public HRegionInfo getHRegionInfo()
+public HRegionInfo getHRegionInfo()
 
 Overrides:
 getHRegionInfo in
 class RegionServerCallable
@@ -384,7 +384,7 @@ extends 
 
 getMulti
-MultiAction getMulti()
+MultiAction getMulti()
 
 
 
@@ -393,7 +393,7 @@ extends 
 
 rpcCall
-protected MultiResponse rpcCall()
+protected MultiResponse rpcCall()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 Description copied from 
class: RegionServerCallable
 Run the RPC call. Implement this method. To get at the 
rpcController that has been created
@@ -415,7 +415,7 @@ extends 
 
 isCellBlock
-private boolean isCellBlock()
+private boolean isCellBlock()
 
 Returns:
 True if we should send data in cellblocks.  This is an expensive call.  
Cache the
@@ -429,7 +429,7 @@ extends 
 
 prepare
-public void prepare(boolean reload)
+public void prepare(boolean reload)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface: RetryingCallable
 Prepare by setting up any connections to servers, etc., 
ahead of call invocation.
@@ -452,7 +452,7 @@ extends 
 
 getServerName
-ServerName getServerName()
+ServerName getServerName()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 3557e34..96dc671 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
 
@@ -232,7 +232,7 @@ extends 
 
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(TableName tableName)
+AddColumnFamilyProcedureBiConsumer(TableName tableName)
 
 
 
@@ -249,7 +249,7 @@ extends 
 
 getOperationType
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getOperationType()
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getOperationType()
 
 Specified by:
 getOperationType in
 class RawAsyncHBaseAdmin.TableProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-sit
