[15/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index c62e029..36ceec7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -634,7 +634,7 @@
 626    checkClosed();
 627    try {
 628      if (!isTableEnabled(tableName)) {
-629        LOG.debug("Table " + tableName + " not enabled");
+629        LOG.debug("Table {} not enabled", tableName);
 630        return false;
 631      }
 632      List<Pair<RegionInfo, ServerName>> locations =
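The change above swaps string concatenation for SLF4J's parameterized logging. A minimal standalone sketch of the idiom (plain SLF4J on the classpath, not HBase code):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  // With the {} form the message is only formatted when the debug level is
  // actually enabled, so nothing is built on the hot path when it is off.
  public class ParameterizedLoggingSketch {
    private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

    void report(String tableName) {
      // Eager concatenation: the String is always constructed.
      LOG.debug("Table " + tableName + " not enabled");
      // Parameterized form: formatting deferred until the level check passes.
      LOG.debug("Table {} not enabled", tableName);
    }
  }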
@@ -645,1411 +645,1407 @@
 637      for (Pair<RegionInfo, ServerName> pair : locations) {
 638        RegionInfo info = pair.getFirst();
 639        if (pair.getSecond() == null) {
-640          if (LOG.isDebugEnabled()) {
-641            LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst()
-642                .getEncodedName());
-643          }
-644          notDeployed++;
-645        } else if (splitKeys != null
-646            && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-647          for (byte[] splitKey : splitKeys) {
-648            // Just check if the splitkey is available
-649            if (Bytes.equals(info.getStartKey(), splitKey)) {
-650              regionCount++;
-651              break;
-652            }
-653          }
-654        } else {
-655          // Always empty start row should be counted
-656          regionCount++;
-657        }
-658      }
-659      if (notDeployed > 0) {
-660        if (LOG.isDebugEnabled()) {
-661          LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
-662        }
-663        return false;
-664      } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
-665        if (LOG.isDebugEnabled()) {
-666          LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
-667              + " regions, but only " + regionCount + " available");
-668        }
-669        return false;
-670      } else {
-671        if (LOG.isDebugEnabled()) {
-672          LOG.debug("Table " + tableName + " should be available");
-673        }
-674        return true;
-675      }
-676    } catch (TableNotFoundException tnfe) {
-677      LOG.warn("Table " + tableName + " not enabled, it is not exists");
-678      return false;
-679    }
-680  }
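The arithmetic behind the check above: a table pre-split with N split keys should expose N + 1 regions, one starting at the empty row plus one anchored at each split key. A simplified standalone sketch of that counting, using plain byte[][] inputs rather than the HBase types:

  import java.util.Arrays;

  public class SplitKeyCountSketch {
    static boolean allRegionsPresent(byte[][] regionStartKeys, byte[][] splitKeys) {
      int regionCount = 0;
      for (byte[] startKey : regionStartKeys) {
        if (startKey.length == 0) {
          regionCount++;                      // region with the empty start row
        } else {
          for (byte[] splitKey : splitKeys) {
            if (Arrays.equals(startKey, splitKey)) {
              regionCount++;                  // region anchored at a split key
              break;
            }
          }
        }
      }
      return regionCount == splitKeys.length + 1;
    }
  }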
-681
-682  @Override
-683  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-684    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-685      RegionInfo.getStartKey(regionName), false, true);
-686    return locations == null ? null : locations.getRegionLocation();
-687  }
-688
-689  private boolean isDeadServer(ServerName sn) {
-690    if (clusterStatusListener == null) {
-691      return false;
-692    } else {
-693      return clusterStatusListener.isDeadServer(sn);
-694    }
-695  }
-696
-697  @Override
-698  public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
-699    return locateRegions(tableName, false, true);
-700  }
-701
-702  @Override
-703  public List<HRegionLocation> locateRegions(TableName tableName, boolean useCache,
-704      boolean offlined) throws IOException {
-705    List<RegionInfo> regions;
-706    if (TableName.isMetaTableName(tableName)) {
-707      regions = Collections.singletonList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-708    } else {
-709      regions = MetaTableAccessor.getTableRegions(this, tableName, !offlined);
-710    }
-711    List<HRegionLocation> locations = new ArrayList<>();
-712    for (RegionInfo regionInfo : regions) {
-713      if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
-714        continue;
-715      }
-716      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-717      if (list != null) {
-718        for (HRegionLocation loc : list.getRegionLocations()) {
-719          if (loc != null) {
-720            locations.add(loc);
-721          }
-722        }
-723      }
-724    }
-725    return locations;
-726  }
-727
-728  @Override
-729  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-730      throws IOException {
-731    RegionLocations locations = locateRegion(tableName, row, true, true);
-732    return locations == null ? null : locations.getRegionLocation();
-733  }
-734
-735  @Override
-736  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-737      throws IOException {
-738    RegionLocations locations =
-739      relocateRegion(tableName, row,

[15/26] hbase-site git commit: Published site at 6f15cecaed2f1f76bfe1880b7c578ed369daa5d5.

2018-11-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dccdd274/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 6369c27..ea05301 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -603,3251 +603,3256 @@
 595      // init superusers and add the server principal (if using security)
 596      // or process owner as default super user.
 597      Superusers.initialize(conf);
-598
-599      regionServerAccounting = new RegionServerAccounting(conf);
+598      regionServerAccounting = new RegionServerAccounting(conf);
+599
 600      boolean isMasterNotCarryTable =
 601          this instanceof HMaster && !LoadBalancer.isTablesOnMaster(conf);
-602      cacheConfig = new CacheConfig(conf, !isMasterNotCarryTable);
-603      mobCacheConfig = new MobCacheConfig(conf, !isMasterNotCarryTable);
-604      uncaughtExceptionHandler = new UncaughtExceptionHandler() {
-605        @Override
-606        public void uncaughtException(Thread t, Throwable e) {
-607          abort("Uncaught exception in executorService thread " + t.getName(), e);
-608        }
-609      };
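The handler above aborts the server when any executor thread dies unexpectedly. A minimal standalone sketch of the same pattern using only standard Java APIs; the abort(...) call in the real code is an HRegionServer method, replaced here by a plain report:

  public class UncaughtHandlerSketch {
    public static void main(String[] args) throws InterruptedException {
      Thread.UncaughtExceptionHandler handler = (t, e) -> {
        // Report instead of letting the worker vanish silently.
        System.err.println("Uncaught exception in thread " + t.getName());
        e.printStackTrace();
      };
      Thread worker = new Thread(() -> { throw new IllegalStateException("boom"); }, "worker");
      worker.setUncaughtExceptionHandler(handler);
      worker.start();
      worker.join();
    }
  }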
-610
-611      initializeFileSystem();
-612      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
-613
-614      this.configurationManager = new ConfigurationManager();
-615      setupWindows(getConfiguration(), getConfigurationManager());
-616
-617      // Some unit tests don't need a cluster, so no zookeeper at all
-618      if (!conf.getBoolean("hbase.testing.nocluster", false)) {
-619        // Open connection to zookeeper and set primary watcher
-620        zooKeeper = new ZKWatcher(conf, getProcessName() + ":" +
-621          rpcServices.isa.getPort(), this, canCreateBaseZNode());
-622        // If no master in cluster, skip trying to track one or look for a cluster status.
-623        if (!this.masterless) {
-624          this.csm = new ZkCoordinatedStateManager(this);
-625
-626          masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
-627          masterAddressTracker.start();
-628
-629          clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this);
-630          clusterStatusTracker.start();
-631        } else {
-632          masterAddressTracker = null;
-633          clusterStatusTracker = null;
-634        }
-635      } else {
-636        zooKeeper = null;
-637        masterAddressTracker = null;
-638        clusterStatusTracker = null;
-639      }
-640      this.rpcServices.start(zooKeeper);
-641      // This violates 'no starting stuff in Constructor' but Master depends on the below chore
-642      // and executor being created and takes a different startup route. Lots of overlap between HRS
-643      // and M (An M IS A HRS now). Need to refactor so less duplication between M and its super
-644      // Master expects Constructor to put up web servers. Ugh.
-645      // class HRS. TODO.
-646      this.choreService = new ChoreService(getName(), true);
-647      this.executorService = new ExecutorService(getName());
-648      putUpWebUI();
-649    } catch (Throwable t) {
-650      // Make sure we log the exception. HRegionServer is often started via reflection and the
-651      // cause of failed startup is lost.
-652      LOG.error("Failed construction RegionServer", t);
-653      throw t;
-654    }
-655  }
-656
-657  // HMaster should override this method to load the specific config for master
-658  protected String getUseThisHostnameInstead(Configuration conf) throws IOException {
-659    String hostname = conf.get(RS_HOSTNAME_KEY);
-660    if (conf.getBoolean(RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) {
-661      if (!StringUtils.isBlank(hostname)) {
-662        String msg = RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY + " and " + RS_HOSTNAME_KEY +
-663          " are mutually exclusive. Do not set " + RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY +
-664          " to true while " + RS_HOSTNAME_KEY + " is used";
-665        throw new IOException(msg);
-666      } else {
-667        return rpcServices.isa.getHostName();
-668      }
-669    } else {
-670      return hostname;
-671    }
-672  }
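getUseThisHostnameInstead rejects configurations where a hostname override and the flag that disables overrides are both set. A small standalone sketch of that mutual-exclusion check, with generic map keys standing in for the HBase constants above (the key names are placeholders, not HBase configuration keys):

  import java.io.IOException;
  import java.util.Map;

  public class ExclusiveConfigSketch {
    static String resolveHostname(Map<String, String> conf) throws IOException {
      String hostname = conf.get("hostname.override");                          // stand-in key
      boolean disableOverride =
          Boolean.parseBoolean(conf.getOrDefault("hostname.disable.override", "false")); // stand-in key
      if (disableOverride) {
        if (hostname != null && !hostname.isEmpty()) {
          // Both settings present: fail fast instead of guessing which one wins.
          throw new IOException("hostname.override and hostname.disable.override are mutually exclusive");
        }
        return null; // caller falls back to the locally resolved name
      }
      return hostname;
    }
  }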
-673
-674  /**
-675   * If running on Windows, do windows-specific setup.
-676   */
-677  private static void setupWindows(final Configuration conf, ConfigurationManager cm) {
-678    if (!SystemUtils.IS_OS_WINDOWS) {
-679      Signal.handle(new Signal("HUP"), new SignalHandler() {
-680        @Override
-681        public void handle(Signal signal) {
-682

[15/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
@@ -260,2307 +260,2316 @@
 252    }
 253
 254    /**
-255     * Cannot rely on enum ordinals . They change if item is removed or moved.
-256     * Do our own codes.
-257     * @param b
-258     * @return Type associated with passed code.
-259     */
-260    public static Type codeToType(final byte b) {
-261      Type t = codeArray[b & 0xff];
-262      if (t != null) {
-263        return t;
-264      }
-265      throw new RuntimeException("Unknown code " + b);
-266    }
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible Timestamp, empty row and column.  No
-272   * key can be equal or lower than this one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY =
-275    new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an immutable byte array that contains the KV
-280  protected int offset = 0;  // offset into bytes buffer KV starts at
-281  protected int length = 0;  // length of the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290    return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) {
-295    this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE */
-305  public KeyValue() {}
+255     * True to indicate that the byte b is a valid type.
+256     * @param b byte to check
+257     * @return true or false
+258     */
+259    static boolean isValidType(byte b) {
+260      return codeArray[b & 0xff] != null;
+261    }
+262
+263    /**
+264     * Cannot rely on enum ordinals . They change if item is removed or moved.
+265     * Do our own codes.
+266     * @param b
+267     * @return Type associated with passed code.
+268     */
+269    public static Type codeToType(final byte b) {
+270      Type t = codeArray[b & 0xff];
+271      if (t != null) {
+272        return t;
+273      }
+274      throw new RuntimeException("Unknown code " + b);
+275    }
+276  }
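The codeArray used above is an explicit byte-to-enum lookup table, so reordering or removing enum members cannot silently change the wire encoding. A small self-contained sketch of that pattern (not the HBase KeyValue.Type enum; the constants and codes here are illustrative):

  public enum CellTypeSketch {
    MINIMUM((byte) 0), PUT((byte) 4), DELETE((byte) 8), MAXIMUM((byte) 0xff);

    private static final CellTypeSketch[] CODE_ARRAY = new CellTypeSketch[256];
    static {
      for (CellTypeSketch t : values()) {
        CODE_ARRAY[t.code & 0xff] = t;   // explicit wire code, not ordinal()
      }
    }

    private final byte code;
    CellTypeSketch(byte code) { this.code = code; }

    public static boolean isValidType(byte b) {
      return CODE_ARRAY[b & 0xff] != null;
    }

    public static CellTypeSketch codeToType(byte b) {
      CellTypeSketch t = CODE_ARRAY[b & 0xff];
      if (t != null) {
        return t;
      }
      throw new IllegalArgumentException("Unknown code " + b);
    }
  }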
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible Timestamp, empty row and column.  No
+281   * key can be equal or lower than this one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY =
+284    new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an immutable byte array that contains the KV
+289  protected int offset = 0;  // offset into bytes buffer KV starts at
+290  protected int length = 0;  // length of the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299    return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) {
+304    this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of the specified byte array.
-309   * Presumes <code>bytes</code> content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) {
-313    this(bytes, 0);
-314  }
+307  // multi-version concurrency control version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE */
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the specified byte array and offset.
-318   * Presumes <code>bytes</code> content starting at <code>offset</code> is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, final int offset) {
-324    this(bytes, offset, getLength(bytes, offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the specified byte array, starting at offset, and
-329   * for

[15/26] hbase-site git commit: Published site at 7464e2ef9d420d5d8c559600a15d69ed1f3fd41a.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd306e04/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index 93a57cb..f8c8b32 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -33,4324 +33,4323 @@
 025import java.io.InterruptedIOException;
 026import java.util.ArrayList;
 027import java.util.Arrays;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.Iterator;
-031import java.util.LinkedList;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.concurrent.Callable;
-036import 
java.util.concurrent.ExecutionException;
-037import java.util.concurrent.Future;
-038import java.util.concurrent.TimeUnit;
-039import 
java.util.concurrent.TimeoutException;
-040import 
java.util.concurrent.atomic.AtomicInteger;
-041import 
java.util.concurrent.atomic.AtomicReference;
-042import java.util.function.Supplier;
-043import java.util.regex.Pattern;
-044import java.util.stream.Collectors;
-045import java.util.stream.Stream;
-046import 
org.apache.hadoop.conf.Configuration;
-047import 
org.apache.hadoop.hbase.Abortable;
-048import 
org.apache.hadoop.hbase.CacheEvictionStats;
-049import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-050import 
org.apache.hadoop.hbase.ClusterMetrics;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLocations;
-065import 
org.apache.hadoop.hbase.RegionMetrics;
-066import 
org.apache.hadoop.hbase.RegionMetricsBuilder;
-067import 
org.apache.hadoop.hbase.ServerName;
-068import 
org.apache.hadoop.hbase.TableExistsException;
-069import 
org.apache.hadoop.hbase.TableName;
-070import 
org.apache.hadoop.hbase.TableNotDisabledException;
-071import 
org.apache.hadoop.hbase.TableNotFoundException;
-072import 
org.apache.hadoop.hbase.UnknownRegionException;
-073import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-074import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-075import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-076import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-077import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-079import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-080import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-081import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-082import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-083import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-084import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-085import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationException;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-088import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-089import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-090import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-091import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-092import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-093import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-094import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-095import 
org.apache.hadoop.hbase.util.Addressing;
-096import 
org.apache.hadoop.hbase.util.Bytes;
-097import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-098import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-099import 
org.apache.hadoop.hbase.util.Pair;
-100import 
org.apache.hadoop.ipc.RemoteException;
-101import 
org.apache.hadoop.util.StringUtils;
-102import 
org.apache.yetus.audience.InterfaceAudience;
-103import 
org.apache.yetus.audience.InterfaceStability;
-104import org.slf4j.Logger;

[15/26] hbase-site git commit: Published site at 42aa3dd463c0d30a9b940d296b87316b5c67e1f5.

2018-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37b8a04a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
index e984063..083ab07 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
@@ -53,1074 +53,1082 @@
 045import 
org.apache.hadoop.conf.Configuration;
 046import org.apache.hadoop.fs.FileSystem;
 047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import org.apache.hadoop.hbase.Server;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.hadoop.hbase.replication.ReplicationException;
-053import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-055import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-056import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-057import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-058import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-059import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-060import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-061import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-062import 
org.apache.hadoop.hbase.replication.ReplicationUtils;
-063import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-064import 
org.apache.hadoop.hbase.util.Pair;
-065import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-066import 
org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
-067import 
org.apache.yetus.audience.InterfaceAudience;
-068import 
org.apache.zookeeper.KeeperException;
-069import org.slf4j.Logger;
-070import org.slf4j.LoggerFactory;
-071
-072import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-073import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-074
-075/**
-076 * This class is responsible to manage all the replication sources. There are two classes of
-077 * sources:
-078 * <ul>
-079 * <li>Normal sources are persistent and one per peer cluster</li>
-080 * <li>Old sources are recovered from a failed region server and our only goal is to finish
-081 * replicating the WAL queue it had</li>
-082 * </ul>
-083 * <p>
-084 * When a region server dies, this class uses a watcher to get notified and it tries to grab a lock
-085 * in order to transfer all the queues in a local old source.
-086 * <p>
-087 * Synchronization specification:
-088 * <ul>
-089 * <li>No need synchronized on {@link #sources}. {@link #sources} is a ConcurrentHashMap and there
-090 * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no race for peer
-091 * operations.</li>
-092 * <li>Need synchronized on {@link #walsById}. There are four methods which modify it,
-093 * {@link #addPeer(String)}, {@link #removePeer(String)},
-094 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and {@link #preLogRoll(Path)}.
-095 * {@link #walsById} is a ConcurrentHashMap and there is a Lock for peer id in
-096 * {@link PeerProcedureHandlerImpl}. So there is no race between {@link #addPeer(String)} and
-097 * {@link #removePeer(String)}. {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)}
-098 * is called by {@link ReplicationSourceInterface}. So no race with {@link #addPeer(String)}.
-099 * {@link #removePeer(String)} will terminate the {@link ReplicationSourceInterface} firstly, then
-100 * remove the wals from {@link #walsById}. So no race with {@link #removePeer(String)}. The only
-101 * case need synchronized is {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
-102 * {@link #preLogRoll(Path)}.</li>
-103 * <li>No need synchronized on {@link #walsByIdRecoveredQueues}. There are three methods which
-104 * modify it, {@link #removePeer(String)},
-105 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} and
-106 * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
-107 * {@link #cleanOldLogs(String, boolean, ReplicationSourceInterface)} is called by
-108 * {@link ReplicationSourceInterface}. {@link #removePeer(String)} will terminate the
-109 * {@link ReplicationSourceInterface} firstly, then remove the wals from
-110 * {@link #walsByIdRecoveredQueues}. And {@link ReplicationSourceManager.NodeFailoverWorker#run()}

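The javadoc above relies on sources being a ConcurrentHashMap keyed by peer id, with a per-peer lock serializing peer operations. An illustrative sketch of that property, not the HBase implementation: add and remove for the same peer id are serialized by ConcurrentHashMap's per-key atomic compute, while different peers proceed in parallel.

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  public class PeerRegistrySketch<S> {
    private final ConcurrentMap<String, S> sources = new ConcurrentHashMap<>();

    public S addPeer(String peerId, S source) {
      // computeIfAbsent runs at most once per key and is atomic per key.
      return sources.computeIfAbsent(peerId, id -> source);
    }

    public S removePeer(String peerId) {
      return sources.remove(peerId);
    }

    public S get(String peerId) {
      return sources.get(peerId);
    }
  }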
[15/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 12d10e1..97ceefd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -1740,384 +1740,380 @@
 1732      // and will save us having to seek the stream backwards to reread the header we
 1733      // read the last time through here.
 1734      ByteBuffer headerBuf = getCachedHeader(offset);
-1735      if (LOG.isTraceEnabled()) {
-1736        LOG.trace("Reading " + this.fileContext.getHFileName() + " at offset=" + offset +
-1737          ", pread=" + pread + ", verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738          headerBuf + ", onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739      }
-1740      // This is NOT same as verifyChecksum. This latter is whether to do hbase
-1741      // checksums. Can change with circumstances. The below flag is whether the
-1742      // file has support for checksums (version 2+).
-1743      boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
-1744      long startTime = System.currentTimeMillis();
-1745      if (onDiskSizeWithHeader <= 0) {
-1746        // We were not passed the block size. Need to get it from the header. If header was
-1747        // not cached (see getCachedHeader above), need to seek to pull it in. This is costly
-1748        // and should happen very rarely. Currently happens on open of a hfile reader where we
-1749        // read the trailer blocks to pull in the indices. Otherwise, we are reading block sizes
-1750        // out of the hfile index. To check, enable TRACE in this file and you'll get an exception
-1751        // in a LOG every time we seek. See HBASE-17072 for more detail.
-1752        if (headerBuf == null) {
-1753          if (LOG.isTraceEnabled()) {
-1754            LOG.trace("Extra see to get block size!", new RuntimeException());
-1755          }
-1756          headerBuf = ByteBuffer.allocate(hdrSize);
-1757          readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758              offset, pread);
-1759        }
-1760        onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761      }
-1762      int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
-1763      // Allocate enough space to fit the next block's header too; saves a seek next time through.
-1764      // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header;
-1765      // onDiskSizeWithHeader is header, body, and any checksums if present. preReadHeaderSize
-1766      // says where to start reading. If we have the header cached, then we don't need to read
-1767      // it again and we can likely read from last place we left off w/o need to backup and reread
-1768      // the header we read last time through here.
-1769      // TODO: Make this ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770      byte [] onDiskBlock = new byte[onDiskSizeWithHeader + hdrSize];
-1771      int nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772          onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773      if (headerBuf != null) {
-1774        // The header has been read when reading the previous block OR in a distinct header-only
-1775        // read. Copy to this block's header.
-1776        System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize);
-1777      } else {
-1778        headerBuf = ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779      }
-1780      // Do a few checks before we go instantiate HFileBlock.
-1781      assert onDiskSizeWithHeader > this.hdrSize;
-1782      verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, checksumSupport);
-1783      ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784      // Verify checksum of the data before using it for building HFileBlock.
-1785      if (verifyChecksum &&
-1786          !validateChecksum(offset, onDiskBlockByteBuffer, hdrSize)) {
-1787        return null;
-1788      }
-1789      long duration = System.currentTimeMillis() - startTime;
-1790      if (updateMetrics) {
-1791        HFile.updateReadLatency(duration, pread);
-1792      }
-1793      // The onDiskBlock will become the headerAndDataBuffer for this block.
-1794      // If nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795      // contains the header of next block, so no need to set next block's

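The comments above describe the header-caching trick: each read allocates room for the following block's fixed-size header so the next read does not have to seek back for it. A heavily simplified standalone sketch of that idea (not the HBase reader; it assumes at least one header-sized run of bytes follows the block being read):

  import java.io.IOException;
  import java.io.RandomAccessFile;

  public class PrefetchHeaderSketch {
    static final int HDR_SIZE = 33;          // stand-in for the on-disk header size
    private byte[] cachedNextHeader;         // header of the block after the last one read
    private long cachedNextHeaderOffset = -1;

    byte[] readBlock(RandomAccessFile file, long offset, int onDiskSizeWithHeader) throws IOException {
      byte[] buf = new byte[onDiskSizeWithHeader + HDR_SIZE];
      if (cachedNextHeaderOffset == offset) {
        // Header already in hand from the previous read; skip re-reading it.
        System.arraycopy(cachedNextHeader, 0, buf, 0, HDR_SIZE);
        file.seek(offset + HDR_SIZE);
        file.readFully(buf, HDR_SIZE, buf.length - HDR_SIZE);
      } else {
        file.seek(offset);
        file.readFully(buf);
      }
      // Stash the trailing bytes: they are the next block's header.
      cachedNextHeader = new byte[HDR_SIZE];
      System.arraycopy(buf, onDiskSizeWithHeader, cachedNextHeader, 0, HDR_SIZE);
      cachedNextHeaderOffset = offset + onDiskSizeWithHeader;
      return buf;
    }
  }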
[15/26] hbase-site git commit: Published site at 1e56938757d2958631ac1ea07387eaa61997d84a.

2018-04-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b707139a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
index 17cad92..1157a3a 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
@@ -368,76 +368,89 @@
 
 
 private void
+UpdatePeerConfigProcedure.addToList(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringencodedRegionNames,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringencodedRegionName,
+ ReplicationQueueStoragequeueStorage)
+
+
+private void
 ModifyPeerProcedure.addToMap(https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LonglastSeqIds,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringencodedRegionName,
 longbarrier,
 ReplicationQueueStoragequeueStorage)
 
-
+
 private void
 ReplicationPeerManager.checkQueuesDeleted(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 static ReplicationPeerManager
 ReplicationPeerManager.create(ZKWatcherzk,
   org.apache.hadoop.conf.Configurationconf)
 
-
+
 void
 ReplicationPeerManager.disablePeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 void
 ReplicationPeerManager.enablePeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 protected void
 AddPeerProcedure.postPeerModification(MasterProcedureEnvenv)
 
-
+
 protected void
 UpdatePeerConfigProcedure.postPeerModification(MasterProcedureEnvenv)
 
-
+
 protected abstract void
 ModifyPeerProcedure.postPeerModification(MasterProcedureEnvenv)
 Called before we finish the procedure.
 
 
-
+
 protected void
 RemovePeerProcedure.postPeerModification(MasterProcedureEnvenv)
 
-
+
 (package private) void
 ReplicationPeerManager.preAddPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
   ReplicationPeerConfigpeerConfig)
 
-
+
 protected void
 AddPeerProcedure.prePeerModification(MasterProcedureEnvenv)
 
-
+
 protected abstract void
 ModifyPeerProcedure.prePeerModification(MasterProcedureEnvenv)
 Called before we start the actual processing.
 
 
-
+
 (package private) void
 ReplicationPeerManager.removeAllLastPushedSeqIds(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 (package private) void
 ReplicationPeerManager.removeAllQueuesAndHFileRefs(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
-
+
 void
 ReplicationPeerManager.removePeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpeerId)
 
+
+protected void
+ModifyPeerProcedure.setLastPushedSequenceId(MasterProcedureEnvenv,
+   ReplicationPeerConfigpeerConfig)
+
 
-private void
-ModifyPeerProcedure.setLastSequenceIdForSerialPeer(MasterProcedureEnvenv)
+protected void
+ModifyPeerProcedure.setLastPushedSequenceIdForTable(MasterProcedureEnvenv,
+   TableNametableName,
+   https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in 
java.lang">LonglastSeqIds)
 
 
 private void
@@ -445,31 +458,43 @@
 booleanenabled)
 
 
+protected void

[15/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.

2018-03-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index eccc4a3..ebbde54 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -1744,1869 +1744,1872 @@
 1736      HRegion region = getRegion(request.getRegion());
 1737      RegionInfo info = region.getRegionInfo();
 1738      byte[] bestSplitRow = null;
-1739      if (request.hasBestSplitRow() && request.getBestSplitRow()) {
-1740        HRegion r = region;
-1741        region.startRegionOperation(Operation.SPLIT_REGION);
-1742        r.forceSplit(null);
-1743        bestSplitRow = r.checkSplit();
-1744        // when all table data are in memstore, bestSplitRow = null
-1745        // try to flush region first
-1746        if(bestSplitRow == null) {
-1747          r.flush(true);
-1748          bestSplitRow = r.checkSplit();
-1749        }
-1750        r.clearSplit();
-1751      }
-1752      GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
-1753      builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
-1754      if (request.hasCompactionState() && request.getCompactionState()) {
-1755        builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState()));
-1756      }
-1757      builder.setSplittable(region.isSplittable());
-1758      builder.setMergeable(region.isMergeable());
-1759      if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) {
-1760        builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow));
-1761      }
-1762      return builder.build();
-1763    } catch (IOException ie) {
-1764      throw new ServiceException(ie);
-1765    }
-1766  }
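The flow above asks the region for its best split row and, when everything is still in the memstore (checkSplit() returns null), flushes first and asks again. A simplified standalone sketch of that flush-then-retry step; Region here is a stand-in interface, not the HBase class:

  interface RegionSketch {
    byte[] checkSplit();            // null when no split point is known yet
    void flush(boolean force);
  }

  class BestSplitRowSketch {
    static byte[] bestSplitRow(RegionSketch region) {
      byte[] row = region.checkSplit();
      if (row == null) {
        region.flush(true);         // push memstore data into store files
        row = region.checkSplit();  // may still be null for a tiny region
      }
      return row;
    }
  }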
-1767
-1768  @Override
-1769  @QosPriority(priority=HConstants.ADMIN_QOS)
-1770  public GetRegionLoadResponse getRegionLoad(RpcController controller,
-1771      GetRegionLoadRequest request) throws ServiceException {
-1772
-1773    List<HRegion> regions;
-1774    if (request.hasTableName()) {
-1775      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-1776      regions = regionServer.getRegions(tableName);
-1777    } else {
-1778      regions = regionServer.getRegions();
-1779    }
-1780    List<RegionLoad> rLoads = new ArrayList<>(regions.size());
-1781    RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder();
-1782    RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
-1783
-1784    try {
-1785      for (HRegion region : regions) {
-1786        rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier));
-1787      }
-1788    } catch (IOException e) {
-1789      throw new ServiceException(e);
-1790    }
-1791    GetRegionLoadResponse.Builder builder = GetRegionLoadResponse.newBuilder();
-1792    builder.addAllRegionLoads(rLoads);
-1793    return builder.build();
-1794  }
-1795
-1796  @Override
-1797  @QosPriority(priority=HConstants.ADMIN_QOS)
-1798  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller,
-1799    ClearCompactionQueuesRequest request) throws ServiceException {
-1800    LOG.debug("Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
-1801        + RpcServer.getRemoteAddress().orElse(null) + " clear compactions queue");
-1802    ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder();
-1803    requestCount.increment();
-1804    if (clearCompactionQueues.compareAndSet(false,true)) {
-1805      try {
-1806        checkOpen();
-1807        regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues();
-1808        for (String queueName : request.getQueueNameList()) {
-1809          LOG.debug("clear " + queueName + " compaction queue");
-1810          switch (queueName) {
-1811            case "long":
-1812              regionServer.compactSplitThread.clearLongCompactionsQueue();
-1813              break;
-1814            case "short":
-1815              regionServer.compactSplitThread.clearShortCompactionsQueue();
+1739      boolean shouldSplit = true;
+1740      if (request.hasBestSplitRow() && request.getBestSplitRow()) {
+1741        HRegion r = region;
+1742        region.startRegionOperation(Operation.SPLIT_REGION);
+1743        r.forceSplit(null);
+1744        // Even after setting force split if split policy says no to split then we should not split.
+1745        shouldSplit = region.getSplitPolicy().shouldSplit() &&

[15/26] hbase-site git commit: Published site at 67f013430c9ba051385c45d72ee680c44eb88470.

2018-03-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd35fe02/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index 8e48140..2676386 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -33,855 +33,860 @@
 025import java.util.Iterator;
 026import java.util.List;
 027import java.util.Map;
-028import java.util.Set;
-029import java.util.SortedSet;
-030import java.util.TreeSet;
-031import java.util.UUID;
-032import 
java.util.concurrent.ConcurrentHashMap;
-033import 
java.util.concurrent.ConcurrentMap;
-034import java.util.concurrent.Future;
-035import 
java.util.concurrent.LinkedBlockingQueue;
-036import 
java.util.concurrent.RejectedExecutionException;
-037import 
java.util.concurrent.ThreadLocalRandom;
-038import 
java.util.concurrent.ThreadPoolExecutor;
-039import java.util.concurrent.TimeUnit;
-040import 
java.util.concurrent.atomic.AtomicLong;
-041import java.util.stream.Collectors;
-042import 
org.apache.hadoop.conf.Configuration;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.Path;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import org.apache.hadoop.hbase.Server;
-047import 
org.apache.hadoop.hbase.ServerName;
-048import 
org.apache.hadoop.hbase.TableName;
-049import 
org.apache.hadoop.hbase.replication.ReplicationException;
-050import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-051import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-052import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-053import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-054import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-055import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-056import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-059import 
org.apache.yetus.audience.InterfaceAudience;
-060import org.slf4j.Logger;
-061import org.slf4j.LoggerFactory;
-062
-063import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-064import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-065
-066/**
-067 * This class is responsible to manage all the replication sources. There are two classes of
-068 * sources:
-069 * <ul>
-070 * <li>Normal sources are persistent and one per peer cluster</li>
-071 * <li>Old sources are recovered from a failed region server and our only goal is to finish
-072 * replicating the WAL queue it had</li>
-073 * </ul>
-074 * <p>
-075 * When a region server dies, this class uses a watcher to get notified and it tries to grab a lock
-076 * in order to transfer all the queues in a local old source.
-077 * <p>
-078 * Synchronization specification:
-079 * <ul>
-080 * <li>No need synchronized on {@link #sources}. {@link #sources} is a ConcurrentHashMap and there
-081 * is a Lock for peer id in {@link PeerProcedureHandlerImpl}. So there is no race for peer
-082 * operations.</li>
-083 * <li>Need synchronized on {@link #walsById}. There are four methods which modify it,
-084 * {@link #addPeer(String)}, {@link #removePeer(String)},
-085 * {@link #cleanOldLogs(SortedSet, String, String)} and {@link #preLogRoll(Path)}.
-086 * {@link #walsById} is a ConcurrentHashMap and there is a Lock for peer id in
-087 * {@link PeerProcedureHandlerImpl}. So there is no race between {@link #addPeer(String)} and
-088 * {@link #removePeer(String)}. {@link #cleanOldLogs(SortedSet, String, String)} is called by
-089 * {@link ReplicationSourceInterface}. So no race with {@link #addPeer(String)}.
-090 * {@link #removePeer(String)} will terminate the {@link ReplicationSourceInterface} firstly, then
-091 * remove the wals from {@link #walsById}. So no race with {@link #removePeer(String)}. The only
-092 * case need synchronized is {@link #cleanOldLogs(SortedSet, String, String)} and
-093 * {@link #preLogRoll(Path)}.</li>
-094 * <li>No need synchronized on {@link #walsByIdRecoveredQueues}. There are three methods which
-095 * modify it, {@link #removePeer(String)}, {@link #cleanOldLogs(SortedSet, String, String)} and
-096 * {@link ReplicationSourceManager.NodeFailoverWorker#run()}.
-097 * {@link #cleanOldLogs(SortedSet, String, String)} is called by {@link ReplicationSourceInterface}.
-098 * {@link #removePeer(String)} will terminate the {@link ReplicationSourceInterface} firstly, then
-099 * remove the wals from {@link #walsByIdRecoveredQueues}.

[15/26] hbase-site git commit: Published site at .

2018-02-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/issue-tracking.html
--
diff --git a/hbase-build-configuration/hbase-spark/issue-tracking.html b/hbase-build-configuration/hbase-spark/issue-tracking.html
index 952ef18..f42ff68 100644
--- a/hbase-build-configuration/hbase-spark/issue-tracking.html
+++ b/hbase-build-configuration/hbase-spark/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-05
+  Last Published: 
2018-02-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/license.html
--
diff --git a/hbase-build-configuration/hbase-spark/license.html 
b/hbase-build-configuration/hbase-spark/license.html
index 0286227..ff695ee 100644
--- a/hbase-build-configuration/hbase-spark/license.html
+++ b/hbase-build-configuration/hbase-spark/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-05
+  Last Published: 
2018-02-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/mail-lists.html
--
diff --git a/hbase-build-configuration/hbase-spark/mail-lists.html 
b/hbase-build-configuration/hbase-spark/mail-lists.html
index 2a2aea2..f778202 100644
--- a/hbase-build-configuration/hbase-spark/mail-lists.html
+++ b/hbase-build-configuration/hbase-spark/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Mailing Lists
 
@@ -176,7 +176,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-05
+  Last Published: 
2018-02-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/plugin-management.html
--
diff --git a/hbase-build-configuration/hbase-spark/plugin-management.html 
b/hbase-build-configuration/hbase-spark/plugin-management.html
index 876d62e..2a7732a 100644
--- a/hbase-build-configuration/hbase-spark/plugin-management.html
+++ b/hbase-build-configuration/hbase-spark/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Plugin Management
 
@@ -271,7 +271,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-05
+  Last Published: 
2018-02-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/plugins.html
--
diff --git a/hbase-build-configuration/hbase-spark/plugins.html 
b/hbase-build-configuration/hbase-spark/plugins.html
index 540591a..b1e3d48 100644
--- a/hbase-build-configuration/hbase-spark/plugins.html
+++ b/hbase-build-configuration/hbase-spark/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Plugins
 
@@ -238,7 +238,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-02-05
+  Last Published: 
2018-02-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/hbase-build-configuration/hbase-spark/project-info.html
--
diff --git a/hbase-build-configuration/hbase-spark/project-info.html 
b/hbase-build-configuration/hbase-spark/project-info.html
index 921a8ab..a6f27e2 100644
--- a/hbase-build-configuration/hbase-spark/project-info.html
+++ b/hbase-build-configuration/hbase-spark/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Information
 
@@ -167,7 +167,7 @@
 https://www.apache.org/;>The Apache Software 

[15/26] hbase-site git commit: Published site at .

2017-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7d90d02f/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
index 2e2767b..a634c1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -1732,8 +1732,8 @@
 1724      Put putA = makePutFromRegionInfo(splitA);
 1725      Put putB = makePutFromRegionInfo(splitB);
 1726
-1727      addLocation(putA, sn, 1, -1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
-1728      addLocation(putB, sn, 1, -1, splitB.getReplicaId());
+1727      addSequenceNum(putA, 1, -1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
+1728      addSequenceNum(putB, 1, -1, splitB.getReplicaId());
 1729
 1730      // Add empty locations for region replicas of daughters so that number of replicas can be
 1731      // cached whenever the primary region is looked up from meta
@@ -2108,143 +2108,152 @@
 2100    return p.getClass().getSimpleName() + p.toJSON();
 2101  }
 2102
-2103  /**
-2104   * Get replication position for a peer in a region.
-2105   * @param connection connection we're using
-2106   * @return the position of this peer, -1 if no position in meta.
-2107   */
-2108  public static long getReplicationPositionForOnePeer(Connection connection,
-2109      byte[] encodedRegionName, String peerId) throws IOException {
-2110    Get get = new Get(encodedRegionName);
-2111    get.addColumn(HConstants.REPLICATION_POSITION_FAMILY, Bytes.toBytes(peerId));
-2112    Result r = get(getMetaHTable(connection), get);
-2113    if (r.isEmpty()) {
-2114      return -1;
-2115    }
-2116    Cell cell = r.rawCells()[0];
-2117    return Bytes.toLong(cell.getValueArray(),cell.getValueOffset(),cell.getValueLength());
-2118  }
-2119
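A hedged usage sketch of the helper shown above, written against the old side of this hunk (the commit is removing or reworking it); the connection setup is standard HBase client API, and the region name and peer id are placeholder values:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ReplicationPositionSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        byte[] encodedRegionName = Bytes.toBytes("1588230740");   // placeholder
        long position = MetaTableAccessor.getReplicationPositionForOnePeer(
            connection, encodedRegionName, "peer_1");             // placeholder peer id
        System.out.println(position < 0 ? "no position recorded in meta" : "position=" + position);
      }
    }
  }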
-2120  /**
-2121   * Get replication positions for all peers in a region.
-2122   * @param connection connection we're using
-2123   * @param encodedRegionName region's encoded name
-2124   * @return the map of positions for each peer
-2125   */
-2126  public static Map<String, Long> getReplicationPositionForAllPeer(Connection connection,
-2127      byte[] encodedRegionName) throws IOException {
-2128    Get get = new Get(encodedRegionName);
-2129    get.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
-2130    Result r = get(getMetaHTable(connection), get);
-2131    Map<String, Long> map = new HashMap<>((int) (r.size() / 0.75 + 1));
-2132    for (Cell c : r.listCells()) {
-2133      map.put(
-2134          Bytes.toString(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength()),
-2135          Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
-2136    }
-2137    return map;
-2138  }
-2139
-2140  /**
-2141   * Get replication barriers for all peers in a region.
-2142   * @param encodedRegionName region's encoded name
-2143   * @return a list of barrier sequence numbers.
-2144   * @throws IOException
-2145   */
-2146  public static List<Long> getReplicationBarriers(Connection connection, byte[] encodedRegionName)
-2147      throws IOException {
-2148    Get get = new Get(encodedRegionName);
-2149    get.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
-2150    Result r = get(getMetaHTable(connection), get);
-2151    List<Long> list = new ArrayList<>();
-2152    if (!r.isEmpty()) {
-2153      for (Cell cell : r.rawCells()) {
-2154        list.add(Bytes.toLong(cell.getQualifierArray(), cell.getQualifierOffset(),
-2155            cell.getQualifierLength()));
-2156      }
-2157    }
-2158    return list;
-2159  }
-2160
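Both helpers above decode 8-byte longs straight out of the Cell backing arrays. A minimal sketch of that decoding with Bytes.toLong, using a synthetic backing array in place of a Cell:

  import org.apache.hadoop.hbase.util.Bytes;

  public class BytesToLongSketch {
    public static void main(String[] args) {
      byte[] backing = new byte[16];
      System.arraycopy(Bytes.toBytes(42L), 0, backing, 8, 8);  // value stored at offset 8
      long decoded = Bytes.toLong(backing, 8, 8);              // array, offset, length
      System.out.println(decoded);                             // prints 42
    }
  }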
-2161  /**
-2162   * Get all barriers in all regions.
-2163   * @return a map of barrier lists in all regions
-2164   * @throws IOException
-2165   */
-2166  public static Map<String, List<Long>> getAllBarriers(Connection connection) throws IOException {
-2167    Map<String, List<Long>> map = new HashMap<>();
-2168    Scan scan = new Scan();
-2169    scan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
-2170    try (Table t = getMetaHTable(connection);
-2171        ResultScanner scanner = t.getScanner(scan)) {
-2172      Result result;
-2173      while ((result = scanner.next()) != null) {
-2174        String key = Bytes.toString(result.getRow());
-2175        List<Long> list = new ArrayList<>(result.rawCells().length);
-2176        for (Cell cell : result.rawCells()) {
-2177          list.add(Bytes.toLong(cell.getQualifierArray(), cell.getQualifierOffset(),
-2178              cell.getQualifierLength()));
-2179        }
-2180        map.put(key, list);
-2181      }
-2182    }
-2183    return map;
-2184  }
-2185
-2186  /**
-2187   * Get daughter region(s) for a region, only used in serial replication.
-2188   * @param connection

[15/26] hbase-site git commit: Published site at .

2017-09-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/539471a7/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index a81731d..bdeba84 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -604,13 +604,13 @@
 596   * @param row
 597   * @param family
 598   * @param qualifier
-599   * @param compareOp
+599   * @param op
 600   * @param comparator @throws IOException
 601   */
 602  private boolean checkAndRowMutate(final Region region, final List<ClientProtos.Action> actions,
-603                                    final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier,
-604                                    CompareOperator op, ByteArrayComparable comparator, RegionActionResult.Builder builder,
-605                                    ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
+603      final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier,
+604      CompareOperator op, ByteArrayComparable comparator, RegionActionResult.Builder builder,
+605      ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
 606    if (!region.getRegionInfo().isMetaTable()) {
 607      regionServer.cacheFlusher.reclaimMemStoreMemory();
 608    }
@@ -655,2847 +655,2843 @@
 647
 648  /**
 649   * Execute an append mutation.
-650   *
-651   * @param region
-652   * @param m
-653   * @param cellScanner
-654   * @return result to return to client 
if default operation should be
-655   * bypassed as indicated by 
RegionObserver, null otherwise
-656   * @throws IOException
-657   */
-658  private Result append(final Region 
region, final OperationQuota quota,
-659  final MutationProto mutation, final 
CellScanner cellScanner, long nonceGroup,
-660  ActivePolicyEnforcement 
spaceQuota)
-661  throws IOException {
-662long before = 
EnvironmentEdgeManager.currentTime();
-663Append append = 
ProtobufUtil.toAppend(mutation, cellScanner);
-664checkCellSizeLimit(region, append);
-665
spaceQuota.getPolicyEnforcement(region).check(append);
-666quota.addMutation(append);
-667Result r = null;
-668if (region.getCoprocessorHost() != 
null) {
-669  r = 
region.getCoprocessorHost().preAppend(append);
-670}
-671if (r == null) {
-672  boolean canProceed = 
startNonceOperation(mutation, nonceGroup);
-673  boolean success = false;
-674  try {
-675long nonce = mutation.hasNonce() 
? mutation.getNonce() : HConstants.NO_NONCE;
-676if (canProceed) {
-677  r = region.append(append, 
nonceGroup, nonce);
-678} else {
-679  // convert duplicate append to 
get
-680  ListCell results = 
region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
-681nonceGroup, nonce);
-682  r = Result.create(results);
-683}
-684success = true;
-685  } finally {
-686if (canProceed) {
-687  endNonceOperation(mutation, 
nonceGroup, success);
-688}
-689  }
-690  if (region.getCoprocessorHost() != 
null) {
-691r = 
region.getCoprocessorHost().postAppend(append, r);
-692  }
+650   * @return result to return to client 
if default operation should be
+651   * bypassed as indicated by 
RegionObserver, null otherwise
+652   * @throws IOException
+653   */
+654  private Result append(final Region 
region, final OperationQuota quota,
+655  final MutationProto mutation, final 
CellScanner cellScanner, long nonceGroup,
+656  ActivePolicyEnforcement 
spaceQuota)
+657  throws IOException {
+658long before = 
EnvironmentEdgeManager.currentTime();
+659Append append = 
ProtobufUtil.toAppend(mutation, cellScanner);
+660checkCellSizeLimit(region, append);
+661
spaceQuota.getPolicyEnforcement(region).check(append);
+662quota.addMutation(append);
+663Result r = null;
+664if (region.getCoprocessorHost() != 
null) {
+665  r = 
region.getCoprocessorHost().preAppend(append);
+666}
+667if (r == null) {
+668  boolean canProceed = 
startNonceOperation(mutation, nonceGroup);
+669  boolean success = false;
+670  try {
+671long nonce = mutation.hasNonce() 
? mutation.getNonce() : HConstants.NO_NONCE;
+672if (canProceed) {
+673  r = region.append(append, 
nonceGroup, nonce);
+674} else {
+675  // convert duplicate append to 
get
+676  ListCell results = 
region.get(ProtobufUtil.toGet(mutation, cellScanner), 

[15/26] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fdcfc8d5/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
index 3c6f9b8..7d3deb8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.TableQuotasVisitor.html
@@ -39,731 +39,772 @@
 031import org.apache.commons.logging.Log;
 032import 
org.apache.commons.logging.LogFactory;
 033import org.apache.hadoop.hbase.Cell;
-034import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-035import 
org.apache.hadoop.hbase.ServerName;
-036import 
org.apache.hadoop.hbase.TableName;
-037import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-039import 
org.apache.hadoop.hbase.client.ClusterConnection;
-040import 
org.apache.hadoop.hbase.client.Connection;
-041import 
org.apache.hadoop.hbase.client.Get;
-042import 
org.apache.hadoop.hbase.client.Put;
-043import 
org.apache.hadoop.hbase.client.QuotaStatusCalls;
-044import 
org.apache.hadoop.hbase.client.Result;
-045import 
org.apache.hadoop.hbase.client.ResultScanner;
-046import 
org.apache.hadoop.hbase.client.Scan;
-047import 
org.apache.hadoop.hbase.client.Table;
-048import 
org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
-049import 
org.apache.hadoop.hbase.filter.CompareFilter;
-050import 
org.apache.hadoop.hbase.filter.Filter;
-051import 
org.apache.hadoop.hbase.filter.FilterList;
-052import 
org.apache.hadoop.hbase.filter.QualifierFilter;
-053import 
org.apache.hadoop.hbase.filter.RegexStringComparator;
-054import 
org.apache.hadoop.hbase.filter.RowFilter;
-055import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-056import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-057import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-058import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-068import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
-069import 
org.apache.hadoop.hbase.util.Bytes;
-070import 
org.apache.hadoop.hbase.util.Strings;
-071
-072/**
-073 * Helper class to interact with the 
quota table.
-074 * <table>
-075 *   <tr><th>ROW-KEY</th><th>FAM/QUAL</th><th>DATA</th></tr>
-076 *   <tr><td>n.<namespace></td><td>q:s</td><td><global-quotas></td></tr>
-077 *   <tr><td>n.<namespace></td><td>u:p</td><td><namespace-quota policy></td></tr>
-078 *   <tr><td>n.<namespace></td><td>u:s</td><td><SpaceQuotaSnapshot></td></tr>
-079 *   <tr><td>t.<table></td><td>q:s</td><td><global-quotas></td></tr>
-080 *   <tr><td>t.<table></td><td>u:p</td><td><table-quota policy></td></tr>
-081 *   <tr><td>t.<table></td><td>u:ss.<snapshot name></td><td><SpaceQuotaSnapshot></td></tr>
-082 *   <tr><td>u.<user></td><td>q:s</td><td><global-quotas></td></tr>
-083 *   <tr><td>u.<user></td><td>q:s.<table></td><td><table-quotas></td></tr>
-084 *   <tr><td>u.<user></td><td>q:s.<ns></td><td><namespace-quotas></td></tr>
-085 * </table>
-086 */
-087@InterfaceAudience.Private
-088@InterfaceStability.Evolving
-089public class QuotaTableUtil {
-090  private static final Log LOG = 
LogFactory.getLog(QuotaTableUtil.class);
-091
-092  /** System table for quotas */
-093  public static final TableName 
QUOTA_TABLE_NAME =
-094  
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-095
-096  protected static final byte[] 
QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-097  protected static final byte[] 
QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-098  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-099  protected static final byte[] 
QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-100  protected static final byte[] 
QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-101  protected static final byte[] 
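The row-key layout documented in the QuotaTableUtil javadoc above can be exercised directly with the ordinary client API. A minimal sketch, assuming a running cluster and a table quota stored under row "t.my_table", family "q", qualifier "s" as that table describes; the class name ReadTableQuota and the table name my_table are illustrative only:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadTableQuota {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table quotaTable = conn.getTable(TableName.valueOf("hbase:quota"))) {
      // Per the javadoc layout: row "t.<table>", family "q", qualifier "s"
      // hold the serialized global quotas for a table.
      Get get = new Get(Bytes.toBytes("t." + "my_table"));
      get.addColumn(Bytes.toBytes("q"), Bytes.toBytes("s"));
      Result result = quotaTable.get(get);
      System.out.println("Quota cell present: " + !result.isEmpty());
    }
  }
}

In practice the quotas client API (for example QuotaRetriever) is the supported way to read these settings; the raw Get above only illustrates the row-key scheme.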

[15/26] hbase-site git commit: Published site at e916b79db58bb9be806a833b2c0e675f1136c15a.

2017-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b75efae/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.html
 
b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.html
index 30ea259..81d1766 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static final class PluginProtos.CodeGeneratorResponse
+public static final class PluginProtos.CodeGeneratorResponse
 extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
 implements PluginProtos.CodeGeneratorResponseOrBuilder
 
@@ -556,7 +556,7 @@ implements 
 
 bitField0_
-private int bitField0_
+private int bitField0_
 
 
 
@@ -565,7 +565,7 @@ implements 
 
 ERROR_FIELD_NUMBER
-public static final int ERROR_FIELD_NUMBER
+public static final int ERROR_FIELD_NUMBER
 
 See Also:
 Constant
 Field Values
@@ -578,7 +578,7 @@ implements 
 
 error_
-private volatile java.lang.Object error_
+private volatile java.lang.Object error_
 
 
 
@@ -587,7 +587,7 @@ implements 
 
 FILE_FIELD_NUMBER
-public static final int FILE_FIELD_NUMBER
+public static final int FILE_FIELD_NUMBER
 
 See Also:
 Constant
 Field Values
@@ -600,7 +600,7 @@ implements 
 
 file_
-private java.util.List<PluginProtos.CodeGeneratorResponse.File> file_
+private java.util.List<PluginProtos.CodeGeneratorResponse.File> file_
 
 
 
@@ -609,7 +609,7 @@ implements 
 
 memoizedIsInitialized
-private byte memoizedIsInitialized
+private byte memoizedIsInitialized
 
 
 
@@ -618,7 +618,7 @@ implements 
 
 serialVersionUID
-private static final long serialVersionUID
+private static final long serialVersionUID
 
 See Also:
 Constant
 Field Values
@@ -631,7 +631,7 @@ implements 
 
 DEFAULT_INSTANCE
-private static final PluginProtos.CodeGeneratorResponse DEFAULT_INSTANCE
+private static final PluginProtos.CodeGeneratorResponse DEFAULT_INSTANCE
 
 
 
@@ -641,7 +641,7 @@ implements 
 PARSER
@Deprecated
-public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<PluginProtos.CodeGeneratorResponse> PARSER
+public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<PluginProtos.CodeGeneratorResponse> PARSER
 Deprecated.
 
 
@@ -659,7 +659,7 @@ public static 
finalorg.apache.hadoop.hbase.shaded.com.google.protobuf.Pars
 
 
 CodeGeneratorResponse
-private CodeGeneratorResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder)
+private CodeGeneratorResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder)
 
 
 
@@ -668,7 +668,7 @@ public static 
finalorg.apache.hadoop.hbase.shaded.com.google.protobuf.Pars
 
 
 CodeGeneratorResponse
-private CodeGeneratorResponse()
+private CodeGeneratorResponse()
 
 
 
@@ -677,7 +677,7 @@ public static 
finalorg.apache.hadoop.hbase.shaded.com.google.protobuf.Pars
 
 
 CodeGeneratorResponse
-private CodeGeneratorResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+private CodeGeneratorResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
   org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
   throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 
@@ -700,7 +700,7 @@ public static 
finalorg.apache.hadoop.hbase.shaded.com.google.protobuf.Pars
 
 
 getUnknownFields
-public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields()
+public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet getUnknownFields()
 Description copied from interface: org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder
 Get the UnknownFieldSet for this message.
 
@@ -717,7 +717,7 @@ public static 
finalorg.apache.hadoop.hbase.shaded.com.google.protobuf.Pars
 
 
 getDescriptor
-public static 
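Since CodeGeneratorResponse is a standard protobuf-generated message (relocated under the hbase-shaded package), the fields listed above (error_, file_, PARSER) are populated through the usual generated builder. A minimal sketch under that assumption; ResponseSketch is a hypothetical name, and this shaded class is normally only touched by HBase's own build tooling:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler.PluginProtos;

public class ResponseSketch {
  public static void main(String[] args) {
    // Build a response that reports an error back to protoc; setError(...)
    // fills the error_ field documented above.
    PluginProtos.CodeGeneratorResponse response =
        PluginProtos.CodeGeneratorResponse.newBuilder()
            .setError("nothing to generate")
            .build();
    System.out.println("Serialized size: " + response.getSerializedSize());
  }
}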

[15/26] hbase-site git commit: Published site at 7c54525c89bbbe0c66401813433bfb957e461eac.

2016-03-01 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c115ab43/hbase-archetypes/hbase-shaded-client-project/dependencies.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/dependencies.html 
b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
new file mode 100644
index 000..7a9885b
--- /dev/null
+++ b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
@@ -0,0 +1,3597 @@
+Apache HBase - Exemplar for hbase-shaded-client archetype  Project Dependencies
+Last Published: 2016-03-01 | Version: 2.0.0-SNAPSHOT
+Parent Project: Apache HBase - Archetypes
+
+Project Dependencies
+
+compile
+The following is a list of compile dependencies for this project. These 
dependencies are required to compile and run the application:
+
+GroupId | ArtifactId | Version | Type | License
+com.github.stephenc.findbugs | findbugs-annotations (http://stephenc.github.com/findbugs-annotations) | 1.3.9-1 | jar | Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+log4j | log4j (http://logging.apache.org/log4j/1.2/) | 1.2.17 | jar | The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+org.apache.hbase | hbase-shaded-client (http://hbase.apache.org/hbase-shaded/hbase-shaded-client) | 2.0.0-SNAPSHOT | jar | The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+test
+The following is a list of test dependencies for this project. These 
dependencies are only required to compile and run unit tests for the 
application:
+
+GroupId | ArtifactId | Version | Type | License
+junit | junit (http://junit.org) | 4.12 | jar | Eclipse Public License 1.0 (http://www.eclipse.org/legal/epl-v10.html)
+org.apache.hbase | hbase-testing-util (http://hbase.apache.org/hbase-testing-util) | 2.0.0-SNAPSHOT | jar | The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+org.mockito
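
The compile list above shows the exemplar builds against hbase-shaded-client alone (plus logging and annotation jars), so application code uses the regular client API while shading keeps HBase's transitive dependencies off the classpath. A minimal sketch of the kind of code such a project compiles, assuming a reachable cluster; ShadedClientCheck is an illustrative name:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShadedClientCheck {
  public static void main(String[] args) throws Exception {
    // Only hbase-shaded-client is needed on the compile classpath for this code;
    // hbase-testing-util and junit from the test list back the exemplar's unit tests.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (TableName tn : admin.listTableNames()) {
        System.out.println("Found table: " + tn);
      }
    }
  }
}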