[08/26] hbase-site git commit: Published site at dfeab9f5c968625ac1c642c53c721eb5e81068c0.

2018-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/32cb0f25/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
index 15db0f1..82a8c92 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
@@ -285,7 +285,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.HBaseTestingUtility
-assertKVListsEqual, assertRegionOnlyOnServer, assertRegionOnServer, assignRegion, available, checksumRows, cleanupDataTestDirOnTestFS, cleanupDataTestDirOnTestFS, closeRegionAndWAL, closeRegionAndWAL, compact, compact, countRows, countRows, countRows, countRows, countRows, countRows, countRows, createLocalHRegion, createLocalHRegion, createLocalHRegion, createLocalHRegion, createLocalHRegion, createLocalHRegionWithInMemoryFlags, createLocalHTU, createLocalHTU, createMockRegionServerService, createMockRegionServerService, createMockRegionServerService, createMultiRegionsInMeta, createMultiRegionsInMeta, createMultiRegionTable, createMultiRegionTable, createMultiRegionTable, createMultiRegionTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createPreSplitLoadTestTable, createRandomTable, createRegionAndWAL, createRegionAndWAL, createRootDir, createRootDir, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTable, createTableDescriptor, createTableDescriptor, createTableDescriptor, createTableDescriptor, createTableDescriptor, createTableDescriptor, createTestRegion, createWal, createWALRootDir, deleteNumericRows, deleteTable, deleteTableData, deleteTableIfAny, enableDebug, ensureSomeNonStoppedRegionServersAvailable, ensureSomeRegionServersAvailable, expireMasterSession, expireRegionServerSession, expireSession, expireSession, explainTableAvailability, explainTableState, findLastTableState, flush, flush, generateColumnDescriptors, generateColumnDescriptors, getAdmin, getAllOnlineRegions, getClosestRowBefore, getClusterKey, getConfiguration, getConnection, getDataTestDirOnTestFS, getDataTestDirOnTestFS, getDefaultRootDirPath, getDefaultRootDirPath, getDFSCluster, getDifferentUser, getFromStoreFile, getFromStoreFile, getHBaseAdmin, getHBaseCluster, getHBaseClusterInterface, getHbck, getMetaRSPort, getMetaTableDescriptor, getMetaTableDescriptorBuilder, getMetaTableRows, getMetaTableRows, getMiniHBaseCluster, getNumHFiles, getNumHFilesForRS, getOtherRegionServer, getRegionSplitStartKeys, getRSForFirstRegionInTable, getSplittableRegion, getSupportedCompressionAlgorithms, getTestFileSystem, isReadShortCircuitOn, loadNumericRows, loadRandomRows, loadRegion, loadRegion, loadRegion, loadTable, loadTable, loadTable, loadTable, loadTable, memStoreTSTagsAndOffheapCombination, modifyTableSync, moveRegionAndWait, predicateNoRegionsInTransition, predicateTableAvailable, predicateTableDisabled, predicateTableEnabled, randomFreePort, randomMultiCastAddress, restartHBaseCluster, safeGetAsStr, setDFSCluster, setDFSCluster, setFileSystemURI, setHBaseCluster, setMaxRecoveryErrorCount, setReplicas, setupDataTestDir, setupMiniKdc, shutdownMiniCluster, shutdownMiniDFSCluster, shutdownMiniHBaseCluster, shutdownMiniMapReduceCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniCluster, startMiniDFSCluster, startMiniDFSCluster, startMiniDFSCluster, startMiniDFSCluster, startMiniDFSClusterForTestWAL, startMiniHBaseCluster, startMiniHBaseCluster, startMiniHBaseCluster, startMiniHBaseCluster, startMiniHBaseCluster, startMiniMapReduceCluster, truncateTable, truncateTable, unassignRegion, unassignRegion, unassignRegionByRow, unassignRegionByRow, verifyNumericRows, verifyNumericRows, verifyNumericRows, verifyNumericRows, verifyNumericRows, verifyTableDescriptorIgnoreTableName, waitForHostPort, waitLabelAvailable, waitTableAvailable, waitTableAvailable, waitTableAvailable, waitTableDisabled, waitTableDisabled, waitTableDisabled, waitTableEnabled, waitTableEnabled, waitTableEnabled, waitUntilAllRegionsAssigned, waitUntilAllRegionsAssigned, waitUntilAllSystemRegionsAssigned, waitUntilNoRegionsIn

[08/26] hbase-site git commit: Published site at 6f15cecaed2f1f76bfe1880b7c578ed369daa5d5.

2018-11-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dccdd274/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.IndexCacheEntry.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.IndexCacheEntry.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.IndexCacheEntry.html
index f8e34f2..ea22449 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.IndexCacheEntry.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.IndexCacheEntry.html
@@ -217,184 +217,188 @@
 209  @Test
 210  public void testDisableCacheDataBlock() throws IOException {
 211    Configuration conf = HBaseConfiguration.create();
-212    CacheConfig cacheConfig = new CacheConfig(conf);
-213    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
-214    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
-215    assertFalse(cacheConfig.shouldCacheDataCompressed());
-216    assertFalse(cacheConfig.shouldCacheDataOnWrite());
-217    assertTrue(cacheConfig.shouldCacheDataOnRead());
-218    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
-219    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
-220    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
-221    assertFalse(cacheConfig.shouldCacheBloomsOnWrite());
-222    assertFalse(cacheConfig.shouldCacheIndexesOnWrite());
-223
-224    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
-225    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, true);
-226    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true);
-227    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
-228
-229    cacheConfig = new CacheConfig(conf);
-230    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
-231    assertTrue(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
-232    assertTrue(cacheConfig.shouldCacheDataCompressed());
-233    assertTrue(cacheConfig.shouldCacheDataOnWrite());
-234    assertTrue(cacheConfig.shouldCacheDataOnRead());
-235    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
-236    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
-237    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
-238    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
-239    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
-240
-241    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, false);
-242    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
-243
-244    cacheConfig = new CacheConfig(conf);
-245    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
-246    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
-247    assertFalse(cacheConfig.shouldCacheDataCompressed());
-248    assertFalse(cacheConfig.shouldCacheDataOnWrite());
-249    assertFalse(cacheConfig.shouldCacheDataOnRead());
-250    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
-251    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
-252    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
-253    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
-254    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
-255
-256    conf.setBoolean(CacheConfig.CACHE_DATA_ON_READ_KEY, true);
-257    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
-258
-259    HColumnDescriptor family = new HColumnDescriptor("testDisableCacheDataBlock");
-260    family.setBlockCacheEnabled(false);
-261
-262    cacheConfig = new CacheConfig(conf, family);
-263    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
-264    assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
-265    assertFalse(cacheConfig.shouldCacheDataCompressed());
-266    assertFalse(cacheConfig.shouldCacheDataOnWrite());
-267    assertFalse(cacheConfig.shouldCacheDataOnRead());
-268    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.INDEX));
-269    assertFalse(cacheConfig.shouldCacheBlockOnRead(BlockCategory.META));
-270    assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.BLOOM));
-271    assertTrue(cacheConfig.shouldCacheBloomsOnWrite());
-272    assertTrue(cacheConfig.shouldCacheIndexesOnWrite());
-273  }
-274
-275  @Test
-276  public void testCacheConfigDefaultLRUBlockCache() {
-277    CacheConfig cc = new CacheConfig(this.conf);
-278    assertTrue(cc.isBlockCacheEnabled());
-279    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
-280    basicBlockCacheOps(cc, false, true);
-281    assertTrue(cc.getBlockCache() instanceof LruBlockCache);
-282  }
-283
-284  /**
-285   * Assert that the caches are deployed with Com

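The assertions above all follow one pattern: CacheConfig snapshots its Configuration at construction time, so after flipping a cache key the test must build a new CacheConfig to observe the change. A minimal, self-contained sketch of that pattern in plain Java (the Conf and CacheFlags classes and the key names are illustrative stand-ins, not HBase's own):

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for org.apache.hadoop.conf.Configuration.
final class Conf {
    private final Map<String, Boolean> flags = new HashMap<>();
    void setBoolean(String key, boolean v) { flags.put(key, v); }
    boolean getBoolean(String key, boolean dflt) { return flags.getOrDefault(key, dflt); }
}

// Flags are read once, in the constructor -- just like CacheConfig in the test above.
final class CacheFlags {
    static final String CACHE_BLOCKS_ON_WRITE_KEY = "cache.blocks.on.write"; // assumed key name
    private final boolean cacheDataOnWrite;
    CacheFlags(Conf conf) {
        this.cacheDataOnWrite = conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, false);
    }
    boolean shouldCacheDataOnWrite() { return cacheDataOnWrite; }
}

public class CacheFlagsDemo {
    public static void main(String[] args) {
        Conf conf = new Conf();
        CacheFlags before = new CacheFlags(conf);
        conf.setBoolean(CacheFlags.CACHE_BLOCKS_ON_WRITE_KEY, true);
        CacheFlags after = new CacheFlags(conf);  // must rebuild to pick up the change
        System.out.println(before.shouldCacheDataOnWrite()); // false
        System.out.println(after.shouldCacheDataOnWrite());  // true
    }
}
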
[08/26] hbase-site git commit: Published site at 64c4861272aa03f714b4029ae7725f4286b77062.

2018-11-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9b09fec/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index 98ef11a..ab175b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -256,1168 +256,1169 @@
 248    this.leaseRecovery = leaseRecovery;
 249    this.walDir = walDir;
 250    this.walArchiveDir = walArchiveDir;
-251    this.fs = walDir.getFileSystem(conf);
-252    this.enforceStreamCapability = conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
-253
-254    // Create the log directory for the procedure store
-255    if (!fs.exists(walDir)) {
-256      if (!fs.mkdirs(walDir)) {
-257        throw new IOException("Unable to mkdir " + walDir);
-258      }
-259    }
-260    // Now that it exists, set the log policy
-261    String storagePolicy =
-262        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
-263    CommonFSUtils.setStoragePolicy(fs, walDir, storagePolicy);
-264
-265    // Create archive dir up front. Rename won't work w/o it up on HDFS.
-266    if (this.walArchiveDir != null && !this.fs.exists(this.walArchiveDir)) {
-267      if (this.fs.mkdirs(this.walArchiveDir)) {
-268        LOG.debug("Created Procedure Store WAL archive dir {}", this.walArchiveDir);
-269      } else {
-270        LOG.warn("Failed create of {}", this.walArchiveDir);
-271      }
-272    }
-273  }
-274
-275  @Override
-276  public void start(int numSlots) throws IOException {
-277    if (!setRunning(true)) {
-278      return;
-279    }
-280
-281    // Init buffer slots
-282    loading.set(true);
-283    runningProcCount = numSlots;
-284    syncMaxSlot = numSlots;
-285    slots = new ByteSlot[numSlots];
-286    slotsCache = new LinkedTransferQueue<>();
-287    while (slotsCache.size() < numSlots) {
-288      slotsCache.offer(new ByteSlot());
-289    }
-290
-291    // Tunings
-292    walCountWarnThreshold =
-293      conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD);
-294    maxRetriesBeforeRoll =
-295      conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, DEFAULT_MAX_RETRIES_BEFORE_ROLL);
-296    maxSyncFailureRoll = conf.getInt(MAX_SYNC_FAILURE_ROLL_CONF_KEY, DEFAULT_MAX_SYNC_FAILURE_ROLL);
-297    waitBeforeRoll = conf.getInt(WAIT_BEFORE_ROLL_CONF_KEY, DEFAULT_WAIT_BEFORE_ROLL);
-298    rollRetries = conf.getInt(ROLL_RETRIES_CONF_KEY, DEFAULT_ROLL_RETRIES);
-299    rollThreshold = conf.getLong(ROLL_THRESHOLD_CONF_KEY, DEFAULT_ROLL_THRESHOLD);
-300    periodicRollMsec = conf.getInt(PERIODIC_ROLL_CONF_KEY, DEFAULT_PERIODIC_ROLL);
-301    syncWaitMsec = conf.getInt(SYNC_WAIT_MSEC_CONF_KEY, DEFAULT_SYNC_WAIT_MSEC);
-302    useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC);
-303
-304    // WebUI
-305    syncMetricsQueue = new CircularFifoQueue<>(
-306      conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT));
-307
-308    // Init sync thread
-309    syncThread = new Thread("WALProcedureStoreSyncThread") {
-310      @Override
-311      public void run() {
-312        try {
-313          syncLoop();
-314        } catch (Throwable e) {
-315          LOG.error("Got an exception from the sync-loop", e);
-316          if (!isSyncAborted()) {
-317            sendAbortProcessSignal();
-318          }
-319        }
-320      }
-321    };
-322    syncThread.start();
-323  }
-324
-325  @Override
-326  public void stop(final boolean abort) {
-327    if (!setRunning(false)) {
-328      return;
-329    }
-330
-331    LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort +
-332      (isSyncAborted() ? " (self aborting)" : ""));
-333    sendStopSignal();
-334    if (!isSyncAborted()) {
-335      try {
-336        while (syncThread.isAlive()) {
-337          sendStopSignal();
-338          syncThread.join(250);
-339        }
-340      } catch (InterruptedException e) {
-341        LOG.warn("join interrupted", e);
-342        Thread.currentThread().interrupt();
-343      }
-344    }
-345
-346    // Close the writer
-347    closeCurrentLogStream(abort);
-348
-349    // Close the old logs
-350    // they should be already closed, this is just in case the load fails
-351    // and we call start() and then stop()
-352    for (ProcedureWALFile log: logs) {
-353      log.close();
-354    }
-355    logs.clear();
-356    loading.set(true);
-357  }
-358
-359  private void sendStopSignal() {
-360    if (lock.tryLock()) {
-361      try {
-362        waitCond.signalAll();
-363        syncCond.signalAll();
-364      } finally {
-365        lock.unlock

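The constructor above bootstraps two directories with different failure policies: the WAL dir must exist (a hard IOException otherwise), while the archive dir is created best-effort up front because a later rename into a missing directory would fail on HDFS. A hedged sketch of the same idiom using java.nio.file instead of Hadoop's FileSystem (paths and names are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class WalDirBootstrap {
    static void bootstrap(Path walDir, Path walArchiveDir) throws IOException {
        if (!Files.exists(walDir)) {
            // Hard failure, mirroring the "Unable to mkdir" throw above.
            Files.createDirectories(walDir);
        }
        // Best-effort pre-creation of the archive dir; a failure is only logged.
        if (walArchiveDir != null && !Files.exists(walArchiveDir)) {
            try {
                Files.createDirectories(walArchiveDir);
                System.out.println("Created archive dir " + walArchiveDir);
            } catch (IOException e) {
                System.err.println("Failed create of " + walArchiveDir + ": " + e);
            }
        }
    }
    public static void main(String[] args) throws IOException {
        bootstrap(Path.of("/tmp/proc-wals"), Path.of("/tmp/proc-wals-archive"));
    }
}
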
[08/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index c82bf55..172b7a3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -1718,1082 +1718,1081 @@
 1710
 1711  @Override
 1712  public boolean hasReferences() {
-1713    List<HStoreFile> reloadedStoreFiles = null;
-1714    // Grab the read lock here, because we need to ensure that: only when the atomic
-1715    // replaceStoreFiles(..) finished, we can get all the complete store file list.
-1716    this.lock.readLock().lock();
-1717    try {
-1718      // Merge the current store files with compacted files here due to HBASE-20940.
-1719      Collection<HStoreFile> allStoreFiles = new ArrayList<>(getStorefiles());
-1720      allStoreFiles.addAll(getCompactedFiles());
-1721      return StoreUtils.hasReferences(allStoreFiles);
-1722    } finally {
-1723      this.lock.readLock().unlock();
-1724    }
-1725  }
-1726
-1727  /**
-1728   * getter for CompactionProgress object
-1729   * @return CompactionProgress object; can be null
-1730   */
-1731  public CompactionProgress getCompactionProgress() {
-1732    return this.storeEngine.getCompactor().getProgress();
-1733  }
-1734
-1735  @Override
-1736  public boolean shouldPerformMajorCompaction() throws IOException {
-1737    for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) {
-1738      // TODO: what are these reader checks all over the place?
-1739      if (sf.getReader() == null) {
-1740        LOG.debug("StoreFile {} has null Reader", sf);
-1741        return false;
-1742      }
-1743    }
-1744    return storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
-1745        this.storeEngine.getStoreFileManager().getStorefiles());
-1746  }
-1747
-1748  public Optional<CompactionContext> requestCompaction() throws IOException {
-1749    return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
-1750  }
-1751
-1752  public Optional<CompactionContext> requestCompaction(int priority,
-1753      CompactionLifeCycleTracker tracker, User user) throws IOException {
-1754    // don't even select for compaction if writes are disabled
-1755    if (!this.areWritesEnabled()) {
-1756      return Optional.empty();
-1757    }
-1758    // Before we do compaction, try to get rid of unneeded files to simplify things.
-1759    removeUnneededFiles();
-1760
-1761    final CompactionContext compaction = storeEngine.createCompaction();
-1762    CompactionRequestImpl request = null;
-1763    this.lock.readLock().lock();
-1764    try {
-1765      synchronized (filesCompacting) {
-1766        // First, see if coprocessor would want to override selection.
-1767        if (this.getCoprocessorHost() != null) {
-1768          final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
-1769          boolean override = getCoprocessorHost().preCompactSelection(this,
-1770              candidatesForCoproc, tracker, user);
-1771          if (override) {
-1772            // Coprocessor is overriding normal file selection.
-1773            compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc));
-1774          }
-1775        }
-1776
-1777        // Normal case - coprocessor is not overriding file selection.
-1778        if (!compaction.hasSelection()) {
-1779          boolean isUserCompaction = priority == Store.PRIORITY_USER;
-1780          boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
-1781              offPeakCompactionTracker.compareAndSet(false, true);
-1782          try {
-1783            compaction.select(this.filesCompacting, isUserCompaction,
-1784              mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
-1785          } catch (IOException e) {
-1786            if (mayUseOffPeak) {
-1787              offPeakCompactionTracker.set(false);
-1788            }
-1789            throw e;
-1790          }
-1791          assert compaction.hasSelection();
-1792          if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
-1793            // Compaction policy doesn't want to take advantage of off-peak.
-1794            offPeakCompactionTracker.set(false);
-1795          }
-1796        }
-1797        if (this.getCoprocessorHost() != null) {
-1798          this.getCoprocessorHost().postCompactSelection(
-1799              this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
-1800              compaction.getRequest(), user);
-1801        }
-1802        // Finally, we have the resulting files list. Check if we have any files at all.
-1803        req

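The comment in hasReferences() spells out the locking contract: replaceStoreFiles(..) swaps the file lists atomically under the write lock, so a reader takes the read lock while merging the live and compacted lists into one consistent snapshot (the HBASE-20940 fix). A minimal sketch of that idiom with a plain ReentrantReadWriteLock (class and field names are illustrative, not HBase's):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class StoreFileView {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private List<String> storeFiles = new ArrayList<>();
    private List<String> compactedFiles = new ArrayList<>();

    // Writers swap both lists atomically under the write lock.
    void replaceStoreFiles(List<String> live, List<String> compacted) {
        lock.writeLock().lock();
        try {
            this.storeFiles = live;
            this.compactedFiles = compacted;
        } finally {
            lock.writeLock().unlock();
        }
    }

    // Readers merge under the read lock, so they never see a half-swapped state.
    List<String> snapshotAllFiles() {
        lock.readLock().lock();
        try {
            List<String> all = new ArrayList<>(storeFiles);
            all.addAll(compactedFiles); // merge live + compacted, as in the HBASE-20940 fix
            return all;
        } finally {
            lock.readLock().unlock();
        }
    }
}
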
[08/26] hbase-site git commit: Published site at 7464e2ef9d420d5d8c559600a15d69ed1f3fd41a.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd306e04/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index 93a57cb..f8c8b32 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -33,4324 +33,4323 @@
 025import java.io.InterruptedIOException;
 026import java.util.ArrayList;
 027import java.util.Arrays;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.Iterator;
-031import java.util.LinkedList;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.concurrent.Callable;
-036import java.util.concurrent.ExecutionException;
-037import java.util.concurrent.Future;
-038import java.util.concurrent.TimeUnit;
-039import java.util.concurrent.TimeoutException;
-040import java.util.concurrent.atomic.AtomicInteger;
-041import java.util.concurrent.atomic.AtomicReference;
-042import java.util.function.Supplier;
-043import java.util.regex.Pattern;
-044import java.util.stream.Collectors;
-045import java.util.stream.Stream;
-046import org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.hbase.Abortable;
-048import org.apache.hadoop.hbase.CacheEvictionStats;
-049import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-050import org.apache.hadoop.hbase.ClusterMetrics;
-051import org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-053import org.apache.hadoop.hbase.DoNotRetryIOException;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.NamespaceDescriptor;
-062import org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import org.apache.hadoop.hbase.NotServingRegionException;
-064import org.apache.hadoop.hbase.RegionLocations;
-065import org.apache.hadoop.hbase.RegionMetrics;
-066import org.apache.hadoop.hbase.RegionMetricsBuilder;
-067import org.apache.hadoop.hbase.ServerName;
-068import org.apache.hadoop.hbase.TableExistsException;
-069import org.apache.hadoop.hbase.TableName;
-070import org.apache.hadoop.hbase.TableNotDisabledException;
-071import org.apache.hadoop.hbase.TableNotFoundException;
-072import org.apache.hadoop.hbase.UnknownRegionException;
-073import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-074import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-075import org.apache.hadoop.hbase.client.replication.TableCFs;
-076import org.apache.hadoop.hbase.client.security.SecurityCapability;
-077import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-078import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-079import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-080import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-081import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-082import org.apache.hadoop.hbase.quotas.QuotaFilter;
-083import org.apache.hadoop.hbase.quotas.QuotaRetriever;
-084import org.apache.hadoop.hbase.quotas.QuotaSettings;
-085import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-086import org.apache.hadoop.hbase.replication.ReplicationException;
-087import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-088import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-089import org.apache.hadoop.hbase.replication.SyncReplicationState;
-090import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-091import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-092import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-093import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-094import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-095import org.apache.hadoop.hbase.util.Addressing;
-096import org.apache.hadoop.hbase.util.Bytes;
-097import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-098import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-099import org.apache.hadoop.hbase.util.Pair;
-100import org.apache.hadoop.ipc.RemoteException;
-101import org.apache.hadoop.util.StringUtils;
-102import org.apache.yetus.audience.InterfaceAudience;
-103import org.apache.yetus.audience.InterfaceStability;
-104import org.slf4j.Logger;
-105import org

[08/26] hbase-site git commit: Published site at 42aa3dd463c0d30a9b940d296b87316b5c67e1f5.

2018-10-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37b8a04a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
index ef39f9e..f188fed 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
@@ -25,543 +25,555 @@
 017 */
 018package org.apache.hadoop.hbase.replication;
 019
-020import static org.mockito.Mockito.mock;
-021import static org.mockito.Mockito.verify;
-022import static org.mockito.Mockito.when;
-023
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.HashMap;
-027import java.util.List;
-028import java.util.Map;
-029import java.util.UUID;
-030import java.util.concurrent.Callable;
-031import java.util.concurrent.atomic.AtomicBoolean;
-032import java.util.concurrent.atomic.AtomicInteger;
-033import java.util.concurrent.atomic.AtomicReference;
-034import org.apache.hadoop.hbase.Cell;
-035import org.apache.hadoop.hbase.HBaseClassTestRule;
-036import org.apache.hadoop.hbase.Waiter;
-037import org.apache.hadoop.hbase.client.Connection;
-038import org.apache.hadoop.hbase.client.ConnectionFactory;
-039import org.apache.hadoop.hbase.client.Put;
-040import org.apache.hadoop.hbase.client.RegionInfo;
-041import org.apache.hadoop.hbase.client.Table;
-042import org.apache.hadoop.hbase.regionserver.HRegion;
-043import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
-044import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
-045import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
-046import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
-047import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
-048import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
-049import org.apache.hadoop.hbase.testclassification.MediumTests;
-050import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-051import org.apache.hadoop.hbase.util.Bytes;
-052import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-053import org.apache.hadoop.hbase.util.Threads;
-054import org.apache.hadoop.hbase.wal.WAL.Entry;
-055import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-056import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-057import org.junit.AfterClass;
-058import org.junit.Assert;
-059import org.junit.Before;
-060import org.junit.BeforeClass;
-061import org.junit.ClassRule;
-062import org.junit.Test;
-063import org.junit.experimental.categories.Category;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066
-067/**
-068 * Tests ReplicationSource and ReplicationEndpoint interactions
-069 */
-070@Category({ ReplicationTests.class, MediumTests.class })
-071public class TestReplicationEndpoint extends TestReplicationBase {
-072
-073  @ClassRule
-074  public static final HBaseClassTestRule CLASS_RULE =
-075      HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
-076
-077  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpoint.class);
+020import static org.mockito.Mockito.doNothing;
+021import static org.mockito.Mockito.mock;
+022import static org.mockito.Mockito.spy;
+023import static org.mockito.Mockito.verify;
+024import static org.mockito.Mockito.when;
+025
+026import java.io.IOException;
+027import java.util.ArrayList;
+028import java.util.HashMap;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.UUID;
+032import java.util.concurrent.Callable;
+033import java.util.concurrent.atomic.AtomicBoolean;
+034import java.util.concurrent.atomic.AtomicInteger;
+035import java.util.concurrent.atomic.AtomicReference;
+036import org.apache.hadoop.hbase.Cell;
+037import org.apache.hadoop.hbase.HBaseClassTestRule;
+038import org.apache.hadoop.hbase.Waiter;
+039import org.apache.hadoop.hbase.client.Connection;
+040import org.apache.hadoop.hbase.client.ConnectionFactory;
+041import org.apache.hadoop.hbase.client.Put;
+042import org.apache.hadoop.hbase.client.RegionInfo;
+043import org.apache.hadoop.hbase.client.Table;
+044import org.apache.hadoop.hbase.regionserver.HRegion;
+045import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+046import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicat

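The only change visible in this hunk is two extra Mockito static imports, spy and doNothing. A small, self-contained illustration of what that pairing enables -- wrapping a real object in a spy, stubbing one void method to do nothing, and verifying the call (the List here is just a stand-in for the replication classes under test):

import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.util.ArrayList;
import java.util.List;

public class SpyDemo {
    public static void main(String[] args) {
        List<String> real = new ArrayList<>();
        List<String> spied = spy(real);     // real object, selectively stubbable
        doNothing().when(spied).clear();    // stub out a void method
        spied.clear();
        verify(spied).clear();              // passes: clear() was invoked (and did nothing)
    }
}
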
[08/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
index c24fb8f..d1de577 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
@@ -37,18 +37,22 @@
 029 */
 030@InterfaceAudience.Private
 031public class BasicMemStoreCompactionStrategy extends MemStoreCompactionStrategy{
-032
-033  private static final String name = "BASIC";
-034
-035  public BasicMemStoreCompactionStrategy(Configuration conf, String cfName) {
-036    super(conf, cfName);
-037  }
-038
-039  @Override
-040  public Action getAction(VersionedSegmentsList versionedList) {
-041    return simpleMergeOrFlatten(versionedList, name);
-042  }
-043}
+032  private static final String NAME = "BASIC";
+033
+034  public BasicMemStoreCompactionStrategy(Configuration conf, String cfName) {
+035    super(conf, cfName);
+036  }
+037
+038  @Override
+039  public Action getAction(VersionedSegmentsList versionedList) {
+040    return simpleMergeOrFlatten(versionedList, getName());
+041  }
+042
+043  @Override
+044  protected String getName() {
+045    return NAME;
+046  }
+047}
 
 
 

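The change above swaps a private name field for a protected getName() hook that the shared simpleMergeOrFlatten(..) path calls, so each strategy subclass reports its own name -- the classic template-method shape. A minimal sketch with illustrative class names (not HBase's):

abstract class CompactionStrategy {
    // Shared logic; the variable part is deferred to the subclass hook.
    String describeAction() {
        return "merge-or-flatten via " + getName();
    }
    protected abstract String getName();
}

class BasicStrategy extends CompactionStrategy {
    private static final String NAME = "BASIC";
    @Override protected String getName() { return NAME; }
}

public class StrategyDemo {
    public static void main(String[] args) {
        System.out.println(new BasicStrategy().describeAction()); // merge-or-flatten via BASIC
    }
}
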
[08/26] hbase-site git commit: Published site at 1e56938757d2958631ac1ea07387eaa61997d84a.

2018-04-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b707139a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index 6007f27..4ce6735 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -65,716 +65,716 @@
 057import org.apache.hadoop.hbase.client.Table;
 058import org.apache.hadoop.hbase.client.TableState;
 059import org.apache.hadoop.hbase.exceptions.DeserializationException;
-060import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-061import org.apache.hadoop.hbase.master.RegionState;
-062import org.apache.hadoop.hbase.master.RegionState.State;
-063import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-065import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-066import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-067import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-068import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-069import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-070import org.apache.hadoop.hbase.util.Bytes;
-071import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import org.apache.hadoop.hbase.util.ExceptionUtil;
-073import org.apache.hadoop.hbase.util.Pair;
-074import org.apache.hadoop.hbase.util.PairOfSameType;
-075import org.apache.yetus.audience.InterfaceAudience;
-076import org.slf4j.Logger;
-077import org.slf4j.LoggerFactory;
-078
-079import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+060import org.apache.hadoop.hbase.filter.Filter;
+061import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+062import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+063import org.apache.hadoop.hbase.master.RegionState;
+064import org.apache.hadoop.hbase.master.RegionState.State;
+065import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+066import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+067import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+068import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+069import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+070import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+071import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
+072import org.apache.hadoop.hbase.util.Bytes;
+073import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+074import org.apache.hadoop.hbase.util.ExceptionUtil;
+075import org.apache.hadoop.hbase.util.Pair;
+076import org.apache.hadoop.hbase.util.PairOfSameType;
+077import org.apache.yetus.audience.InterfaceAudience;
+078import org.slf4j.Logger;
+079import org.slf4j.LoggerFactory;
 080
-081/**
-082 *
-083 * Read/write operations on region and assignment information store in hbase:meta.
-084 *
-085 *
-086 * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason for this is
-087 * because when used on client-side (like from HBaseAdmin), we want to use short-living connection
-088 * (opened before each operation, closed right after), while when used on HM or HRS (like in
-089 * AssignmentManager) we want permanent connection.
-090 *
-091 *
-092 * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the same table
-093 * range (table, startKey, endKey). For every range, there will be at least one HRI defined which is
-094 * called default replica.
-095 *
-096 *
-097 * Meta layout (as of 0.98 + HBASE-10070) is like:
-098 *
-099 *
-100 * For each table there is single row in column family 'table' formatted:
-101 * <tableName> including namespace and columns are:
-102 * table: state => contains table state
-103 *
-104 * For each table range, there is a single row, formatted like:
-105 * <tableName>,<startKey>,<regionId>,<encodedRegionName>.
-106 * This row corresponds to the regionName of the default region replica.
-107 * Columns are:
-108 * info:regioninfo => contains serialized HRI for the default region replica
-109 * info:server => contains hostname:port (in string form) for the server hosting
-110 *    the default regionInfo replica
-111 * info:server_<replicaId> => contains host

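A tiny sketch of the hbase:meta row-name format the javadoc above describes, <tableName>,<startKey>,<regionId>,<encodedRegionName>. -- purely an illustration of the layout as documented, not HBase's actual region-name encoding code (the values below are made up):

public class MetaRowName {
    // Composes a meta row name per the documented layout; separators follow the javadoc.
    static String metaRow(String tableName, String startKey, long regionId, String encodedName) {
        return tableName + "," + startKey + "," + regionId + "," + encodedName + ".";
    }
    public static void main(String[] args) {
        // Hypothetical table/region values, for illustration only.
        System.out.println(metaRow("ns:t1", "rowA", 1588230740L, "1028785192"));
    }
}
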
[08/26] hbase-site git commit: Published site at 3b6199a27a944f9f05ca6512c59766ed0f590f48.

2018-03-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b2e10744/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
index 9cd8a90..798637d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10};
+var methods = {"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAdmin1
+public class TestAdmin1
 extends java.lang.Object
 Class to test HBaseAdmin.
  Spins up the minicluster once at test start and then takes it down afterward.
@@ -311,26 +311,30 @@ extends java.lang.Object
 
 
 void
-testTableAvailableWithRandomSplitKeys() 
+testSplitShouldNotHappenIfSplitIsDisabledForTable() 
 
 
 void
-testTableExist() 
+testTableAvailableWithRandomSplitKeys() 
 
 
 void
-testTruncateTable() 
+testTableExist() 
 
+
+void
+testTruncateTable() 
+
 
 private void
 testTruncateTable(org.apache.hadoop.hbase.TableName tableName,
                   boolean preserveSplits) 
 
-
 void
 testTruncateTablePreservingSplits() 
 
-
 protected void
 verifyRoundRobinDistribution(org.apache.hadoop.hbase.client.ClusterConnection c,
                              org.apache.hadoop.hbase.client.RegionLocator regionLocator,
@@ -364,7 +368,7 @@ extends java.lang.Object
 
 
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
 
@@ -373,7 +377,7 @@ extends java.lang.Object
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
@@ -382,7 +386,7 @@ extends java.lang.Object
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
@@ -391,7 +395,7 @@ extends java.lang.Object
 
 
 admin
-private org.apache.hadoop.hbase.client.Admin admin
+private org.apache.hadoop.hbase.client.Admin admin
 
@@ -400,7 +404,7 @@ extends java.lang.Object
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
@@ -417,7 +421,7 @@ extends java.lang.Object
 
 
 TestAdmin1
-public TestAdmin1()
+public TestAdmin1()
 
@@ -434,7 +438,7 @@ extends java.lang.Object
 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
                              throws java.lang.Exception
 
 Throws:
@@ -448,7 +452,7 @@ extends java.lang.Object
 
 
 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
                                throws java.lang.Exception
 
 Throws:
@@ -462,7 +466,7 @@ extends java.lang.Object
 
 
 setUp
-public void setUp()
+public void setUp()
            throws java.lang.Exception
 
 Throws:
@@ -476,7 +480,7 @@ extends java.lang.Object
 
 
 tearDown
-public void tearDown()
+public void tearDown()
              throws java.lang.Exception

[08/26] hbase-site git commit: Published site at 67f013430c9ba051385c45d72ee680c44eb88470.

2018-03-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd35fe02/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
index 1a43271..2acdb55 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/test/package-tree.html
@@ -253,10 +253,10 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.test.IntegrationTestLoadAndVerify.Counters
-org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator.Counts
 org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify.Counts
 org.apache.hadoop.hbase.test.IntegrationTestWithCellVisibilityLoadAndVerify.Counters
+org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator.Counts
+org.apache.hadoop.hbase.test.IntegrationTestLoadAndVerify.Counters
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd35fe02/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.html
index eef15f0..1d83de7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.html
@@ -33,342 +33,343 @@
 025import static org.junit.Assert.fail;
 026
 027import java.util.ArrayList;
-028import java.util.List;
-029
-030import org.apache.hadoop.fs.Path;
-031import org.apache.hadoop.hbase.HConstants;
-032import org.apache.hadoop.hbase.ServerName;
-033import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-034import org.apache.hadoop.hbase.util.Pair;
-035import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-036import org.apache.zookeeper.KeeperException;
-037import org.junit.Test;
-038import org.slf4j.Logger;
-039import org.slf4j.LoggerFactory;
-040
-041import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-042
-043/**
-044 * White box testing for replication state interfaces. Implementations should extend this class, and
-045 * initialize the interfaces properly.
-046 */
-047public abstract class TestReplicationStateBasic {
-048
-049  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationStateBasic.class);
-050
-051  protected ReplicationQueueStorage rqs;
-052  protected ServerName server1 = ServerName.valueOf("hostname1.example.org", 1234, 12345);
-053  protected ServerName server2 = ServerName.valueOf("hostname2.example.org", 1234, 12345);
-054  protected ServerName server3 = ServerName.valueOf("hostname3.example.org", 1234, 12345);
-055  protected ReplicationPeers rp;
-056  protected static final String ID_ONE = "1";
-057  protected static final String ID_TWO = "2";
-058  protected static String KEY_ONE;
-059  protected static String KEY_TWO;
-060
-061  // For testing when we try to replicate to ourself
-062  protected String OUR_KEY;
-063
-064  protected static int zkTimeoutCount;
-065  protected static final int ZK_MAX_COUNT = 300;
-066  protected static final int ZK_SLEEP_INTERVAL = 100; // millis
-067
-068  @Test
-069  public void testReplicationQueueStorage() throws ReplicationException {
-070    // Test methods with empty state
-071    assertEquals(0, rqs.getListOfReplicators().size());
-072    assertTrue(rqs.getWALsInQueue(server1, "qId1").isEmpty());
-073    assertTrue(rqs.getAllQueues(server1).isEmpty());
-074
-075    /*
-076     * Set up data Two replicators: -- server1: three queues with 0, 1 and 2 log files each --
-077     * server2: zero queues
-078     */
-079    rqs.addWAL(server1, "qId1", "trash");
-080    rqs.removeWAL(server1, "qId1", "trash");
-081    rqs.addWAL(server1,"qId2", "filename1");
-082    rqs.addWAL(server1,"qId3", "filename2");
-083    rqs.addWAL(server1,"qId3", "filename3");
-084    rqs.addWAL(server2,"trash", "trash");
-085    rqs.removeQueue(server2,"trash");
-086
-087    List<ServerName> reps = rqs.getListOfReplicators();
-088    assertEquals(2, reps.size());
-089    assertTrue(server1.getServerName(), reps.contains(server1));
-090    assertTrue(server2.getServerName(), reps.contains(server2));
-091
-092    assertTrue

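The test above seeds a small fixture -- server1 with three queues holding 0, 1 and 2 WALs, server2 with one queue that is immediately removed -- and still expects both servers listed as replicators. A toy in-memory stand-in for the queue-storage contract being exercised (purely illustrative; the real ReplicationQueueStorage is ZooKeeper-backed):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class InMemoryQueueStorage {
    // server -> (queueId -> WAL file names)
    private final Map<String, Map<String, List<String>>> queues = new HashMap<>();

    void addWAL(String server, String queueId, String wal) {
        queues.computeIfAbsent(server, s -> new HashMap<>())
              .computeIfAbsent(queueId, q -> new ArrayList<>())
              .add(wal);
    }

    void removeWAL(String server, String queueId, String wal) {
        List<String> wals = queues.getOrDefault(server, Map.of()).get(queueId);
        if (wals != null) {
            wals.remove(wal); // queue may become empty but still exists
        }
    }

    void removeQueue(String server, String queueId) {
        Map<String, List<String>> byQueue = queues.get(server);
        if (byQueue != null) {
            byQueue.remove(queueId); // the server stays registered as a replicator
        }
    }

    List<String> getListOfReplicators() {
        return new ArrayList<>(queues.keySet());
    }
}
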
[08/26] hbase-site git commit: Published site at .

2018-02-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ac54a6a8/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index e8af18d..5a1361f 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
@@ -2643,1491 +2643,1490 @@
 2635   * Expire a region server's session
 2636   * @param index which RS
 2637   */
-2638  public HRegionServer expireRegionServerSession(int index) throws Exception {
+2638  public void expireRegionServerSession(int index) throws Exception {
 2639    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
 2640    expireSession(rs.getZooKeeper(), false);
 2641    decrementMinRegionServerCount();
-2642    return rs;
-2643  }
-2644
-2645  private void decrementMinRegionServerCount() {
-2646    // decrement the count for this.conf, for newly spwaned master
-2647    // this.hbaseCluster shares this configuration too
-2648    decrementMinRegionServerCount(getConfiguration());
-2649
-2650    // each master thread keeps a copy of configuration
-2651    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
-2652      decrementMinRegionServerCount(master.getMaster().getConfiguration());
-2653    }
-2654  }
-2655
-2656  private void decrementMinRegionServerCount(Configuration conf) {
-2657    int currentCount = conf.getInt(
-2658        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
-2659    if (currentCount != -1) {
-2660      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
-2661          Math.max(currentCount - 1, 1));
-2662    }
-2663  }
-2664
-2665  public void expireSession(ZKWatcher nodeZK) throws Exception {
-2666    expireSession(nodeZK, false);
-2667  }
-2668
-2669  /**
-2670   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
-2671   * http://hbase.apache.org/book.html#trouble.zookeeper
-2672   * There are issues when doing this:
-2673   * [1] http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html
-2674   * [2] https://issues.apache.org/jira/browse/ZOOKEEPER-1105
-2675   *
-2676   * @param nodeZK - the ZK watcher to expire
-2677   * @param checkStatus - true to check if we can create a Table with the
-2678   *                      current configuration.
-2679   */
-2680  public void expireSession(ZKWatcher nodeZK, boolean checkStatus)
-2681    throws Exception {
-2682    Configuration c = new Configuration(this.conf);
-2683    String quorumServers = ZKConfig.getZKQuorumServersString(c);
-2684    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
-2685    byte[] password = zk.getSessionPasswd();
-2686    long sessionID = zk.getSessionId();
-2687
-2688    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
-2689    //  so we create a first watcher to be sure that the
-2690    //  event was sent. We expect that if our watcher receives the event
-2691    //  other watchers on the same machine will get is as well.
-2692    // When we ask to close the connection, ZK does not close it before
-2693    //  we receive all the events, so don't have to capture the event, just
-2694    //  closing the connection should be enough.
-2695    ZooKeeper monitor = new ZooKeeper(quorumServers,
-2696      1000, new org.apache.zookeeper.Watcher(){
-2697      @Override
-2698      public void process(WatchedEvent watchedEvent) {
-2699        LOG.info("Monitor ZKW received event="+watchedEvent);
-2700      }
-2701    } , sessionID, password);
-2702
-2703    // Making it expire
-2704    ZooKeeper newZK = new ZooKeeper(quorumServers,
-2705        1000, EmptyWatcher.instance, sessionID, password);
-2706
-2707    //ensure that we have connection to the server before closing down, otherwise
-2708    //the close session event will be eaten out before we start CONNECTING state
-2709    long start = System.currentTimeMillis();
-2710    while (newZK.getState() != States.CONNECTED
-2711        && System.currentTimeMillis() - start < 1000) {
-2712      Thread.sleep(1);
-2713    }
-2714    newZK.close();
-2715    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
-2716
-2717    // Now closing & waiting to be sure that the clients get it.
-2718    monitor.close();
-2719
-2720    if (checkStatus) {
-2721      getConnection().getTable(TableName.META_TABLE_NAME).close();
-2722    }
-2723  }
-2724
-2725  /**
-2726   * Get the Mini HBase cluster.
-2727   *
-2728   * @return hbase cluster
-2729   * @see #getHBaseClusterInterface()
-2

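expireSession(..) above implements the ZooKeeper-documented expiry trick: open a second ZooKeeper handle with the same session id and password, then close it, which expires the session for every client sharing it. A condensed sketch of just that core, using the same ZooKeeper constructor the code above calls (assumes a reachable quorum; the monitor watcher and status check from the original are omitted):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkSessionExpirer {
    public static void expire(ZooKeeper victim, String quorum) throws Exception {
        long sessionId = victim.getSessionId();
        byte[] passwd = victim.getSessionPasswd();
        Watcher noop = (WatchedEvent e) -> { };
        // Clone the victim's session on a second handle...
        ZooKeeper doppelganger = new ZooKeeper(quorum, 1000, noop, sessionId, passwd);
        Thread.sleep(1000);   // crude wait for the connection to establish
        // ...then close the clone; the server expires the shared session.
        doppelganger.close();
    }
}
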
[08/26] hbase-site git commit: Published site at .

2017-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7d90d02f/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
index 07b6ae0..21f2337 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html
@@ -47,1510 +47,1527 @@
 039import org.apache.commons.logging.LogFactory;
 040import org.apache.hadoop.conf.Configuration;
 041import org.apache.hadoop.hbase.ClusterStatus;
-042import org.apache.hadoop.hbase.HBaseIOException;
-043import org.apache.hadoop.hbase.HConstants;
-044import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-045import org.apache.hadoop.hbase.HRegionInfo;
-046import org.apache.hadoop.hbase.ServerLoad;
-047import org.apache.hadoop.hbase.ServerName;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import org.apache.hadoop.hbase.master.LoadBalancer;
-051import org.apache.hadoop.hbase.master.MasterServices;
-052import org.apache.hadoop.hbase.master.RackManager;
-053import org.apache.hadoop.hbase.master.RegionPlan;
-054import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
-055
-056import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-057import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
-058import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-059import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-060import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-061
-062/**
-063 * The base class for load balancers. It provides the the functions used to by
-064 * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions
-065 * in the edge cases. It doesn't provide an implementation of the
-066 * actual balancing algorithm.
-067 *
-068 */
-069public abstract class BaseLoadBalancer implements LoadBalancer {
-070  protected static final int MIN_SERVER_BALANCE = 2;
-071  private volatile boolean stopped = false;
-072
-073  private static final List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
-074
-075  static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR
-076    = load -> load.getNumberOfRegions() == 0;
-077
-078  protected final RegionLocationFinder regionFinder = new RegionLocationFinder();
-079
-080  private static class DefaultRackManager extends RackManager {
-081    @Override
-082    public String getRack(ServerName server) {
-083      return UNKNOWN_RACK;
-084    }
-085  }
-086
-087  /**
-088   * The constructor that uses the basic MetricsBalancer
-089   */
-090  protected BaseLoadBalancer() {
-091    metricsBalancer = new MetricsBalancer();
-092  }
-093
-094  /**
-095   * This Constructor accepts an instance of MetricsBalancer,
-096   * which will be used instead of creating a new one
-097   */
-098  protected BaseLoadBalancer(MetricsBalancer metricsBalancer) {
-099    this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer();
-100  }
-101
-102  /**
-103   * An efficient array based implementation similar to ClusterState for keeping
-104   * the status of the cluster in terms of region assignment and distribution.
-105   * LoadBalancers, such as StochasticLoadBalancer uses this Cluster object because of
-106   * hundreds of thousands of hashmap manipulations are very costly, which is why this
-107   * class uses mostly indexes and arrays.
-108   *
-109   * Cluster tracks a list of unassigned regions, region assignments, and the server
-110   * topology in terms of server names, hostnames and racks.
-111   */
-112  protected static class Cluster {
-113    ServerName[] servers;
-114    String[] hosts; // ServerName uniquely identifies a region server. multiple RS can run on the same host
-115    String[] racks;
-116    boolean multiServersPerHost = false; // whether or not any host has more than one server
-117
-118    ArrayList<String> tables;
-119    HRegionInfo[] regions;
-120    Deque<BalancerRegionLoad>[] regionLoads;
-121    private RegionLocationFinder regionFinder;
-122
-123    int[][] regionLocations; //regionIndex -> list of serverIndex sorted by locality
-124
-125    int[]   serverIndexToHostIndex;  //serverIndex -> host index
-126    int[]   serverIndexToRackIndex;  //serverIndex -> rack index
-127
-128    int[][] regionsPerServer;        //serverIndex -> region list
-129    int[][] regionsPerHost;          //hostIndex -> list of regions
-130    int[][] regionsPerRack;          //rackIndex -> r

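The class comment above explains why Cluster is array-based: balancers perform hundreds of thousands of state manipulations, so every server, host, rack and region is mapped to a dense integer index once and all per-entity state lives in plain arrays instead of hash maps. A minimal sketch of that indexing idiom (names and the single counter are illustrative, not the real Cluster fields):

import java.util.HashMap;
import java.util.Map;

public class DenseClusterState {
    // Name -> dense index, built once up front.
    final Map<String, Integer> serverToIndex = new HashMap<>();
    // All hot-path state keyed by the dense index, not by name.
    final int[] regionCountPerServer;

    DenseClusterState(String[] servers) {
        for (int i = 0; i < servers.length; i++) {
            serverToIndex.put(servers[i], i);
        }
        regionCountPerServer = new int[servers.length];
    }

    void assignRegion(String server) {
        // One array write per mutation instead of a map lookup-and-update.
        regionCountPerServer[serverToIndex.get(server)]++;
    }
}
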
[08/26] hbase-site git commit: Published site at .

2017-09-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/539471a7/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.html
index ff50ef0..d02c0d9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.html
@@ -167,7 +167,7 @@
 159
 160      for (WALEntry entry : entries) {
 161        TableName table =
-162            TableName.valueOf(entry.getKey().getTableName().toByteArray());
+162            TableName.valueOf(entry.getEdit().getTableName().toByteArray());
 163        Cell previousCell = null;
 164        Mutation m = null;
 165        int count = entry.getAssociatedCellCount();
@@ -191,8 +191,8 @@
 183              CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(),
 184                  cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(),
 185                  cell.getRowLength());
-186              List<UUID> clusterIds = new ArrayList<>(entry.getKey().getClusterIdsList().size());
-187              for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
+186              List<UUID> clusterIds = new ArrayList<>(entry.getEdit().getClusterIdsList().size());
+187              for (HBaseProtos.UUID clusterId : entry.getEdit().getClusterIdsList()) {
 188                clusterIds.add(toUUID(clusterId));
 189              }
 190              m.setClusterIds(clusterIds);
@@ -229,7 +229,7 @@
 221      }
 222
 223      int size = entries.size();
-224      this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime());
+224      this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getEdit().getWriteTime());
 225      this.metrics.applyBatch(size + hfilesReplicated, hfilesReplicated);
 226      this.totalReplicatedEdits.addAndGet(totalReplicated);
 227    } catch (IOException ex) {

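For orientation: the hunks above only change where the table name, cluster ids, and write time are read from (the entry key versus the edit), while the surrounding replay pattern stays the same. Each replicated cell becomes a row-keyed Delete or Put, tagged with the originating cluster ids. A minimal sketch of that per-cell step, reusing the names visible above (a standalone illustration, not the actual ReplicationSink code):

import java.util.List;
import java.util.UUID;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;

public final class SinkSketch {
  static Mutation toMutation(Cell cell, List<UUID> sourceClusterIds) {
    // Delete-type cells replay as Deletes, everything else as Puts, keyed on the cell's row.
    Mutation m = CellUtil.isDelete(cell)
        ? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())
        : new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    // Carrying the source cluster ids forward is what prevents replication cycles:
    // the source cluster will skip edits that already bear its own id.
    m.setClusterIds(sourceClusterIds);
    return m;
  }
}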
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/539471a7/devapidocs/src-html/org/apache/hadoop/hbase/rest/RESTServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/rest/RESTServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/rest/RESTServer.html
index bd0c853..eeb641b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rest/RESTServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rest/RESTServer.html
@@ -49,323 +49,324 @@
 041import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 042import org.apache.hadoop.hbase.http.InfoServer;
 043import org.apache.hadoop.hbase.rest.filter.AuthFilter;
-044import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
-045import org.apache.hadoop.hbase.security.UserProvider;
-046import org.apache.hadoop.hbase.util.DNS;
-047import org.apache.hadoop.hbase.util.HttpServerUtil;
-048import org.apache.hadoop.hbase.util.Pair;
-049import org.apache.hadoop.hbase.util.Strings;
-050import org.apache.hadoop.hbase.util.VersionInfo;
-051import org.apache.hadoop.util.StringUtils;
-052
-053import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-054
-055import org.eclipse.jetty.http.HttpVersion;
-056import org.eclipse.jetty.server.Server;
-057import org.eclipse.jetty.server.HttpConnectionFactory;
-058import org.eclipse.jetty.server.SslConnectionFactory;
-059import org.eclipse.jetty.server.HttpConfiguration;
-060import org.eclipse.jetty.server.ServerConnector;
-061import org.eclipse.jetty.server.SecureRequestCustomizer;
-062import org.eclipse.jetty.util.ssl.SslContextFactory;
-063import org.eclipse.jetty.servlet.ServletContextHandler;
-064import org.eclipse.jetty.servlet.ServletHolder;
-065import org.eclipse.jetty.util.thread.QueuedThreadPool;
-066import org.eclipse.jetty.jmx.MBeanContainer;
-067import org.eclipse.jetty.servlet.FilterHolder;
-068
-069import org.glassfish.jersey.jackson1.Jackson1Feature;
-070import org.glassfish.jersey.server.ResourceConfig;
-071import org.glassfish.jersey.servlet.ServletContainer;
-072
-073import javax.servlet.DispatcherType;
-074
-075/**
-076 * Main class for launching REST gateway as a servlet hosted by Jetty.
-077 * <p>
-078 * The following options are supported:
-079 * <ul>
-080 * <li>-p --port : service port</li>
-081 * <li>-ro --readonly : server mode</li>
-082 * </ul>
-083 */
-084@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-085public class RESTServer implements Constants {
-086  static Log LOG = LogFactory.getLog("RESTServer");
-087
-088  static String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled";
-089  static boolean REST_CSRF_ENABL
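The removed imports sketch the server's shape: a Jetty Server with a ServerConnector, a ServletContextHandler, and a Jersey ServletContainer hosting the REST resources. A stand-alone toy version of that wiring, assuming Jetty 9 and Jersey 2 on the classpath (MiniRestGateway and the empty ResourceConfig are invented for illustration; the real RESTServer additionally wires auth, CSRF, and SSL filters):

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.glassfish.jersey.server.ResourceConfig;
import org.glassfish.jersey.servlet.ServletContainer;

public class MiniRestGateway {
  public static void main(String[] args) throws Exception {
    int port = args.length > 0 ? Integer.parseInt(args[0]) : 8080; // the -p/--port option
    Server server = new Server(new QueuedThreadPool(100));         // bounded worker pool
    ServerConnector connector = new ServerConnector(server);
    connector.setPort(port);
    server.addConnector(connector);
    ServletContextHandler ctx = new ServletContextHandler(ServletContextHandler.NO_SESSIONS);
    ctx.setContextPath("/");
    // The real gateway registers its REST resources on this ResourceConfig.
    ctx.addServlet(new ServletHolder(new ServletContainer(new ResourceConfig())), "/*");
    server.setHandler(ctx);
    server.start();
    server.join();
  }
}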

[08/26] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fdcfc8d5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
index e1e4110..c46e8c5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
@@ -458,3282 +458,3297 @@
 450  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 451  protected final static String MASTER_HOSTNAME_KEY = "hbase.master.hostname";
 452
-453  /**
-454   * This server's startcode.
-455   */
-456  protected final long startcode;
+453  // HBASE-18226: This config and hbase.regionserver.hostname are mutually exclusive.
+454  // An exception will be thrown if both are used.
+455  final static String RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY =
+456    "hbase.regionserver.hostname.disable.master.reversedns";
 457
 458  /**
-459   * Unique identifier for the cluster we are a part of.
+459   * This server's startcode.
 460   */
-461  private String clusterId;
+461  protected final long startcode;
 462
 463  /**
-464   * MX Bean for RegionServerInfo
+464   * Unique identifier for the cluster we are a part of.
 465   */
-466  private ObjectName mxBean = null;
+466  private String clusterId;
 467
 468  /**
-469   * Chore to clean periodically the moved region list
+469   * MX Bean for RegionServerInfo
 470   */
-471  private MovedRegionsCleaner movedRegionsCleaner;
+471  private ObjectName mxBean = null;
 472
-473  // chore for refreshing store files for secondary regions
-474  private StorefileRefresherChore storefileRefresher;
-475
-476  private RegionServerCoprocessorHost rsHost;
+473  /**
+474   * Chore to clean periodically the moved region list
+475   */
+476  private MovedRegionsCleaner movedRegionsCleaner;
 477
-478  private RegionServerProcedureManagerHost rspmHost;
-479
-480  private RegionServerRpcQuotaManager rsQuotaManager;
-481  private RegionServerSpaceQuotaManager rsSpaceQuotaManager;
+478  // chore for refreshing store files for secondary regions
+479  private StorefileRefresherChore storefileRefresher;
+480
+481  private RegionServerCoprocessorHost rsHost;
 482
-483  /**
-484   * Nonce manager. Nonces are used to make operations like increment and append idempotent
-485   * in the case where client doesn't receive the response from a successful operation and
-486   * retries. We track the successful ops for some time via a nonce sent by client and handle
-487   * duplicate operations (currently, by failing them; in future we might use MVCC to return
-488   * result). Nonces are also recovered from WAL during recovery; however, the caveats (from
-489   * HBASE-3787) are:
-490   * - WAL recovery is optimized, and under high load we won't read nearly nonce-timeout worth
-491   *   of past records. If we don't read the records, we don't read and recover the nonces.
-492   *   Some WALs within nonce-timeout at recovery may not even be present due to rolling/cleanup.
-493   * - There's no WAL recovery during normal region move, so nonces will not be transferred.
-494   * We can have a separate additional "Nonce WAL". It will just contain a bunch of numbers and
-495   * won't be flushed on main path - because WAL itself also contains nonces, if we only flush
-496   * it before memstore flush, for a given nonce we will either see it in the WAL (if it was
-497   * never flushed to disk, it will be part of recovery), or we'll see it as part of the nonce
-498   * log (or both occasionally, which doesn't matter). Nonce log file can be deleted after the
-499   * latest nonce in it expired. It can also be recovered during move.
-500   */
-501  final ServerNonceManager nonceManager;
-502
-503  private UserProvider userProvider;
-504
-505  protected final RSRpcServices rpcServices;
-506
-507  protected BaseCoordinatedStateManager csm;
-508
-509  /**
-510   * Configuration manager is used to register/deregister and notify the configuration observers
-511   * when the regionserver is notified that there was a change in the on disk configs.
-512   */
-513  protected final ConfigurationManager configurationManager;
-514
-515  @VisibleForTesting
-516  CompactedHFilesDischarger compactedFileDischarger;
-517
-518  private volatile ThroughputController flushThroughputController;
+483  private RegionServerProcedureManagerHost rspmHost;
+484
+485  private RegionServerRpcQuotaManager rsQuotaManager;
+486  private RegionServerSpaceQuotaManager rsSpaceQuotaManager;
+487
+488  /**
+489   * Nonce manager. Nonces are used to make operations like increment and a

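The nonce-manager comment (removed above and re-added, truncated, at +488) is worth unpacking: the server remembers nonces from recent client operations and fails duplicate submissions, so a retried increment or append is not applied twice. A toy, hypothetical illustration of that contract, with none of the real ServerNonceManager's group handling or WAL recovery:

import java.util.concurrent.ConcurrentHashMap;

public class NonceSketch {
  private final ConcurrentHashMap<Long, Long> seen = new ConcurrentHashMap<>(); // nonce -> first-seen millis
  private final long retainMillis;

  public NonceSketch(long retainMillis) {
    this.retainMillis = retainMillis;
  }

  /** Returns true if the operation should run, false if it is a duplicate retry. */
  public boolean startOperation(long nonce) {
    long now = System.currentTimeMillis();
    // Lazily evict expired nonces so the table stays bounded; a retry arriving
    // after the retention window is treated as a fresh operation.
    seen.values().removeIf(t -> now - t > retainMillis);
    return seen.putIfAbsent(nonce, now) == null;
  }
}

The caveats in the comment follow directly from this picture: the table only survives restarts to the extent the nonces can be re-read from the WAL, which is exactly what HBASE-3787 constrains.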
[08/26] hbase-site git commit: Published site at e916b79db58bb9be806a833b2c0e675f1136c15a.

2017-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b75efae/devapidocs/src-html/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.File.Builder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.File.Builder.html b/devapidocs/src-html/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.File.Builder.html
index e2f3f0c..01af80f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.File.Builder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.CodeGeneratorResponse.File.Builder.html
@@ -22,4268 +22,5414 @@
 014registerAllExtensions(
 015    (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
 016  }
-017  public interface CodeGeneratorRequestOrBuilder extends
-018      // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.CodeGeneratorRequest)
+017  public interface VersionOrBuilder extends
+018      // @@protoc_insertion_point(interface_extends:google.protobuf.compiler.Version)
 019      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 020
 021    /**
-022     * <pre>
-023     * The .proto files that were explicitly listed on the command-line.  The
-024     * code generator should generate code only for these files.  Each file's
-025     * descriptor will be included in proto_file, below.
-026     * </pre>
-027     *
-028     * <code>repeated string file_to_generate = 1;</code>
-029     */
-030    java.util.List<java.lang.String>
-031        getFileToGenerateList();
-032    /**
-033     * <pre>
-034     * The .proto files that were explicitly listed on the command-line.  The
-035     * code generator should generate code only for these files.  Each file's
-036     * descriptor will be included in proto_file, below.
-037     * </pre>
-038     *
-039     * <code>repeated string file_to_generate = 1;</code>
-040     */
-041    int getFileToGenerateCount();
-042    /**
-043     * <pre>
-044     * The .proto files that were explicitly listed on the command-line.  The
-045     * code generator should generate code only for these files.  Each file's
-046     * descriptor will be included in proto_file, below.
-047     * </pre>
-048     *
-049     * <code>repeated string file_to_generate = 1;</code>
-050     */
-051    java.lang.String getFileToGenerate(int index);
-052    /**
-053     * <pre>
-054     * The .proto files that were explicitly listed on the command-line.  The
-055     * code generator should generate code only for these files.  Each file's
-056     * descriptor will be included in proto_file, below.
-057     * </pre>
-058     *
-059     * <code>repeated string file_to_generate = 1;</code>
-060     */
-061    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
-062        getFileToGenerateBytes(int index);
-063
-064    /**
-065     * <pre>
-066     * The generator parameter passed on the command-line.
-067     * </pre>
-068     *
-069     * <code>optional string parameter = 2;</code>
-070     */
-071    boolean hasParameter();
-072    /**
-073     * <pre>
-074     * The generator parameter passed on the command-line.
-075     * </pre>
-076     *
-077     * <code>optional string parameter = 2;</code>
-078     */
-079    java.lang.String getParameter();
-080    /**
-081     * <pre>
-082     * The generator parameter passed on the command-line.
-083     * </pre>
-084     *
-085     * <code>optional string parameter = 2;</code>
-086     */
-087    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
-088        getParameterBytes();
-089
-090    /**
-091     * <pre>
-092     * FileDescriptorProtos for all files in files_to_generate and everything
-093     * they import.  The files will appear in topological order, so each file
-094     * appears before any file that imports it.
-095     * protoc guarantees that all proto_files will be written after
-096     * the fields above, even though this is not technically guaranteed by the
-097     * protobuf wire format.  This theoretically could allow a plugin to stream
-098     * in the FileDescriptorProtos and handle them one by one rather than read
-099     * the entire set into memory at once.  However, as of this writing, this
-100     * is not similarly optimized on protoc's end -- it will store all fields in
-101     * memory at once before sending them to the plugin.
-102     * </pre>
-103     *
-104     * <code>repeated .google.protobuf.FileDescriptorProto proto_file = 15;</code>
-105     */
-106    java.util.List
-107        getProtoFileList();
-108

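These interfaces are HBase's shaded copy of protobuf's plugin API: protoc hands a plugin one CodeGeneratorRequest on stdin and expects one CodeGeneratorResponse on stdout, and the plugin should generate output only for the files listed in file_to_generate. A minimal plugin skeleton against the stock (unshaded) com.google.protobuf classes; the EchoPlugin name and its .txt outputs are invented for illustration:

import com.google.protobuf.compiler.PluginProtos.CodeGeneratorRequest;
import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse;

public class EchoPlugin {
  public static void main(String[] args) throws Exception {
    // protoc writes exactly one request to the plugin's stdin...
    CodeGeneratorRequest request = CodeGeneratorRequest.parseFrom(System.in);
    CodeGeneratorResponse.Builder response = CodeGeneratorResponse.newBuilder();
    // ...the plugin generates output only for the explicitly listed files...
    for (String file : request.getFileToGenerateList()) {
      response.addFileBuilder()
          .setName(file + ".txt")
          .setContent("parameter was: " + request.getParameter());
    }
    // ...and writes exactly one response back to stdout for protoc to read.
    response.build().writeTo(System.out);
  }
}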
[08/26] hbase-site git commit: Published site at 7c54525c89bbbe0c66401813433bfb957e461eac.

2016-03-01 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c115ab43/testdevapidocs/org/apache/hadoop/hbase/rest/TestDeleteRow.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/rest/TestDeleteRow.html b/testdevapidocs/org/apache/hadoop/hbase/rest/TestDeleteRow.html
index 2654a57..f46cbf3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/rest/TestDeleteRow.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/rest/TestDeleteRow.html
@@ -166,7 +166,7 @@ extends RowResourceBase
-afterMethod, beforeMethod, checkAndDeleteJson, checkAndDeleteJson, checkAndDeletePB, checkAndDeleteValuePB, checkAndDeleteXML, checkAndDeleteXML, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValueXML, checkAndPutValueXML, checkAndPutValueXML, checkValueJSON, checkValuePB, checkValueXML, checkValueXML, deleteRow, deleteValue, getValueJson, getValueJson, getValuePB, getValuePB, getValueXML, getValueXML, getValueXML, putValueJson, putValueJson, putValuePB, putValuePB, putValueXML, putValueXML, setUpBeforeClass, tearDownAfterClass
+afterMethod, beforeMethod, checkAndDeleteJson, checkAndDeleteJson, checkAndDeleteJson, checkAndDeletePB, checkAndDeletePB, checkAndDeleteValuePB, checkAndDeleteXML, checkAndDeleteXML, checkAndDeleteXML, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValueXML, checkAndPutValueXML, checkAndPutValueXML, checkValueJSON, checkValuePB, checkValueXML, checkValueXML, deleteRow, deleteValue, getValueJson, getValueJson, getValuePB, getValuePB, getValueXML, getValueXML, getValueXML, putValueJson, putValueJson, putValuePB, putValuePB, putValueXML, putValueXML, setUpBeforeClass, tearDownAfterClass
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c115ab43/testdevapidocs/org/apache/hadoop/hbase/rest/TestGetAndPutResource.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/rest/TestGetAndPutResource.html b/testdevapidocs/org/apache/hadoop/hbase/rest/TestGetAndPutResource.html
index 37932bb..032e3cf 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/rest/TestGetAndPutResource.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/rest/TestGetAndPutResource.html
@@ -207,46 +207,50 @@ extends 
 void
-testMultipleCellCheckPutPB() 
+testMultipleCellCheckDeletePB() 
 
 
 void
-testMultipleCellCheckPutXML() 
+testMultipleCellCheckPutPB() 
 
 
 void
-testNoSuchCF() 
+testMultipleCellCheckPutXML() 
 
 
 void
-testSingleCellGetJSON() 
+testNoSuchCF() 
 
 
 void
-testSingleCellGetPutBinary() 
+testSingleCellGetJSON() 
 
 
 void
-testSingleCellGetPutPB() 
+testSingleCellGetPutBinary() 
 
 
 void
-testSingleCellGetPutXML() 
+testSingleCellGetPutPB() 
 
 
 void
-testStartEndRowGetPutXML() 
+testSingleCellGetPutXML() 
 
 
 void
-testSuffixGlobbingXML() 
+testStartEndRowGetPutXML() 
 
 
 void
-testSuffixGlobbingXMLWithNewScanner() 
+testSuffixGlobbingXML() 
 
 
 void
+testSuffixGlobbingXMLWithNewScanner() 
+
+
+void
 testURLEncodedKey() 
 
 
@@ -255,7 +259,7 @@ extends RowResourceBase
-afterMethod, beforeMethod, checkAndDeleteJson, checkAndDeleteJson, checkAndDeletePB, checkAndDeleteValuePB, checkAndDeleteXML, checkAndDeleteXML, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValueXML, checkAndPutValueXML, checkAndPutValueXML, checkValueJSON, checkValuePB, checkValueXML, checkValueXML, deleteRow, deleteValue, getValueJson, getValueJson, getValuePB, getValuePB, getValueXML, getValueXML, getValueXML, putValueJson, putValueJson, putValuePB, putValuePB, putValueXML, putValueXML, setUpBeforeClass, tearDownAfterClass
+afterMethod, beforeMethod, checkAndDeleteJson, checkAndDeleteJson, checkAndDeleteJson, checkAndDeletePB, checkAndDeletePB, checkAndDeleteValuePB, checkAndDeleteXML, checkAndDeleteXML, checkAndDeleteXML, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValuePB, checkAndPutValueXML, checkAndPutValueXML, checkAndPutValueXML, checkValueJSON, checkValuePB, checkValueXML, checkValueXML, deleteRow, deleteValue, getValueJson, getValueJson, getValuePB, getValuePB, getValueXML, getValueXML, getValueXML, putValueJson, putValueJson, putValuePB, putValuePB, putValueXML, putValueXML, setUpBeforeClass, tearDownAfterClass
 
 
 
@@ -382,13 +386,27 @@ extends JAXBException
 
 
+
+
+
+
+
+testMultipleCellCheckDeletePB
+public void testMultipleCellCheckDeletePB()
+                                    throws IOException,
+                                           JAXBException
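The newly documented testMultipleCellCheckDeletePB exercises check-and-delete through the REST gateway's protobuf encoding. The semantics under test match the native client's atomic check-and-delete; a sketch with invented table, family, qualifier, and value names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndDeleteExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestTable"))) {
      byte[] row = Bytes.toBytes("testrow");
      // Atomically delete the row only if column a:1 still holds the expected value.
      boolean deleted = table.checkAndDelete(row, Bytes.toBytes("a"), Bytes.toBytes("1"),
          Bytes.toBytes("testvalue"), new Delete(row));
      System.out.println("deleted = " + deleted);
    }
  }
}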