[24/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index 7becf50..5b9d987 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -332,2472 +332,2473 @@
       confPrintThreshold = 10;
     }
     this.parallelPutCountPrintThreshold = confPrintThreshold;
-    LOG.info("Store={},  memstore type={}, storagePolicy={}, verifyBulkLoads={}, " +
-        "parallelPutCountPrintThreshold={}", getColumnFamilyName(),
-        this.memstore.getClass().getSimpleName(), policyName,
-        this.verifyBulkLoads, this.parallelPutCountPrintThreshold);
-  }
-
-  /**
-   * @return MemStore Instance to use in this store.
-   */
-  private MemStore getMemstore() {
-    MemStore ms = null;
-    // Check if in-memory-compaction configured. Note MemoryCompactionPolicy is an enum!
-    MemoryCompactionPolicy inMemoryCompaction = null;
-    if (this.getTableName().isSystemTable()) {
-      inMemoryCompaction = MemoryCompactionPolicy.valueOf(
-          conf.get("hbase.systemtables.compacting.memstore.type", "NONE"));
-    } else {
-      inMemoryCompaction = family.getInMemoryCompaction();
-    }
-    if (inMemoryCompaction == null) {
-      inMemoryCompaction =
-          MemoryCompactionPolicy.valueOf(conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT).toUpperCase());
-    }
-    switch (inMemoryCompaction) {
-      case NONE:
-        ms = ReflectionUtils.newInstance(DefaultMemStore.class,
-            new Object[] { conf, this.comparator,
-                this.getHRegion().getRegionServicesForStores()});
-        break;
-      default:
-        Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME,
-            CompactingMemStore.class, CompactingMemStore.class);
-        ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this,
-            this.getHRegion().getRegionServicesForStores(), inMemoryCompaction});
-    }
-    return ms;
-  }
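The memstore selection above resolves in order: system tables read hbase.systemtables.compacting.memstore.type, user tables read the column family, and a null falls back to the cluster-wide key. A minimal sketch of that fallback resolution, assuming "hbase.hregion.compacting.memstore.type" is the value of CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY (the default of "NONE" used here is an assumption; the shipped default is version-dependent):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;

    public class MemstorePolicyDemo {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide fallback used when the column family does not set a policy.
        conf.set("hbase.hregion.compacting.memstore.type", "BASIC");
        MemoryCompactionPolicy policy = MemoryCompactionPolicy.valueOf(
            conf.get("hbase.hregion.compacting.memstore.type", "NONE").toUpperCase());
        // NONE selects DefaultMemStore; anything else selects a CompactingMemStore subclass.
        System.out.println("Resolved memstore policy: " + policy);
      }
    }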
-
-  /**
-   * Creates the cache config.
-   * @param family The current column family.
-   */
-  protected void createCacheConf(final ColumnFamilyDescriptor family) {
-    this.cacheConf = new CacheConfig(conf, family);
-  }
-
-  /**
-   * Creates the store engine configured for the given Store.
-   * @param store The store. An unfortunate dependency needed due to it
-   *          being passed to coprocessors via the compactor.
-   * @param conf Store configuration.
-   * @param kvComparator KVComparator for storeFileManager.
-   * @return StoreEngine to use.
-   */
-  protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
-      CellComparator kvComparator) throws IOException {
-    return StoreEngine.create(store, conf, comparator);
-  }
-
-  /**
-   * @param family
-   * @return TTL in seconds of the specified family
-   */
-  public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) {
-    // HCD.getTimeToLive returns ttl in seconds.  Convert to milliseconds.
-    long ttl = family.getTimeToLive();
-    if (ttl == HConstants.FOREVER) {
-      // Default is unlimited ttl.
-      ttl = Long.MAX_VALUE;
-    } else if (ttl == -1) {
-      ttl = Long.MAX_VALUE;
-    } else {
-      // Second -> ms adjust for user data
-      ttl *= 1000;
-    }
-    return ttl;
-  }
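One detail worth noting in the hunk above: despite the "@return TTL in seconds" javadoc, the method converts to and returns milliseconds. A trivial standalone illustration of the conversion (plain Java arithmetic, not an HBase call):

    public class TtlDemo {
      public static void main(String[] args) {
        long ttlSeconds = 86400;            // one day, as set on a column family
        long ttlMillis = ttlSeconds * 1000; // the unit determineTTLFromFamily actually returns
        System.out.println(ttlMillis);      // 86400000
      }
    }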
-
-  @Override
-  public String getColumnFamilyName() {
-    return this.family.getNameAsString();
-  }
-
-  @Override
-  public TableName getTableName() {
-    return this.getRegionInfo().getTable();
-  }
-
-  @Override
-  public FileSystem getFileSystem() {
-    return this.fs.getFileSystem();
-  }
-
-  public HRegionFileSystem getRegionFileSystem() {
-    return this.fs;
-  }
-
-  /* Implementation of StoreConfigInformation */
-  @Override
-  public long getStoreFileTtl() {
-    // TTL only applies if there's no MIN_VERSIONs setting on the column.
-    return (this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : Long.MAX_VALUE;
-  }
-
-  @Override
-  public long getMemStoreFlushSize() {
-    // TODO: Why is this in here?  The flushsize of the region rather than the store?  St.Ack
-    return this.region.memstoreFlushSize;
-  }
-
-  @Override
-  public

[24/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationBase.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationBase.html b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationBase.html
index 25f375a..fa615cb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationBase.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationBase.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9};
+var methods = {"i0":10,"i1":10,"i2":9,"i3":10,"i4":9,"i5":9,"i6":10,"i7":9,"i8":9,"i9":10,"i10":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestReplicationBase
+public class TestReplicationBase
 extends Object
 This class is only a base for other integration-level replication tests.
  Do not add tests here.
@@ -190,17 +190,17 @@ extends Object
 noRepfamName
 
+protected static String
+PEER_ID2
+
 protected static byte[]
 row
 
 protected static NavigableMap<byte[],Integer>
 scopes
 
-static boolean
-seperateOldWALs
-
 protected static long
 SLEEP_TIME
@@ -262,31 +262,43 @@ extends Object
 cleanUp()
 
+protected boolean
+isSerialPeer()
+
 protected static void
 loadData(String prefix, byte[] row)
 
-static List<Boolean>
-params()
-
+private boolean
+peerExist(String peerId)
+
 protected static void
 runSimplePutDeleteTest()
 
 protected static void
 runSmallBatchTest()
 
+void
+setUpBase()
+
 static void
 setUpBeforeClass()
 
 static void
 tearDownAfterClass()
 
+void
+tearDownBase()
+
 protected static void
 waitForReplication(int expectedRows, int retries)
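The interesting addition in this method summary is the protected isSerialPeer() hook alongside the new setUpBase()/tearDownBase() lifecycle methods. A hypothetical subclass (class name invented for illustration) would opt into a serial replication peer roughly like this, assuming the base class consults the flag when it registers the peer:

    // Illustrative sketch only; relies on the TestReplicationBase API shown in this diff.
    public class TestSerialReplicationExample extends TestReplicationBase {
      @Override
      protected boolean isSerialPeer() {
        // setUpBase() presumably reads this when (re)creating the replication peer,
        // so returning true runs the inherited tests against a serial peer.
        return true;
      }
    }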
@@ -319,7 +331,7 @@ extends Object
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
@@ -328,7 +340,7 @@ extends Object
 
 conf1
-protected static org.apache.hadoop.conf.Configuration conf1
+protected static org.apache.hadoop.conf.Configuration conf1
 
@@ -337,7 +349,7 @@ extends Object
 
 conf2
-protected static org.apache.hadoop.conf.Configuration conf2
+protected static org.apache.hadoop.conf.Configuration conf2
 
@@ -346,7 +358,7 @@ extends Object
 
 CONF_WITH_LOCALFS
-protected static org.apache.hadoop.conf.Configuration CONF_WITH_LOCALFS
+protected static org.apache.hadoop.conf.Configuration CONF_WITH_LOCALFS
 
@@ -355,7 +367,7 @@ extends Object
 
 zkw1
-protected static org.apache.hadoop.hbase.zookeeper.ZKWatcher zkw1
+protected static org.apache.hadoop.hbase.zookeeper.ZKWatcher zkw1
 
@@ -364,7 +376,7 @@ extends Object
 
 zkw2
-protected static org.apache.hadoop.hbase.zookeeper.ZKWatcher zkw2
+protected static org.apache.hadoop.hbase.zookeeper.ZKWatcher zkw2
 
@@ -373,7 +385,7 @@ extends Object
 
 admin
-protected static org.apache.hadoop.hbase.client.replication.ReplicationAdmin admin
+protected static org.apache.hadoop.hbase.client.replication.ReplicationAdmin admin
 
@@ -382,7 +394,7 @@ extends Object
 
 hbaseAdmin
-protected static org.apache.hadoop.hbase.client.Admin

[24/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -1813,3325 +1813,3330 @@
   private void loadTableStates()
       throws IOException {
     tableStates = MetaTableAccessor.getTableStates(connection);
-  }
-
-  /**
-   * Check if the specified region's table is disabled.
-   * @param tableName table to check status of
-   */
-  private boolean isTableDisabled(TableName tableName) {
-    return tableStates.containsKey(tableName)
-        && tableStates.get(tableName)
-            .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-  }
-
-  /**
-   * Scan HDFS for all regions, recording their information into
-   * regionInfoMap
-   */
-  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-    Path rootDir = FSUtils.getRootDir(getConf());
-    FileSystem fs = rootDir.getFileSystem(getConf());
-
-    // list all tables from HDFS
-    List<FileStatus> tableDirs = Lists.newArrayList();
-
-    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+    // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+    // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+    // meantime.
+    this.tableStates.put(TableName.META_TABLE_NAME,
+        new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+  }
+
+  /**
+   * Check if the specified region's table is disabled.
+   * @param tableName table to check status of
+   */
+  private boolean isTableDisabled(TableName tableName) {
+    return tableStates.containsKey(tableName)
+        && tableStates.get(tableName)
+            .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+  }
+
+  /**
+   * Scan HDFS for all regions, recording their information into
+   * regionInfoMap
+   */
+  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+    Path rootDir = FSUtils.getRootDir(getConf());
+    FileSystem fs = rootDir.getFileSystem(getConf());
 
-    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-    for (Path path : paths) {
-      TableName tableName = FSUtils.getTableName(path);
-      if ((!checkMetaOnly && isTableIncluded(tableName)) ||
-          tableName.equals(TableName.META_TABLE_NAME)) {
-        tableDirs.add(fs.getFileStatus(path));
-      }
-    }
-
-    // verify that version file exists
-    if (!foundVersionFile) {
-      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-          "Version file does not exist in root dir " + rootDir);
-      if (shouldFixVersionFile()) {
-        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-            + " file.");
-        setShouldRerun();
-        FSUtils.setVersion(fs, rootDir, getConf().getInt(
-            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-                HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-                HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-      }
-    }
-
-    // Avoid multithreading at table-level because already multithreaded internally at
-    // region-level.  Additionally multithreading at table-level can lead to deadlock
-    // if there are many tables in the cluster.  Since there are a limited # of threads
-    // in the executor's thread pool and if we multithread at the table-level by putting
-    // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-    // executor tied up solely in waiting for the tables' region-level calls to complete.
-    // If there are enough tables then there will be no actual threads in the pool left
-    // for the region-level callables to be serviced.
-    for (FileStatus tableDir : tableDirs) {
-      LOG.debug("Loading region dirs from " + tableDir.getPath());
-      WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-      try {
-        item.call();
-      } catch (ExecutionException e) {
-        LOG.warn("Could not completely load table
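The long comment above the table loop is the design note of interest in this hunk: submitting table-level callables into the same bounded executor that services the region-level callables can leave every worker blocked waiting on work no free thread can run, which is why HBaseFsck invokes item.call() inline instead. A self-contained, JDK-only sketch of that starvation pattern (illustrative; not HBaseFsck code):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class PoolStarvationDemo {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        // Outer task submitted to a bounded pool blocks on an inner task
        // submitted to the SAME pool. With one outer task and two threads this
        // completes; with enough outer tasks, every worker is an outer task
        // waiting and no thread remains to run any inner task.
        Future<?> outer = pool.submit(() -> {
          Future<?> inner = pool.submit(() -> System.out.println("inner ran"));
          try {
            inner.get();
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        });
        outer.get();
        pool.shutdown();
      }
    }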

[24/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
index 6d25806..55fa666 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.LoadQueueItem.html
@@ -370,12 +370,12 @@
    * @param regionLocator region locator
    * @param silence true to ignore unmatched column families
    * @param copyFile always copy hfiles if true
-   * @return List of filenames which were not found
+   * @return Map of LoadQueueItem to region
    * @throws TableNotFoundException if table does not yet exist
    */
-  public List<String> doBulkLoad(Map<byte[], List<Path>> map, final Admin admin, Table table,
-      RegionLocator regionLocator, boolean silence, boolean copyFile)
-      throws TableNotFoundException, IOException {
+  public Map<LoadQueueItem, ByteBuffer> doBulkLoad(Map<byte[], List<Path>> map, final Admin admin,
+      Table table, RegionLocator regionLocator, boolean silence, boolean copyFile)
+      throws TableNotFoundException, IOException {
     if (!admin.isTableAvailable(regionLocator.getName())) {
       throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
     }
@@ -457,8 +457,8 @@
     }
   }
 
-  List<String> performBulkLoad(final Admin admin, Table table, RegionLocator regionLocator,
-      Deque<LoadQueueItem> queue, ExecutorService pool,
+  Map<LoadQueueItem, ByteBuffer> performBulkLoad(final Admin admin, Table table,
+      RegionLocator regionLocator, Deque<LoadQueueItem> queue, ExecutorService pool,
       SecureBulkLoadClient secureClient, boolean copyFile) throws IOException {
     int count = 0;
 
@@ -472,802 +472,815 @@
     // fs is the source filesystem
     fsDelegationToken.acquireDelegationToken(fs);
     bulkToken = secureClient.prepareBulkLoad(admin.getConnection());
-    Pair<Multimap<ByteBuffer, LoadQueueItem>, List<String>> pair = null;
+    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = null;
 
-    // Assumes that region splits can happen while this occurs.
-    while (!queue.isEmpty()) {
-      // need to reload split keys each iteration.
-      final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
-      if (count != 0) {
-        LOG.info("Split occured while grouping HFiles, retry attempt " +
-            + count + " with " + queue.size() + " files remaining to group or split");
-      }
-
-      int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
-      maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1);
-      if (maxRetries != 0 && count >= maxRetries) {
-        throw new IOException("Retry attempted " + count +
-            " times without completing, bailing out");
-      }
-      count++;
-
-      // Using ByteBuffer for byte[] equality semantics
-      pair = groupOrSplitPhase(table, pool, queue, startEndKeys);
-      Multimap<ByteBuffer, LoadQueueItem> regionGroups = pair.getFirst();
-
-      if (!checkHFilesCountPerRegionPerFamily(regionGroups)) {
-        // Error is logged inside checkHFilesCountPerRegionPerFamily.
-        throw new IOException("Trying to load more than " + maxFilesPerRegionPerFamily
-            + " hfiles to one family of one region");
-      }
-
-      bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups, copyFile);
-
-      // NOTE: The next iteration's split / group could happen in parallel to
-      // atomic bulkloads assuming that there are splits and no merges, and
-      // that we can atomically pull out the groups we want to retry.
-    }
-
-    if (!queue.isEmpty()) {
-      throw new RuntimeException("Bulk load aborted with some files not yet loaded."
-          + "Please check log for more details.");
-    }
-    if (pair == null) return null;
-    return pair.getSecond();
-  }
-
-  /**
-   * Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the
-   * passed directory and validates whether the prepared queue has all the valid table column
-   * families in it.
-   * @param hfilesDir directory containing list of hfiles to be loaded into the table
-   * @param table table to which hfiles should be loaded
-   * @param queue queue which needs to be loaded into the table
-   *
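For orientation, a hedged sketch of driving the changed doBulkLoad signature from client code. The table name, column family, and hfile paths are invented, and it assumes the 2016-era org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles shown in this diff, whose constructor takes a Configuration:

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BulkLoadDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("demo_table"); // illustrative name
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn);
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // family -> hfiles produced by an earlier job (paths are illustrative)
          Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
          family2Files.put(Bytes.toBytes("cf"),
              Arrays.asList(new Path("/staging/demo_table/cf/hfile-0")));
          LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
          // After the patch above, the return value maps each loaded item to its region.
          Map<LoadIncrementalHFiles.LoadQueueItem, ByteBuffer> loaded =
              loader.doBulkLoad(family2Files, admin, table, locator, false, false);
          System.out.println("Loaded " + (loaded == null ? 0 : loaded.size()) + " hfiles");
        }
      }
    }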