[22/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html
index c2698b3..9142fe1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.FileDetails.html
@@ -207,307 +207,309 @@
 199  }
 200  tmp = fileInfo.get(TIMERANGE_KEY);
 201  fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax();
-202  LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, encoding={}, seqNum={}{}",
-203  (file.getPath() == null? null: file.getPath().getName()),
-204  keyCount,
-205  r.getBloomFilterType().toString(),
-206  TraditionalBinaryPrefix.long2String(r.length(), "", 1),
-207  r.getHFileReader().getDataBlockEncoding(),
-208  seqNum,
-209  (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
-210}
-211return fd;
-212  }
-213
-214  /**
-215   * Creates file scanners for compaction.
-216   * @param filesToCompact Files.
-217   * @return Scanners.
-218   */
-219  private List<StoreFileScanner> createFileScanners(Collection<HStoreFile> filesToCompact,
-220  long smallestReadPoint, boolean useDropBehind) throws IOException {
-221return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind,
-222  smallestReadPoint);
-223  }
-224
-225  private long getSmallestReadPoint() {
-226return store.getSmallestReadPoint();
-227  }
-228
-229  protected interface InternalScannerFactory {
+202  LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
+203  + "encoding={}, compression={}, seqNum={}{}",
+204  (file.getPath() == null? null: file.getPath().getName()),
+205  keyCount,
+206  r.getBloomFilterType().toString(),
+207  TraditionalBinaryPrefix.long2String(r.length(), "", 1),
+208  r.getHFileReader().getDataBlockEncoding(),
+209  compactionCompression,
+210  seqNum,
+211  (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
+212}
+213return fd;
+214  }
+215
+216  /**
+217   * Creates file scanners for compaction.
+218   * @param filesToCompact Files.
+219   * @return Scanners.
+220   */
+221  private List<StoreFileScanner> createFileScanners(Collection<HStoreFile> filesToCompact,
+222  long smallestReadPoint, boolean useDropBehind) throws IOException {
+223return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind,
+224  smallestReadPoint);
+225  }
+226
+227  private long getSmallestReadPoint() {
+228return store.getSmallestReadPoint();
+229  }
 230
-231ScanType getScanType(CompactionRequestImpl request);
+231  protected interface InternalScannerFactory {
 232
-233InternalScanner createScanner(ScanInfo scanInfo, List<StoreFileScanner> scanners, ScanType scanType,
-234FileDetails fd, long smallestReadPoint) throws IOException;
-235  }
-236
-237  protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() {
+233ScanType getScanType(CompactionRequestImpl request);
+234
+235InternalScanner createScanner(ScanInfo scanInfo, List<StoreFileScanner> scanners, ScanType scanType,
+236FileDetails fd, long smallestReadPoint) throws IOException;
+237  }
 238
-239@Override
-240public ScanType getScanType(CompactionRequestImpl request) {
-241  return request.isAllFiles() ? COMPACT_DROP_DELETES : COMPACT_RETAIN_DELETES;
-242}
-243
-244@Override
-245public InternalScanner createScanner(ScanInfo scanInfo, List<StoreFileScanner> scanners,
-246ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException {
-247  return Compactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint,
-248fd.earliestPutTs);
-249}
-250  };
-251
-252  /**
-253   * Creates a writer for a new file in a temporary directory.
-254   * @param fd The file details.
-255   * @return Writer for a new StoreFile in the tmp dir.
-256   * @throws IOException if creation failed
-257   */
-258  protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind)
-259  throws IOException {
-260// When all MVCC readpoints are 0, don't write them.
-261// See HBASE-8166, HBASE-12600, and HBASE-13389.
-262return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
-263fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind);
-264  }
-265
-266  
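
The visible change in this hunk is the LOG.debug call, which now also reports the compaction compression. A minimal, self-contained sketch of the same SLF4J parameterized-logging idiom (hypothetical class and sample values, slf4j-api assumed on the classpath; this is not the actual Compactor code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CompactionLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(CompactionLogSketch.class);

  public static void main(String[] args) {
    // Hypothetical stand-ins for the fields the real Compactor logs.
    String fileName = "store-file-1";
    long keyCount = 42_000L;
    String bloomType = "ROW";
    String size = "128.0m";
    String encoding = "FAST_DIFF";
    String compression = "SNAPPY"; // the argument the patch adds to the message
    long seqNum = 7L;

    // With {} placeholders the message is only assembled when DEBUG is enabled,
    // so the extra compression argument costs nothing on the hot path.
    LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
        + "encoding={}, compression={}, seqNum={}",
        fileName, keyCount, bloomType, size, encoding, compression, seqNum);
  }
}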

[22/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.html b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.html
new file mode 100644
index 000..6ebf6d8
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/TestReplicationKillMasterRSWithSeparateOldWALs.html
@@ -0,0 +1,362 @@
+
+TestReplicationKillMasterRSWithSeparateOldWALs (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.replication
+Class TestReplicationKillMasterRSWithSeparateOldWALs
+
+java.lang.Object
+  org.apache.hadoop.hbase.replication.TestReplicationBase
+    org.apache.hadoop.hbase.replication.TestReplicationKillRS
+      org.apache.hadoop.hbase.replication.TestReplicationKillMasterRSWithSeparateOldWALs
+
+public class TestReplicationKillMasterRSWithSeparateOldWALs
+extends TestReplicationKillRS
+
+Field Summary
+
+Fields
+
+Modifier and Type    Field and Description
+static HBaseClassTestRule    CLASS_RULE
+
+
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.replication.TestReplicationBase
+admin, CONF_WITH_LOCALFS, conf1, conf2, famName, hbaseAdmin, htable1, htable2, NB_RETRIES, NB_ROWS_IN_BATCH, NB_ROWS_IN_BIG_BATCH, noRepfamName, PEER_ID2, row, scopes, SLEEP_TIME, tableName, utility1, utility2, zkw1, zkw2
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestReplicationKillMasterRSWithSeparateOldWALs()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods  Static Methods  Instance Methods  Concrete Methods
+
+Modifier and Type    Method and Description
+void                 killOneMasterRS()
+static void          setUpBeforeClass()
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.replication.TestReplicationKillRS
+loadTableAndKillRS
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.replication.TestReplicationBase
+cleanUp, isSerialPeer, loadData, runSimplePutDeleteTest, runSmallBatchTest, setUpBase, tearDownAfterClass, tearDownBase, waitForReplication
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, 
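
The generated page above only summarizes the class members (CLASS_RULE, setUpBeforeClass, killOneMasterRS). As a rough, hypothetical JUnit 4 skeleton of how such a subclass is typically laid out (a stand-in rule and empty bodies only; the real test lives in the HBase source tree and is not reproduced here):

import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class KillMasterRSWithSeparateOldWALsSketch {
  // Stand-in for HBaseClassTestRule.forClass(...); a plain JUnit timeout keeps the sketch self-contained.
  @ClassRule
  public static final Timeout CLASS_RULE = Timeout.seconds(600);

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // The real test would configure the separate old-WALs layout here before the
    // shared mini cluster starts; the exact configuration is not visible in this summary.
  }

  @Test
  public void killOneMasterRS() throws Exception {
    // The real test delegates to the loadTableAndKillRS(...) helper inherited from
    // TestReplicationKillRS; an empty body keeps this sketch compilable on its own.
  }
}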

[22/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806  throws IOException {
 1807tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815return tableStates.containsKey(tableName)
-1816 && tableStates.get(tableName)
-1817.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825Path rootDir = FSUtils.getRootDir(getConf());
-1826FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828// list all tables from HDFS
-1829List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810// meantime.
+1811this.tableStates.put(TableName.META_TABLE_NAME,
+1812new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820return tableStates.containsKey(tableName)
+1821 && tableStates.get(tableName)
+1822.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830Path rootDir = FSUtils.getRootDir(getConf());
+1831FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834for (Path path : paths) {
-1835  TableName tableName = FSUtils.getTableName(path);
-1836   if ((!checkMetaOnly &&
-1837   isTableIncluded(tableName)) ||
-1838   tableName.equals(TableName.META_TABLE_NAME)) {
-1839 tableDirs.add(fs.getFileStatus(path));
-1840   }
-1841}
-1842
-1843// verify that version file exists
-1844if (!foundVersionFile) {
-1845  errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846  "Version file does not exist in root dir " + rootDir);
-1847  if (shouldFixVersionFile()) {
-1848LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849+ " file.");
-1850setShouldRerun();
-1851FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855  }
-1856}
-1857
-1858// Avoid multithreading at table-level because already multithreaded internally at
-1859// region-level.  Additionally multithreading at table-level can lead to deadlock
-1860// if there are many tables in the cluster.  Since there are a limited # of threads
-1861// in the executor's thread pool and if we multithread at the table-level by putting
-1862// WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863// executor tied up solely in waiting for the tables' region-level calls to complete.
-1864// If there are enough tables then there will be no actual threads in the pool left
-1865// for the region-level callables to be serviced.
-1866for (FileStatus tableDir : tableDirs) {
-1867  LOG.debug("Loading region dirs from " +tableDir.getPath());
-1868  WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869  try {
-1870item.call();
-1871  } catch (ExecutionException e) {
-1872LOG.warn("Could not completely load table dir " +
-1873tableDir.getPath(), e.getCause());
-1874  }
-1875}
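
The comment block above (shown on the removed side of the hunk) explains why loadHdfsRegionDirs runs each table-level WorkItemHdfsDir inline via item.call() instead of submitting it to the shared executor: table-level callables that block waiting on region-level callables from the same bounded pool can exhaust the pool and deadlock. A small, self-contained sketch of that reasoning using only JDK classes and hypothetical names (TableWork stands in for WorkItemHdfsDir):

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class TableDirLoadSketch {
  // Stand-in for WorkItemHdfsDir: loading one table fans region-level work out to the
  // shared executor and then waits for it to finish.
  static final class TableWork implements Callable<Void> {
    private final String table;
    private final ExecutorService regionPool;
    TableWork(String table, ExecutorService regionPool) {
      this.table = table;
      this.regionPool = regionPool;
    }
    @Override
    public Void call() throws Exception {
      Future<String> regionScan = regionPool.submit(() -> "scanned regions of " + table);
      System.out.println(regionScan.get()); // blocks until the region-level callable runs
      return null;
    }
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    List<String> tableDirs = List.of("t1", "t2", "t3", "t4");

    // Calling each table-level item inline keeps every pool thread free for the
    // region-level work, so progress is guaranteed. Submitting all TableWork callables
    // to the same small pool instead could leave every thread blocked in regionScan.get()
    // with no thread left to run the region-level tasks.
    for (String table : tableDirs) {
      try {
        new TableWork(table, pool).call();
      } catch (Exception e) {
        System.err.println("Could not completely load table dir " + table + ": " + e);
      }
    }
    pool.shutdown();
  }
}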

[22/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
index f9833df..58c6a9c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.html
@@ -259,145 +259,153 @@
 251  }
 252
 253  /**
-254   * It "atomically" copies all the wals queues from another region server and returns them all
-255   * sorted per peer cluster (appended with the dead server's znode).
+254   * It "atomically" copies one peer's wals queue from another dead region server and returns them
+255   * all sorted. The new peer id is equal to the old peer id appended with the dead server's znode.
 256   * @param znode pertaining to the region server to copy the queues from
-257   */
-258  private Pair<String, SortedSet<String>> moveQueueUsingMulti(String znode, String peerId) {
-259try {
-260  // hbase/replication/rs/deadrs
-261  String deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
-262  List<ZKUtilOp> listOfOps = new ArrayList();
-263  ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId);
-264  if (!peerExists(replicationQueueInfo.getPeerId())) {
-265// the orphaned queues must be moved, otherwise the delete op of dead rs will fail,
-266// this will cause the whole multi op fail.
-267// NodeFailoverWorker will skip the orphaned queues.
-268LOG.warn("Peer " + peerId +
-269" didn't exist, will move its queue to avoid the failure of multi op");
-270  }
-271  String newPeerId = peerId + "-" + znode;
-272  String newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
-273  // check the logs queue for the old peer cluster
-274  String oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId);
-275  List<String> wals = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
-276  SortedSet<String> logQueue = new TreeSet();
-277  if (wals == null || wals.size() == 0) {
-278listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-279  } else {
-280// create the new cluster znode
-281ZKUtilOp op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
-282listOfOps.add(op);
-283// get the offset of the logs and set it to new znodes
-284for (String wal : wals) {
-285  String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal);
-286  byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalZnode);
-287  LOG.debug("Creating " + wal + " with data " + Bytes.toString(logOffset));
-288  String newLogZnode = ZKUtil.joinZNode(newPeerZnode, wal);
-289  listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
-290  listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
-291  logQueue.add(wal);
-292}
-293// add delete op for peer
-294listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
-295
-296if (LOG.isTraceEnabled())
-297  LOG.trace(" The multi list size is: " + listOfOps.size());
-298  }
-299  ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false);
-300  if (LOG.isTraceEnabled())
-301LOG.trace("Atomically moved the dead regionserver logs. ");
-302  return new Pair(newPeerId, logQueue);
-303} catch (KeeperException e) {
-304  // Multi call failed; it looks like some other regionserver took away the logs.
-305  LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
-306} catch (InterruptedException e) {
-307  LOG.warn("Got exception in copyQueuesFromRSUsingMulti: ", e);
-308  Thread.currentThread().interrupt();
-309}
-310return null;
-311  }
-312
-313  @Override
-314  public void addHFileRefs(String peerId, List<String> files) throws ReplicationException {
-315String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
-316boolean debugEnabled = LOG.isDebugEnabled();
-317if (debugEnabled) {
-318  LOG.debug("Adding hfile references " + files + " in queue " + peerZnode);
-319}
-320List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
-321int size = files.size();
-322for (int i = 0; i < size; i++) {
-323  listOfOps.add(ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)),
-324HConstants.EMPTY_BYTE_ARRAY));
-325}
-326if (debugEnabled) {
-327  LOG.debug(" The multi list size for adding hfile references in zk for node " + peerZnode
-328  + " is " +