[20/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
index c2698b3..9142fe1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/compactions/Compactor.html
@@ -207,307 +207,309 @@
 199  }
 200  tmp = fileInfo.get(TIMERANGE_KEY);
 201  fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax();
-202  LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, encoding={}, seqNum={}{}",
-203  (file.getPath() == null? null: file.getPath().getName()),
-204  keyCount,
-205  r.getBloomFilterType().toString(),
-206  TraditionalBinaryPrefix.long2String(r.length(), "", 1),
-207  r.getHFileReader().getDataBlockEncoding(),
-208  seqNum,
-209  (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
-210}
-211return fd;
-212  }
-213
-214  /**
-215   * Creates file scanners for compaction.
-216   * @param filesToCompact Files.
-217   * @return Scanners.
-218   */
-219  private List createFileScanners(Collection filesToCompact,
-220  long smallestReadPoint, boolean useDropBehind) throws IOException {
-221return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind,
-222  smallestReadPoint);
-223  }
-224
-225  private long getSmallestReadPoint() {
-226return store.getSmallestReadPoint();
-227  }
-228
-229  protected interface InternalScannerFactory {
+202  LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
+203  + "encoding={}, compression={}, seqNum={}{}",
+204  (file.getPath() == null? null: file.getPath().getName()),
+205  keyCount,
+206  r.getBloomFilterType().toString(),
+207  TraditionalBinaryPrefix.long2String(r.length(), "", 1),
+208  r.getHFileReader().getDataBlockEncoding(),
+209  compactionCompression,
+210  seqNum,
+211  (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
+212}
+213return fd;
+214  }
+215
+216  /**
+217   * Creates file scanners for compaction.
+218   * @param filesToCompact Files.
+219   * @return Scanners.
+220   */
+221  private List createFileScanners(Collection filesToCompact,
+222  long smallestReadPoint, boolean useDropBehind) throws IOException {
+223return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind,
+224  smallestReadPoint);
+225  }
+226
+227  private long getSmallestReadPoint() {
+228return store.getSmallestReadPoint();
+229  }
 230
-231ScanType getScanType(CompactionRequestImpl request);
+231  protected interface InternalScannerFactory {
 232
-233InternalScanner createScanner(ScanInfo scanInfo, List scanners, ScanType scanType,
-234FileDetails fd, long smallestReadPoint) throws IOException;
-235  }
-236
-237  protected final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() {
+233ScanType getScanType(CompactionRequestImpl request);
+234
+235InternalScanner createScanner(ScanInfo scanInfo, List scanners, ScanType scanType,
+236FileDetails fd, long smallestReadPoint) throws IOException;
+237  }
 238
-239@Override
-240public ScanType getScanType(CompactionRequestImpl request) {
-241  return request.isAllFiles() ? COMPACT_DROP_DELETES : COMPACT_RETAIN_DELETES;
-242}
-243
-244@Override
-245public InternalScanner createScanner(ScanInfo scanInfo, List scanners,
-246ScanType scanType, FileDetails fd, long smallestReadPoint) throws IOException {
-247  return Compactor.this.createScanner(store, scanInfo, scanners, scanType, smallestReadPoint,
-248fd.earliestPutTs);
-249}
-250  };
-251
-252  /**
-253   * Creates a writer for a new file in a temporary directory.
-254   * @param fd The file details.
-255   * @return Writer for a new StoreFile in the tmp dir.
-256   * @throws IOException if creation failed
-257   */
-258  protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind)
-259  throws IOException {
-260// When all MVCC readpoints are 0, don't write them.
-261// See HBASE-8166, HBASE-12600, and HBASE-13389.
-262return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true,
-263fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind);
-264  }
-265
-266  private ScanInfo preCompactScannerOpen(Compactio

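The only substantive change in this hunk is the debug line: the format string picks up a compression={} placeholder and the argument list gains compactionCompression; the rest of the churn is the renumbering that follows. Below is a minimal, self-contained sketch of the same SLF4J parameterized-logging shape, with made-up stand-in values for the fields the real Compactor reads (only the format string and argument order come from the diff):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CompactionLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(CompactionLogSketch.class);

  public static void main(String[] args) {
    // Stand-in values; in Compactor these come from FileDetails and the store file reader.
    String fileName = "store-file-1";  // file.getPath().getName()
    long keyCount = 1_000_000L;
    String bloomType = "ROW";          // r.getBloomFilterType()
    String size = "128.0m";            // TraditionalBinaryPrefix.long2String(r.length(), "", 1)
    String encoding = "FAST_DIFF";     // r.getHFileReader().getDataBlockEncoding()
    String compression = "SNAPPY";     // the newly logged compactionCompression
    long seqNum = 42L;
    boolean allFiles = true;
    long earliestPutTs = 0L;

    // Adjacent string literals are concatenated at compile time, so splitting the
    // format string across two lines, as the new code does, costs nothing at runtime;
    // the {} placeholders are filled left to right from the trailing arguments.
    LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, "
        + "encoding={}, compression={}, seqNum={}{}",
        fileName, keyCount, bloomType, size, encoding, compression, seqNum,
        allFiles ? ", earliestPutTs=" + earliestPutTs : "");
  }
}

SLF4J only formats the message when DEBUG is enabled, but the arguments themselves are evaluated eagerly, so the per-call cost with DEBUG off is just the ternary that builds the earliestPutTs suffix.
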
[20/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillRS.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillRS.html b/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillRS.html
index 08e5360..c254da7 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillRS.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillRS.html
@@ -122,11 +122,19 @@
 
 
 class 
+TestReplicationKillMasterRSWithSeparateOldWALs 
+
+
+class 
 TestReplicationKillSlaveRS
 Runs the TestReplicationKillRS test and selects the RS to kill in the slave cluster
  Do not add other tests in this class.
 
 
+
+class 
+TestReplicationKillSlaveRSWithSeparateOldWALs 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillSlaveRSWithSeparateOldWALs.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillSlaveRSWithSeparateOldWALs.html b/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillSlaveRSWithSeparateOldWALs.html
new file mode 100644
index 000..9a4fd82
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/class-use/TestReplicationKillSlaveRSWithSeparateOldWALs.html
@@ -0,0 +1,125 @@
+[... standard Javadoc doctype, script, and noscript boilerplate omitted ...]
+Uses of Class org.apache.hadoop.hbase.replication.TestReplicationKillSlaveRSWithSeparateOldWALs (Apache HBase 3.0.0-SNAPSHOT Test API)
+[... standard Javadoc top navigation bar omitted ...]
+Uses of Class org.apache.hadoop.hbase.replication.TestReplicationKillSlaveRSWithSeparateOldWALs
+No usage of org.apache.hadoop.hbase.replication.TestReplicationKillSlaveRSWithSeparateOldWALs
+[... standard Javadoc bottom navigation bar omitted ...]
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.html b/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.html
index da9a73a..13a7e7c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithMultipleAsyncWAL.html
@@ -164,7 +164,7 @@ extends TestReplicationBase
-admin, CONF_WITH_LOCALFS, conf1, conf2, famName, hbaseAdmin, htable1, htable2, NB_RETRIES, NB_ROWS_IN_BATCH, NB_ROWS_IN_BIG_BATCH, noRepfamName, row, scopes, seperateOldWALs, SLEEP_TIME, tableName, utility1, utility2, zkw1, zkw2
+admin, CONF_WITH_LOCALFS, conf1, conf2, famName, hbaseAdmin, htable1, htable2, NB_RETRIES, NB_ROWS_IN_BATCH, NB_ROWS_IN_BIG_BATCH, noRepfamName, PEER_ID2, row, scopes, SLEEP_TIME, tableName, utility1, utility2, zkw1, zkw2


@@ -214,7 +214,7 @@ extends TestReplicationBase
-cleanUp, loadData, params, runSimplePutDeleteTest, runSmallBatchTest, waitForReplication
+cleanUp, isSerialPeer, loadData, runSimplePutDeleteTest, runSmallBatchTest, setUpBase, tearDownBase, waitForReplication

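The two hunks above are regenerated inherited-member lists: TestReplicationBase now exposes PEER_ID2 plus the isSerialPeer/setUpBase/tearDownBase hooks, and (per the class-use page earlier) gains the TestReplicationKillMasterRSWithSeparateOldWALs and TestReplicationKillSlaveRSWithSeparateOldWALs variants. A rough sketch of that template-method shape follows; class names and bodies are illustrative assumptions, only the hook names mirror the lists above:

// Hypothetical sketch, not the actual HBase test code.
abstract class ReplicationTestBaseSketch {
  /** Hook a subclass overrides to run the same suite against a serial peer. */
  protected boolean isSerialPeer() {
    return false;
  }

  /** Shared fixture: configure a replication peer using the hook's answer. */
  public void setUpBase() throws Exception {
    // e.g. add a peer whose serial flag is isSerialPeer()
    System.out.println("peer added, serial=" + isSerialPeer());
  }

  /** Shared teardown, mirroring tearDownBase in the inherited list. */
  public void tearDownBase() throws Exception {
    System.out.println("peer removed");
  }
}

/** A variant suite is then just a one-method subclass that flips the switch. */
class SerialVariantSketch extends ReplicationTestBaseSketch {
  @Override
  protected boolean isSerialPeer() {
    return true;
  }

  public static void main(String[] args) throws Exception {
    ReplicationTestBaseSketch suite = new SerialVariantSketch();
    suite.setUpBase();   // prints: peer added, serial=true
    suite.tearDownBase();
  }
}

The *WithSeparateOldWALs classes presumably flip an analogous switch (the base class's seperateOldWALs field, typo and all) in the same tiny-subclass style.
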

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/org/apache/hadoop/hbase/replication/multiwal/TestReplicationEndpointWithM

[20/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806  throws IOException {
 1807tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815return tableStates.containsKey(tableName)
-1816&& tableStates.get(tableName)
-1817.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825Path rootDir = FSUtils.getRootDir(getConf());
-1826FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828// list all tables from HDFS
-1829List tableDirs = Lists.newArrayList();
-1830
-1831boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810// meantime.
+1811this.tableStates.put(TableName.META_TABLE_NAME,
+1812new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820return tableStates.containsKey(tableName)
+1821&& tableStates.get(tableName)
+1822.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830Path rootDir = FSUtils.getRootDir(getConf());
+1831FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833List paths = FSUtils.getTableDirs(fs, rootDir);
-1834for (Path path : paths) {
-1835  TableName tableName = FSUtils.getTableName(path);
-1836   if ((!checkMetaOnly &&
-1837   isTableIncluded(tableName)) ||
-1838   tableName.equals(TableName.META_TABLE_NAME)) {
-1839 tableDirs.add(fs.getFileStatus(path));
-1840   }
-1841}
-1842
-1843// verify that version file exists
-1844if (!foundVersionFile) {
-1845  errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846  "Version file does not exist in root dir " + rootDir);
-1847  if (shouldFixVersionFile()) {
-1848LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849+ " file.");
-1850setShouldRerun();
-1851FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855  }
-1856}
-1857
-1858// Avoid multithreading at table-level because already multithreaded internally at
-1859// region-level.  Additionally multithreading at table-level can lead to deadlock
-1860// if there are many tables in the cluster.  Since there are a limited # of threads
-1861// in the executor's thread pool and if we multithread at the table-level by putting
-1862// WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863// executor tied up solely in waiting for the tables' region-level calls to complete.
-1864// If there are enough tables then there will be no actual threads in the pool left
-1865// for the region-level callables to be serviced.
-1866for (FileStatus tableDir : tableDirs) {
-1867  LOG.debug("Loading region dirs from " +tableDir.getPath());
-1868  WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869  try {
-1870item.call();
-1871  } catch (ExecutionException e) {
-1872LOG.warn("Could not completely load table dir " +
-1873tableDir.getPath(), e.getCause());
-1874  }
-1
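
Strip away the reflow and the functional change in this hunk is confined to loadTableStates(): after reading the states from meta, it pins hbase:meta itself to ENABLED, because in hbase2 meta is always enabled yet has no entry of its own in the table-states map. A self-contained sketch of that logic, using simplified stand-in types for TableName/TableState/MetaTableAccessor:

import java.util.HashMap;
import java.util.Map;

public class LoadTableStatesSketch {
  enum State { ENABLED, DISABLED, DISABLING }

  static final String META_TABLE_NAME = "hbase:meta";
  static final Map<String, State> tableStates = new HashMap<>();

  static void loadTableStates() {
    // In HBaseFsck: tableStates = MetaTableAccessor.getTableStates(connection);
    // meta itself never appears in that map, so pin it to ENABLED explicitly.
    tableStates.put(META_TABLE_NAME, State.ENABLED);
  }

  static boolean isTableDisabled(String tableName) {
    // Mirrors the unchanged check below the fix: disabled means DISABLED or DISABLING.
    State s = tableStates.get(tableName);
    return s == State.DISABLED || s == State.DISABLING;
  }

  public static void main(String[] args) {
    loadTableStates();
    // Prints false: hbase:meta now has an explicit, enabled entry.
    System.out.println(isTableDisabled(META_TABLE_NAME));
  }
}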

[20/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
index 36f2731..23fce63 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
@@ -34,9 +34,9 @@
 026@org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
 028@org.jamon.annotations.Argument(name = "bcv", type = "String"),
-029@org.jamon.annotations.Argument(name = "bcn", type = "String"),
-030@org.jamon.annotations.Argument(name = "format", type = "String"),
-031@org.jamon.annotations.Argument(name = "filter", type = "String")})
+029@org.jamon.annotations.Argument(name = "filter", type = "String"),
+030@org.jamon.annotations.Argument(name = "bcn", type = "String"),
+031@org.jamon.annotations.Argument(name = "format", type = "String")})
 032public class RSStatusTmpl
 033  extends org.jamon.AbstractTemplateProxy
 034{
@@ -94,57 +94,57 @@
 086  return m_bcv__IsNotDefault;
 087}
 088private boolean m_bcv__IsNotDefault;
-089// 23, 1
-090public void setBcn(String bcn)
+089// 21, 1
+090public void setFilter(String filter)
 091{
-092  // 23, 1
-093  m_bcn = bcn;
-094  m_bcn__IsNotDefault = true;
+092  // 21, 1
+093  m_filter = filter;
+094  m_filter__IsNotDefault = true;
 095}
-096public String getBcn()
+096public String getFilter()
 097{
-098  return m_bcn;
+098  return m_filter;
 099}
-100private String m_bcn;
-101public boolean getBcn__IsNotDefault()
+100private String m_filter;
+101public boolean getFilter__IsNotDefault()
 102{
-103  return m_bcn__IsNotDefault;
+103  return m_filter__IsNotDefault;
 104}
-105private boolean m_bcn__IsNotDefault;
-106// 22, 1
-107public void setFormat(String format)
+105private boolean m_filter__IsNotDefault;
+106// 23, 1
+107public void setBcn(String bcn)
 108{
-109  // 22, 1
-110  m_format = format;
-111  m_format__IsNotDefault = true;
+109  // 23, 1
+110  m_bcn = bcn;
+111  m_bcn__IsNotDefault = true;
 112}
-113public String getFormat()
+113public String getBcn()
 114{
-115  return m_format;
+115  return m_bcn;
 116}
-117private String m_format;
-118public boolean getFormat__IsNotDefault()
+117private String m_bcn;
+118public boolean getBcn__IsNotDefault()
 119{
-120  return m_format__IsNotDefault;
+120  return m_bcn__IsNotDefault;
 121}
-122private boolean m_format__IsNotDefault;
-123// 21, 1
-124public void setFilter(String filter)
+122private boolean m_bcn__IsNotDefault;
+123// 22, 1
+124public void setFormat(String format)
 125{
-126  // 21, 1
-127  m_filter = filter;
-128  m_filter__IsNotDefault = true;
+126  // 22, 1
+127  m_format = format;
+128  m_format__IsNotDefault = true;
 129}
-130public String getFilter()
+130public String getFormat()
 131{
-132  return m_filter;
+132  return m_format;
 133}
-134private String m_filter;
-135public boolean getFilter__IsNotDefault()
+134private String m_format;
+135public boolean getFormat__IsNotDefault()
 136{
-137  return m_filter__IsNotDefault;
+137  return m_format__IsNotDefault;
 138}
-139private boolean m_filter__IsNotDefault;
+139private boolean m_format__IsNotDefault;
 140  }
 141  @Override
 142  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -163,24 +163,24 @@
 155return this;
 156  }
 157
-158  protected String bcn;
-159  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcn(String p_bcn)
+158  protected String filter;
+159  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String p_filter)
 160  {
-161(getImplData()).setBcn(p_bcn);
+161(getImplData()).setFilter(p_filter);
 162return this;
 163  }
 164
-165  protected String format;
-166  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format)
+165  protected String bcn;
+166  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcn(String p_bcn)
 167  {
-168(getImplData()).setFormat(p_format);
+168(getImplData()).setBcn(p_bcn);
 169return this;
 170  }
 171
-172  protected String filter;
-173  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String p_filter)
+172  pr
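
Every change in this hunk is a reordering: Jamon has re-emitted the optional-argument members (filter, bcn, format) in a different sequence, the kind of churn regenerated-site diffs produce without any behavior change. Callers never notice, because each generated setter just records its value in the ImplData and returns the proxy for chaining. A compilable sketch of that fluent shape follows; RSStatusTmplSketch and its render(...) are illustrative assumptions, only the setter signatures mirror the generated code:

import java.io.PrintWriter;

class RSStatusTmplSketch {
  // Optional template arguments; declaration order is irrelevant to callers.
  private String filter;
  private String bcn;
  private String format;

  public RSStatusTmplSketch setFilter(String p_filter) { this.filter = p_filter; return this; }
  public RSStatusTmplSketch setBcn(String p_bcn) { this.bcn = p_bcn; return this; }
  public RSStatusTmplSketch setFormat(String p_format) { this.format = p_format; return this; }

  /** Stand-in for the template's render entry point. */
  public void render(PrintWriter out) {
    out.printf("filter=%s bcn=%s format=%s%n", filter, bcn, format);
  }

  public static void main(String[] args) {
    // Call sites chain setters in whatever order they like, so the member
    // reshuffling in the regenerated class above is invisible to them.
    new RSStatusTmplSketch().setFormat("json").setFilter("general")
        .render(new PrintWriter(System.out, true));
  }
}

Since declaration order of the generated members carries no meaning, a diff like this one can be read as pure regeneration noise.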