[07/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
index ce887a2..506bc5c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
@@ -98,1529 +98,1560 @@
 090import org.apache.hadoop.util.GenericOptionsParser;
 091import org.apache.hadoop.util.Tool;
 092import org.apache.hadoop.util.ToolRunner;
-093import org.apache.yetus.audience.InterfaceAudience;
-094import org.apache.zookeeper.KeeperException;
-095import org.apache.zookeeper.ZooKeeper;
-096import org.apache.zookeeper.client.ConnectStringParser;
-097import org.apache.zookeeper.data.Stat;
-098import org.slf4j.Logger;
-099import org.slf4j.LoggerFactory;
-100
-101import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-102
-103/**
-104 * HBase Canary Tool, that that can be used to do
-105 * "canary monitoring" of a running HBase cluster.
+093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+094import org.apache.yetus.audience.InterfaceAudience;
+095import org.apache.zookeeper.KeeperException;
+096import org.apache.zookeeper.ZooKeeper;
+097import org.apache.zookeeper.client.ConnectStringParser;
+098import org.apache.zookeeper.data.Stat;
+099import org.slf4j.Logger;
+100import org.slf4j.LoggerFactory;
+101
+102import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+103
+104/**
+105 * HBase Canary Tool for "canary monitoring" of a running HBase cluster.
 106 *
-107 * Here are three modes
-108 * 1. region mode - Foreach region tries to get one row per column family
-109 * and outputs some information about failure or latency.
-110 *
-111 * 2. regionserver mode - Foreach regionserver tries to get one row from one table
-112 * selected randomly and outputs some information about failure or latency.
-113 *
-114 * 3. zookeeper mode - for each zookeeper instance, selects a zNode and
-115 * outputs some information about failure or latency.
-116 */
-117@InterfaceAudience.Private
-118public final class Canary implements Tool {
-119  // Sink interface used by the canary to outputs information
-120  public interface Sink {
-121    public long getReadFailureCount();
-122    public long incReadFailureCount();
-123    public Map<String,String> getReadFailures();
-124    public void updateReadFailures(String regionName, String serverName);
-125    public long getWriteFailureCount();
-126    public long incWriteFailureCount();
-127    public Map<String,String> getWriteFailures();
-128    public void updateWriteFailures(String regionName, String serverName);
-129  }
-130
-131  // Simple implementation of canary sink that allows to plot on
-132  // file or standard output timings or failures.
-133  public static class StdOutSink implements Sink {
-134    private AtomicLong readFailureCount = new AtomicLong(0),
-135        writeFailureCount = new AtomicLong(0);
-136
-137    private Map<String, String> readFailures = new ConcurrentHashMap<>();
-138    private Map<String, String> writeFailures = new ConcurrentHashMap<>();
-139
-140    @Override
-141    public long getReadFailureCount() {
-142      return readFailureCount.get();
-143    }
-144
-145    @Override
-146    public long incReadFailureCount() {
-147      return readFailureCount.incrementAndGet();
-148    }
-149
-150    @Override
-151    public Map<String, String> getReadFailures() {
-152      return readFailures;
-153    }
-154
-155    @Override
-156    public void updateReadFailures(String regionName, String serverName) {
-157      readFailures.put(regionName, serverName);
-158    }
-159
-160    @Override
-161    public long getWriteFailureCount() {
-162      return writeFailureCount.get();
-163    }
-164
-165    @Override
-166    public long incWriteFailureCount() {
-167      return writeFailureCount.incrementAndGet();
-168    }
-169
-170    @Override
-171    public Map<String, String> getWriteFailures() {
-172      return writeFailures;
-173    }
-174
-175    @Override
-176    public void updateWriteFailures(String regionName, String serverName) {
-177      writeFailures.put(regionName, serverName);
-178    }
-179  }
-180
-181  public static class RegionServerStdOutSink extends StdOutSink {
-182
-183    public void publishReadFailure(String table, String server) {
-184      incReadFailureCount();
-185      LOG.error(String.format("Read from table:%s on region server:%s", table, server));
-186    }
+107 * There are three modes:
+108 * <ol>
+109 * <li>region mode (Default): For each region, try to get one row per column family outputting
+110 * information on failure (ERROR) or else the latency.
+111

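The Sink interface quoted in the removed lines 120-128 above is small enough to implement outside the Canary tool. The following is a minimal illustrative sketch only, not part of this commit: the class name MetricsSink is hypothetical, while the method signatures are taken verbatim from the hunk.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.tool.Canary;

// Hypothetical Sink keeping the same bookkeeping as StdOutSink; a real
// implementation might forward the failures to a metrics system instead
// of writing them to standard output.
public class MetricsSink implements Canary.Sink {
  private final AtomicLong readFailureCount = new AtomicLong(0);
  private final AtomicLong writeFailureCount = new AtomicLong(0);
  private final Map<String, String> readFailures = new ConcurrentHashMap<>();
  private final Map<String, String> writeFailures = new ConcurrentHashMap<>();

  @Override
  public long getReadFailureCount() {
    return readFailureCount.get();
  }

  @Override
  public long incReadFailureCount() {
    return readFailureCount.incrementAndGet();
  }

  @Override
  public Map<String, String> getReadFailures() {
    return readFailures;
  }

  @Override
  public void updateReadFailures(String regionName, String serverName) {
    // Remember which server hosted the region whose read probe failed.
    readFailures.put(regionName, serverName);
  }

  @Override
  public long getWriteFailureCount() {
    return writeFailureCount.get();
  }

  @Override
  public long incWriteFailureCount() {
    return writeFailureCount.incrementAndGet();
  }

  @Override
  public Map<String, String> getWriteFailures() {
    return writeFailures;
  }

  @Override
  public void updateWriteFailures(String regionName, String serverName) {
    writeFailures.put(regionName, serverName);
  }
}
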
[07/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.ReplicationEndpointReturningFalse.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.ReplicationEndpointReturningFalse.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.ReplicationEndpointReturningFalse.html
index bee8222..7a938de 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.ReplicationEndpointReturningFalse.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.ReplicationEndpointReturningFalse.html
@@ -72,7 +72,7 @@
 064/**
 065 * Tests ReplicationSource and ReplicationEndpoint interactions
 066 */
-067@Category({ReplicationTests.class, MediumTests.class})
+067@Category({ ReplicationTests.class, MediumTests.class })
 068public class TestReplicationEndpoint extends TestReplicationBase {
 069
 070  @ClassRule
@@ -86,317 +86,317 @@
 078  @BeforeClass
 079  public static void setUpBeforeClass() throws Exception {
 080    TestReplicationBase.setUpBeforeClass();
-081    admin.removePeer("2");
-082    numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-083  }
-084
-085  @AfterClass
-086  public static void tearDownAfterClass() throws Exception {
-087    TestReplicationBase.tearDownAfterClass();
-088    // check stop is called
-089    Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
-090  }
-091
-092  @Before
-093  public void setup() throws Exception {
-094    ReplicationEndpointForTest.contructedCount.set(0);
-095    ReplicationEndpointForTest.startedCount.set(0);
-096    ReplicationEndpointForTest.replicateCount.set(0);
-097    ReplicationEndpointReturningFalse.replicated.set(false);
-098    ReplicationEndpointForTest.lastEntries = null;
-099    final List<RegionServerThread> rsThreads =
-100        utility1.getMiniHBaseCluster().getRegionServerThreads();
-101    for (RegionServerThread rs : rsThreads) {
-102      utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
-103    }
-104    // Wait for all log roll to finish
-105    utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
-106      @Override
-107      public boolean evaluate() throws Exception {
-108        for (RegionServerThread rs : rsThreads) {
-109          if (!rs.getRegionServer().walRollRequestFinished()) {
-110            return false;
-111          }
-112        }
-113        return true;
-114      }
-115
-116      @Override
-117      public String explainFailure() throws Exception {
-118        List<String> logRollInProgressRsList = new ArrayList<>();
-119        for (RegionServerThread rs : rsThreads) {
-120          if (!rs.getRegionServer().walRollRequestFinished()) {
-121            logRollInProgressRsList.add(rs.getRegionServer().toString());
-122          }
-123        }
-124        return "Still waiting for log roll on regionservers: " + logRollInProgressRsList;
-125      }
-126    });
-127  }
-128
-129  @Test
-130  public void testCustomReplicationEndpoint() throws Exception {
-131    // test installing a custom replication endpoint other than the default one.
-132    admin.addPeer("testCustomReplicationEndpoint",
-133      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
-134        .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);
-135
-136    // check whether the class has been constructed and started
-137    Waiter.waitFor(conf1, 6, new Waiter.Predicate<Exception>() {
-138      @Override
-139      public boolean evaluate() throws Exception {
-140        return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
-141      }
-142    });
-143
-144    Waiter.waitFor(conf1, 6, new Waiter.Predicate<Exception>() {
-145      @Override
-146      public boolean evaluate() throws Exception {
-147        return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
-148      }
-149    });
-150
-151    Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
-152
-153    // now replicate some data.
-154    doPut(Bytes.toBytes("row42"));
-155
-156    Waiter.waitFor(conf1, 6, new Waiter.Predicate<Exception>() {
-157      @Override
-158      public boolean evaluate() throws Exception {
-159        return ReplicationEndpointForTest.replicateCount.get() >= 1;
-160      }
-161    });
-162
-163    doAssert(Bytes.toBytes("row42"));
-164
-165    admin.removePeer("testCustomReplicationEndpoint");
-166  }
-167
-168  @Test
-169  public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
-170    Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
-171

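The waits in the test above all use the same idiom: HBase's Waiter test utility polls a Predicate until it evaluates true or the timeout elapses. Below is a minimal sketch of that idiom, illustrative only -- the counter and the 60000 ms timeout are placeholders, not values taken from the diff.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Waiter;

public class WaiterSketch {
  // Stand-in for a counter such as ReplicationEndpointForTest.replicateCount.
  static final AtomicInteger replicateCount = new AtomicInteger();

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    final int expected = 1;
    // Poll until the condition holds; Waiter gives up (and fails the wait)
    // once the placeholder 60-second timeout is exceeded.
    Waiter.waitFor(conf, 60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return replicateCount.get() >= expected;
      }
    });
  }
}
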
[07/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806      throws IOException {
 1807    tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815    return tableStates.containsKey(tableName)
-1816        && tableStates.get(tableName)
-1817        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825    Path rootDir = FSUtils.getRootDir(getConf());
-1826    FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828    // list all tables from HDFS
-1829    List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808    // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809    // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810    // meantime.
+1811    this.tableStates.put(TableName.META_TABLE_NAME,
+1812        new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820    return tableStates.containsKey(tableName)
+1821        && tableStates.get(tableName)
+1822        .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830    Path rootDir = FSUtils.getRootDir(getConf());
+1831    FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834    for (Path path : paths) {
-1835      TableName tableName = FSUtils.getTableName(path);
-1836      if ((!checkMetaOnly &&
-1837          isTableIncluded(tableName)) ||
-1838          tableName.equals(TableName.META_TABLE_NAME)) {
-1839        tableDirs.add(fs.getFileStatus(path));
-1840      }
-1841    }
-1842
-1843    // verify that version file exists
-1844    if (!foundVersionFile) {
-1845      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846          "Version file does not exist in root dir " + rootDir);
-1847      if (shouldFixVersionFile()) {
-1848        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849            + " file.");
-1850        setShouldRerun();
-1851        FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853            HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855      }
-1856    }
-1857
-1858    // Avoid multithreading at table-level because already multithreaded internally at
-1859    // region-level.  Additionally multithreading at table-level can lead to deadlock
-1860    // if there are many tables in the cluster.  Since there are a limited # of threads
-1861    // in the executor's thread pool and if we multithread at the table-level by putting
-1862    // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863    // executor tied up solely in waiting for the tables' region-level calls to complete.
-1864    // If there are enough tables then there will be no actual threads in the pool left
-1865    // for the region-level callables to be serviced.
-1866    for (FileStatus tableDir : tableDirs) {
-1867      LOG.debug("Loading region dirs from " + tableDir.getPath());
-1868      WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869      try {
-1870        item.call();
-1871      } catch (ExecutionException e) {
-1872        LOG.warn("Could not completely load table dir " +
-1873            tableDir.getPath(),

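The HDFS walk in loadHdfsRegionDirs() above reduces to a handful of FSUtils calls that can be exercised on their own. The sketch below is illustrative only, using just the calls quoted in the hunk (FSUtils.getRootDir, getTableDirs, getTableName and the hbase.version check); the class name and the printing are not part of HBaseFsck.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class ListTableDirs {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);       // resolves hbase.rootdir
    FileSystem fs = rootDir.getFileSystem(conf);

    // hbck also verifies that the hbase.version file exists under the root dir.
    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
    System.out.println("version file present: " + foundVersionFile);

    // One directory per table under the root dir; hbck wraps each in a WorkItemHdfsDir.
    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
    for (Path path : paths) {
      TableName tableName = FSUtils.getTableName(path);
      System.out.println(tableName + " -> " + path);
    }
  }
}
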
[07/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
--
diff --git a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
index e0d658a..e348ec6 100644
--- a/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
+++ b/xref-test/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
@@ -34,723 +34,730 @@
 24  import static org.junit.Assert.fail;
 25  
 26  import java.io.IOException;
-27  import java.util.ArrayList;
-28  import java.util.List;
-29  import java.util.Locale;
-30  import java.util.Map;
-31  import java.util.TreeMap;
-32
-33  import org.apache.hadoop.conf.Configuration;
-34  import org.apache.hadoop.fs.FSDataOutputStream;
-35  import org.apache.hadoop.fs.FileStatus;
-36  import org.apache.hadoop.fs.FileSystem;
-37  import org.apache.hadoop.fs.Path;
-38  import org.apache.hadoop.hbase.HBaseTestingUtility;
-39  import org.apache.hadoop.hbase.HColumnDescriptor;
-40  import org.apache.hadoop.hbase.HConstants;
-41  import org.apache.hadoop.hbase.HTableDescriptor;
-42  import org.apache.hadoop.hbase.NamespaceDescriptor;
-43  import org.apache.hadoop.hbase.TableName;
-44  import org.apache.hadoop.hbase.TableNotFoundException;
-45  import org.apache.hadoop.hbase.client.Table;
-46  import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-47  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-48  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-49  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-50  import org.apache.hadoop.hbase.io.hfile.HFile;
-51  import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-52  import org.apache.hadoop.hbase.regionserver.BloomType;
-53  import org.apache.hadoop.hbase.testclassification.LargeTests;
-54  import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-55  import org.apache.hadoop.hbase.util.Bytes;
-56  import org.apache.hadoop.hbase.util.FSUtils;
-57  import org.apache.hadoop.hbase.util.HFileTestUtil;
-58  import org.junit.AfterClass;
-59  import org.junit.BeforeClass;
-60  import org.junit.Rule;
-61  import org.junit.Test;
-62  import org.junit.experimental.categories.Category;
-63  import org.junit.rules.TestName;
-64
-65  /**
-66   * Test cases for the "load" half of the HFileOutputFormat bulk load
-67   * functionality. These tests run faster than the full MR cluster
-68   * tests in TestHFileOutputFormat
-69   */
-70  @Category({MapReduceTests.class, LargeTests.class})
-71  public class TestLoadIncrementalHFiles {
-72    @Rule
-73    public TestName tn = new TestName();
-74
-75    private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
-76    private static final byte[] FAMILY = Bytes.toBytes("myfam");
-77    private static final String NAMESPACE = "bulkNS";
-78
-79    static final String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
-80    static final int MAX_FILES_PER_REGION_PER_FAMILY = 4;
+27  import java.nio.ByteBuffer;
+28  import java.util.ArrayList;
+29  import java.util.Deque;
+30  import java.util.List;
+31  import java.util.Locale;
+32  import java.util.Map;
+33  import java.util.TreeMap;
+34
+35  import org.apache.hadoop.conf.Configuration;
+36  import org.apache.hadoop.fs.FSDataOutputStream;
+37  import org.apache.hadoop.fs.FileStatus;
+38  import org.apache.hadoop.fs.FileSystem;
+39  import org.apache.hadoop.fs.Path;
+40  import org.apache.hadoop.hbase.HBaseTestingUtility;
+41  import org.apache.hadoop.hbase.HColumnDescriptor;
+42  import org.apache.hadoop.hbase.HConstants;
+43  import org.apache.hadoop.hbase.HTableDescriptor;
+44  import org.apache.hadoop.hbase.NamespaceDescriptor;
+45  import org.apache.hadoop.hbase.TableName;
+46  import org.apache.hadoop.hbase.TableNotFoundException;
+47  import org.apache.hadoop.hbase.client.Table;
+48  import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+49  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+50  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+51  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+52  import org.apache.hadoop.hbase.io.hfile.HFile;
+53  import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+54  import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+55  import org.apache.hadoop.hbase.regionserver.BloomType;
+56  import org.apache.hadoop.hbase.testclassification.LargeTests;
+57  import org.apache.hadoop.hbase.testclassification.MapReduceTests;
+58  import org.apache.hadoop.hbase.util.Bytes;
+59  import org.apache.hadoop.hbase.util.FSUtils;
+60  import org.apache.hadoop.hbase.util.HFileTestUtil;
+61  import org.junit.AfterClass;
+62  import org.junit.BeforeClass;
+63  import org.junit.Rule;
+64  import org.junit.Test;
+65  import