[10/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
index ce887a2..506bc5c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
@@ -98,1529 +98,1560 @@
 090import org.apache.hadoop.util.GenericOptionsParser;
 091import org.apache.hadoop.util.Tool;
 092import org.apache.hadoop.util.ToolRunner;
-093import org.apache.yetus.audience.InterfaceAudience;
-094import org.apache.zookeeper.KeeperException;
-095import org.apache.zookeeper.ZooKeeper;
-096import org.apache.zookeeper.client.ConnectStringParser;
-097import org.apache.zookeeper.data.Stat;
-098import org.slf4j.Logger;
-099import org.slf4j.LoggerFactory;
-100
-101import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-102
-103/**
-104 * HBase Canary Tool, that that can be used to do
-105 * "canary monitoring" of a running HBase cluster.
+093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+094import org.apache.yetus.audience.InterfaceAudience;
+095import org.apache.zookeeper.KeeperException;
+096import org.apache.zookeeper.ZooKeeper;
+097import org.apache.zookeeper.client.ConnectStringParser;
+098import org.apache.zookeeper.data.Stat;
+099import org.slf4j.Logger;
+100import org.slf4j.LoggerFactory;
+101
+102import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+103
+104/**
+105 * HBase Canary Tool for "canary monitoring" of a running HBase cluster.
 106 *
-107 * Here are three modes
-108 * 1. region mode - Foreach region tries to get one row per column family
-109 * and outputs some information about failure or latency.
-110 *
-111 * 2. regionserver mode - Foreach regionserver tries to get one row from one table
-112 * selected randomly and outputs some information about failure or latency.
-113 *
-114 * 3. zookeeper mode - for each zookeeper instance, selects a zNode and
-115 * outputs some information about failure or latency.
-116 */
-117@InterfaceAudience.Private
-118public final class Canary implements Tool {
-119  // Sink interface used by the canary to outputs information
-120  public interface Sink {
-121public long getReadFailureCount();
-122public long incReadFailureCount();
-123public Map getReadFailures();
-124public void updateReadFailures(String regionName, String serverName);
-125public long getWriteFailureCount();
-126public long incWriteFailureCount();
-127public Map getWriteFailures();
-128public void updateWriteFailures(String regionName, String serverName);
-129  }
-130
-131  // Simple implementation of canary sink that allows to plot on
-132  // file or standard output timings or failures.
-133  public static class StdOutSink implements Sink {
-134private AtomicLong readFailureCount = new AtomicLong(0),
-135writeFailureCount = new AtomicLong(0);
-136
-137private Map readFailures = new ConcurrentHashMap<>();
-138private Map writeFailures = new ConcurrentHashMap<>();
-139
-140@Override
-141public long getReadFailureCount() {
-142  return readFailureCount.get();
-143}
-144
-145@Override
-146public long incReadFailureCount() {
-147  return readFailureCount.incrementAndGet();
-148}
-149
-150@Override
-151public Map getReadFailures() {
-152  return readFailures;
-153}
-154
-155@Override
-156public void updateReadFailures(String regionName, String serverName) {
-157  readFailures.put(regionName, serverName);
-158}
-159
-160@Override
-161public long getWriteFailureCount() {
-162  return writeFailureCount.get();
-163}
-164
-165@Override
-166public long incWriteFailureCount() {
-167  return writeFailureCount.incrementAndGet();
-168}
-169
-170@Override
-171public Map getWriteFailures() {
-172  return writeFailures;
-173}
-174
-175@Override
-176public void updateWriteFailures(String regionName, String serverName) {
-177  writeFailures.put(regionName, serverName);
-178}
-179  }
-180
-181  public static class RegionServerStdOutSink extends StdOutSink {
-182
-183public void publishReadFailure(String table, String server) {
-184  incReadFailureCount();
-185  LOG.error(String.format("Read from table:%s on region server:%s", table, server));
-186}
+107 * There are three modes:
+108 *
+109 * 1. region mode (Default): For each region, try to get one row per column family outputting
+110 * information on failure (ERROR) or else the latency.
+
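
The removed hunk above shows the Canary Sink contract and its StdOutSink implementation. As an illustrative sketch only (not part of this commit), the bookkeeping those methods provide can be exercised like this; the region and server name strings are made-up placeholders:

    import org.apache.hadoop.hbase.tool.Canary;

    public class StdOutSinkExample {
      public static void main(String[] args) {
        // StdOutSink is the simple Sink implementation shown in the hunk above.
        Canary.StdOutSink sink = new Canary.StdOutSink();
        // Record one failed read against a region, as the canary does when a get fails.
        sink.incReadFailureCount();
        sink.updateReadFailures("placeholder-region", "placeholder-server");
        System.out.println("read failures: " + sink.getReadFailureCount());
        System.out.println("failed regions: " + sink.getReadFailures());
      }
    }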

[10/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
index bee8222..7a938de 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
@@ -72,7 +72,7 @@
 064/**
 065 * Tests ReplicationSource and ReplicationEndpoint interactions
 066 */
-067@Category({ReplicationTests.class, MediumTests.class})
+067@Category({ ReplicationTests.class, MediumTests.class })
 068public class TestReplicationEndpoint extends TestReplicationBase {
 069
 070  @ClassRule
@@ -86,317 +86,317 @@
 078  @BeforeClass
 079  public static void setUpBeforeClass() throws Exception {
 080TestReplicationBase.setUpBeforeClass();
-081admin.removePeer("2");
-082numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-083  }
-084
-085  @AfterClass
-086  public static void tearDownAfterClass() throws Exception {
-087TestReplicationBase.tearDownAfterClass();
-088// check stop is called
-089Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
-090  }
-091
-092  @Before
-093  public void setup() throws Exception {
-094ReplicationEndpointForTest.contructedCount.set(0);
-095ReplicationEndpointForTest.startedCount.set(0);
-096ReplicationEndpointForTest.replicateCount.set(0);
-097ReplicationEndpointReturningFalse.replicated.set(false);
-098ReplicationEndpointForTest.lastEntries = null;
-099final List rsThreads =
-100utility1.getMiniHBaseCluster().getRegionServerThreads();
-101for (RegionServerThread rs : rsThreads) {
-102  utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
-103}
-104// Wait for  all log roll to finish
-105utility1.waitFor(3000, new Waiter.ExplainingPredicate() {
-106  @Override
-107  public boolean evaluate() throws Exception {
-108for (RegionServerThread rs : rsThreads) {
-109  if (!rs.getRegionServer().walRollRequestFinished()) {
-110return false;
-111  }
-112}
-113return true;
-114  }
-115
-116  @Override
-117  public String explainFailure() throws Exception {
-118List logRollInProgressRsList = new ArrayList<>();
-119for (RegionServerThread rs : rsThreads) {
-120  if (!rs.getRegionServer().walRollRequestFinished()) {
-121logRollInProgressRsList.add(rs.getRegionServer().toString());
-122  }
-123}
-124return "Still waiting for log roll on regionservers: " + logRollInProgressRsList;
-125  }
-126});
-127  }
-128
-129  @Test
-130  public void testCustomReplicationEndpoint() throws Exception {
-131// test installing a custom replication endpoint other than the default one.
-132admin.addPeer("testCustomReplicationEndpoint",
-133new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
-134.setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);
-135
-136// check whether the class has been constructed and started
-137Waiter.waitFor(conf1, 6, new Waiter.Predicate() {
-138  @Override
-139  public boolean evaluate() throws Exception {
-140return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
-141  }
-142});
-143
-144Waiter.waitFor(conf1, 6, new Waiter.Predicate() {
-145  @Override
-146  public boolean evaluate() throws Exception {
-147return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
-148  }
-149});
-150
-151Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
-152
-153// now replicate some data.
-154doPut(Bytes.toBytes("row42"));
-155
-156Waiter.waitFor(conf1, 6, new Waiter.Predicate() {
-157  @Override
-158  public boolean evaluate() throws Exception {
-159return ReplicationEndpointForTest.replicateCount.get() >= 1;
-160  }
-161});
-162
-163doAssert(Bytes.toBytes("row42"));
-164
-165admin.removePeer("testCustomReplicationEndpoint");
-166  }
-167
-168  @Test
-169  public void testReplicationEndpo
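
The removed testCustomReplicationEndpoint above installs a custom replication endpoint by pointing a ReplicationPeerConfig at ReplicationEndpointForTest. A minimal sketch of the same registration outside the test harness, assuming a plain Admin handle; the peer id and endpoint class name here are hypothetical placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    import org.apache.hadoop.hbase.zookeeper.ZKConfig;

    public class AddCustomPeerExample {
      // Registers a peer whose edits are shipped by a custom ReplicationEndpoint,
      // mirroring the admin.addPeer(...) call in the removed test body above.
      static void addCustomPeer(Admin admin, Configuration peerClusterConf) throws Exception {
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
            .setClusterKey(ZKConfig.getZooKeeperClusterKey(peerClusterConf))
            .setReplicationEndpointImpl("com.example.MyReplicationEndpoint");
        admin.addReplicationPeer("examplePeer", peerConfig);
      }
    }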

[10/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -1813,3325 +1813,3330 @@
 1805  private void loadTableStates()
 1806  throws IOException {
 1807tableStates = MetaTableAccessor.getTableStates(connection);
-1808  }
-1809
-1810  /**
-1811   * Check if the specified region's table is disabled.
-1812   * @param tableName table to check status of
-1813   */
-1814  private boolean isTableDisabled(TableName tableName) {
-1815return tableStates.containsKey(tableName)
-1816&& tableStates.get(tableName)
-1817.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818  }
-1819
-1820  /**
-1821   * Scan HDFS for all regions, recording their information into
-1822   * regionInfoMap
-1823   */
-1824  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825Path rootDir = FSUtils.getRootDir(getConf());
-1826FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828// list all tables from HDFS
-1829List tableDirs = Lists.newArrayList();
-1830
-1831boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810// meantime.
+1811this.tableStates.put(TableName.META_TABLE_NAME,
+1812new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813  }
+1814
+1815  /**
+1816   * Check if the specified region's table is disabled.
+1817   * @param tableName table to check status of
+1818   */
+1819  private boolean isTableDisabled(TableName tableName) {
+1820return tableStates.containsKey(tableName)
+1821&& tableStates.get(tableName)
+1822.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823  }
+1824
+1825  /**
+1826   * Scan HDFS for all regions, recording their information into
+1827   * regionInfoMap
+1828   */
+1829  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830Path rootDir = FSUtils.getRootDir(getConf());
+1831FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833List paths = FSUtils.getTableDirs(fs, rootDir);
-1834for (Path path : paths) {
-1835  TableName tableName = FSUtils.getTableName(path);
-1836   if ((!checkMetaOnly &&
-1837   isTableIncluded(tableName)) ||
-1838   tableName.equals(TableName.META_TABLE_NAME)) {
-1839 tableDirs.add(fs.getFileStatus(path));
-1840   }
-1841}
-1842
-1843// verify that version file exists
-1844if (!foundVersionFile) {
-1845  errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846  "Version file does not exist in root dir " + rootDir);
-1847  if (shouldFixVersionFile()) {
-1848LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849+ " file.");
-1850setShouldRerun();
-1851FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855  }
-1856}
-1857
-1858// Avoid multithreading at table-level because already multithreaded internally at
-1859// region-level.  Additionally multithreading at table-level can lead to deadlock
-1860// if there are many tables in the cluster.  Since there are a limited # of threads
-1861// in the executor's thread pool and if we multithread at the table-level by putting
-1862// WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863// executor tied up solely in waiting for the tables' region-level calls to complete.
-1864// If there are enough tables then there will be no actual threads in the pool left
-1865// for the region-level callables to be serviced.
-1866for (FileStatus tableDir : tableDirs) {
-1867  LOG.debug("Loading region dirs from " +tableDir.getPath());
-1868  WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869  try {
-1870item.call();
-1871  } catch (ExecutionException e) {
-1872LOG.warn("Could not completely load table dir " +
-1873tableDir.getPath(), e.getCause());
-1874  }
-1875}
-1876erro
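
The replacement hunk above seeds hbase:meta into tableStates as ENABLED and then checks table state through TableState.inStates. A small standalone sketch of that same check (not from this commit), with a plain Map standing in for the tableStates field that loadTableStates() populates:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;

    public class TableStateCheckExample {
      // Same predicate as isTableDisabled(...) in the hunk: a table counts as disabled
      // only if it has a recorded state and that state is DISABLED or DISABLING.
      static boolean isTableDisabled(Map<TableName, TableState> tableStates, TableName tableName) {
        return tableStates.containsKey(tableName)
            && tableStates.get(tableName)
                .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
      }

      public static void main(String[] args) {
        Map<TableName, TableState> tableStates = new HashMap<>();
        // As the hunk does for hbase:meta, record it as always ENABLED.
        tableStates.put(TableName.META_TABLE_NAME,
            new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
        System.out.println(isTableDisabled(tableStates, TableName.META_TABLE_NAME)); // prints false
      }
    }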

[10/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
index ee9b740..57575c0 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.html
@@ -32,723 +32,730 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.util.ArrayList;
-028import java.util.List;
-029import java.util.Locale;
-030import java.util.Map;
-031import java.util.TreeMap;
-032
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.FileStatus;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.Path;
-038import org.apache.hadoop.hbase.HBaseTestingUtility;
-039import org.apache.hadoop.hbase.HColumnDescriptor;
-040import org.apache.hadoop.hbase.HConstants;
-041import org.apache.hadoop.hbase.HTableDescriptor;
-042import org.apache.hadoop.hbase.NamespaceDescriptor;
-043import org.apache.hadoop.hbase.TableName;
-044import org.apache.hadoop.hbase.TableNotFoundException;
-045import org.apache.hadoop.hbase.client.Table;
-046import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
-047import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-048import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-049import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-050import org.apache.hadoop.hbase.io.hfile.HFile;
-051import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-052import org.apache.hadoop.hbase.regionserver.BloomType;
-053import org.apache.hadoop.hbase.testclassification.LargeTests;
-054import org.apache.hadoop.hbase.testclassification.MapReduceTests;
-055import org.apache.hadoop.hbase.util.Bytes;
-056import org.apache.hadoop.hbase.util.FSUtils;
-057import org.apache.hadoop.hbase.util.HFileTestUtil;
-058import org.junit.AfterClass;
-059import org.junit.BeforeClass;
-060import org.junit.Rule;
-061import org.junit.Test;
-062import org.junit.experimental.categories.Category;
-063import org.junit.rules.TestName;
-064
-065/**
-066 * Test cases for the "load" half of the HFileOutputFormat bulk load
-067 * functionality. These tests run faster than the full MR cluster
-068 * tests in TestHFileOutputFormat
-069 */
-070@Category({MapReduceTests.class, LargeTests.class})
-071public class TestLoadIncrementalHFiles {
-072  @Rule
-073  public TestName tn = new TestName();
-074
-075  private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
-076  private static final byte[] FAMILY = Bytes.toBytes("myfam");
-077  private static final String NAMESPACE = "bulkNS";
-078
-079  static final String EXPECTED_MSG_FOR_NON_EXISTING_FAMILY = "Unmatched family names found";
-080  static final int MAX_FILES_PER_REGION_PER_FAMILY = 4;
+027import java.nio.ByteBuffer;
+028import java.util.ArrayList;
+029import java.util.Deque;
+030import java.util.List;
+031import java.util.Locale;
+032import java.util.Map;
+033import java.util.TreeMap;
+034
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.fs.FSDataOutputStream;
+037import org.apache.hadoop.fs.FileStatus;
+038import org.apache.hadoop.fs.FileSystem;
+039import org.apache.hadoop.fs.Path;
+040import org.apache.hadoop.hbase.HBaseTestingUtility;
+041import org.apache.hadoop.hbase.HColumnDescriptor;
+042import org.apache.hadoop.hbase.HConstants;
+043import org.apache.hadoop.hbase.HTableDescriptor;
+044import org.apache.hadoop.hbase.NamespaceDescriptor;
+045import org.apache.hadoop.hbase.TableName;
+046import org.apache.hadoop.hbase.TableNotFoundException;
+047import org.apache.hadoop.hbase.client.Table;
+048import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+049import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+050import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+051import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+052import org.apache.hadoop.hbase.io.hfile.HFile;
+053import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+054import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;
+055import org.apache.hadoop.hbase.regionserver.BloomType;
+056import org.apache.hadoop.hbase.testclassification.LargeTests;
+057import org.apache.hadoop.hbase.testclassification.MapReduceTests;
+058import org.apache.hadoop.hbase.util.Bytes;
+059import org.apache.hadoop.hbase.util.FSUtils;
+060import org.apache.hadoop.hbase.util.HFileTestUtil;
+061import org.junit.AfterClass;
+062import org.junit.BeforeClass;
+063import org.junit.Rule;
+064import org.junit.Test;
+06
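
The javadoc in this hunk describes these as tests for the "load" half of the HFileOutputFormat bulk load path, and the added imports (ByteBuffer, Deque, LoadQueueItem) come from that loader. As an illustrative sketch only (the paths and table name are placeholders), the operation those tests exercise looks roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("exampleTable");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tableName);
             RegionLocator locator = conn.getRegionLocator(tableName)) {
          // Directory of pre-built HFiles, e.g. the output of HFileOutputFormat.
          Path hfileDir = new Path("/tmp/example-hfiles");
          new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
        }
      }
    }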