[05/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 1ed32d0..5581885 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Apache HBase Downloads
@@ -433,7 +433,7 @@ under the License. -->
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/export_control.html
--
diff --git a/export_control.html b/export_control.html
index cf2a290..3351356 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Export Control
@@ -341,7 +341,7 @@ for more details.
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/index.html
--
diff --git a/index.html b/index.html
index 2a9029d..3dc0a3f 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Apache HBase™ Home
@@ -421,7 +421,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/integration.html
--
diff --git a/integration.html b/integration.html
index 9b8a5ca..3d5daff 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – CI Management
@@ -301,7 +301,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 4e6f110..7625259 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Issue Management
@@ -298,7 +298,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/license.html
--
diff --git a/license.html b/license.html
index 83944af..39c4e18 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Project Licenses
@@ -501,7 +501,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 2bc5854..fc5db58 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
-
+
     Apache HBase – Project Mailing Lists
@@ -351,7 +351,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
-        Last Published: 2018-10-16
+        Last Published: 2018-10-17

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/metrics.html
--
diff --git a/metrics.html b/metrics.html
index c1cb639..c4ac46b 100644
--- a/metrics.html
+++ b/metrics.html
[05/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.html
index bee8222..7a938de 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.html
@@ -72,7 +72,7 @@
 064 /**
 065  * Tests ReplicationSource and ReplicationEndpoint interactions
 066  */
-067 @Category({ReplicationTests.class, MediumTests.class})
+067 @Category({ ReplicationTests.class, MediumTests.class })
 068 public class TestReplicationEndpoint extends TestReplicationBase {
 069
 070   @ClassRule
@@ -86,317 +86,317 @@
 078   @BeforeClass
 079   public static void setUpBeforeClass() throws Exception {
 080     TestReplicationBase.setUpBeforeClass();
-081     admin.removePeer("2");
-082     numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-083   }
-084
-085   @AfterClass
-086   public static void tearDownAfterClass() throws Exception {
-087     TestReplicationBase.tearDownAfterClass();
-088     // check stop is called
-089     Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
-090   }
-091
-092   @Before
-093   public void setup() throws Exception {
-094     ReplicationEndpointForTest.contructedCount.set(0);
-095     ReplicationEndpointForTest.startedCount.set(0);
-096     ReplicationEndpointForTest.replicateCount.set(0);
-097     ReplicationEndpointReturningFalse.replicated.set(false);
-098     ReplicationEndpointForTest.lastEntries = null;
-099     final List<RegionServerThread> rsThreads =
-100         utility1.getMiniHBaseCluster().getRegionServerThreads();
-101     for (RegionServerThread rs : rsThreads) {
-102       utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
-103     }
-104     // Wait for all log roll to finish
-105     utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
-106       @Override
-107       public boolean evaluate() throws Exception {
-108         for (RegionServerThread rs : rsThreads) {
-109           if (!rs.getRegionServer().walRollRequestFinished()) {
-110             return false;
-111           }
-112         }
-113         return true;
-114       }
-115
-116       @Override
-117       public String explainFailure() throws Exception {
-118         List<String> logRollInProgressRsList = new ArrayList<>();
-119         for (RegionServerThread rs : rsThreads) {
-120           if (!rs.getRegionServer().walRollRequestFinished()) {
-121             logRollInProgressRsList.add(rs.getRegionServer().toString());
-122           }
-123         }
-124         return "Still waiting for log roll on regionservers: " + logRollInProgressRsList;
-125       }
-126     });
-127   }
-128
-129   @Test
-130   public void testCustomReplicationEndpoint() throws Exception {
-131     // test installing a custom replication endpoint other than the default one.
-132     admin.addPeer("testCustomReplicationEndpoint",
-133         new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf1))
-134             .setReplicationEndpointImpl(ReplicationEndpointForTest.class.getName()), null);
-135
-136     // check whether the class has been constructed and started
-137     Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
-138       @Override
-139       public boolean evaluate() throws Exception {
-140         return ReplicationEndpointForTest.contructedCount.get() >= numRegionServers;
-141       }
-142     });
-143
-144     Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
-145       @Override
-146       public boolean evaluate() throws Exception {
-147         return ReplicationEndpointForTest.startedCount.get() >= numRegionServers;
-148       }
-149     });
-150
-151     Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
-152
-153     // now replicate some data.
-154     doPut(Bytes.toBytes("row42"));
-155
-156     Waiter.waitFor(conf1, 60000, new Waiter.Predicate<Exception>() {
-157       @Override
-158       public boolean evaluate() throws Exception {
-159         return ReplicationEndpointForTest.replicateCount.get() >= 1;
-160       }
-161     });
-162
-163     doAssert(Bytes.toBytes("row42"));
-164
-165     admin.removePeer("testCustomReplicationEndpoint");
-166   }
-167
-168   @Test
-169   public void testReplicationEndpointReturnsFalseOnReplicate() throws Exception {
-170     Assert.assertEquals(0, ReplicationEndpointForTest.replicateCount.get());
-171     Assert.assertTrue(!ReplicationEndpointReturningFalse.replicated.get());
-172     int peerCount = admin.getPeersCount();
-173     final String id =
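The tests above lean on HBase's Waiter utility, which polls a predicate until it returns true or a timeout elapses, so assertions run only once the replication endpoint has actually been constructed, started, and invoked. As a rough illustration of that polling pattern, here is a minimal standalone sketch in plain Java; the replicateCount counter is a hypothetical stand-in for ReplicationEndpointForTest.replicateCount, not HBase's implementation:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.BooleanSupplier;

    public class WaitForSketch {
      // Hypothetical stand-in for ReplicationEndpointForTest.replicateCount.
      static final AtomicInteger replicateCount = new AtomicInteger();

      // Poll the condition every 100 ms until it holds or the timeout expires,
      // mirroring the Waiter.waitFor(conf, timeout, predicate) calls above.
      static boolean waitFor(long timeoutMs, BooleanSupplier condition) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (condition.getAsBoolean()) {
            return true;
          }
          Thread.sleep(100);
        }
        return condition.getAsBoolean();
      }

      public static void main(String[] args) throws InterruptedException {
        // Simulate replication happening on another thread, then wait for it.
        new Thread(replicateCount::incrementAndGet).start();
        System.out.println("replicated: " + waitFor(60000, () -> replicateCount.get() >= 1));
      }
    }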
[05/35] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
index bf1a2cc..89317aa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -1813,3325 +1813,3330 @@
 1805   private void loadTableStates()
 1806       throws IOException {
 1807     tableStates = MetaTableAccessor.getTableStates(connection);
-1808   }
-1809
-1810   /**
-1811    * Check if the specified region's table is disabled.
-1812    * @param tableName table to check status of
-1813    */
-1814   private boolean isTableDisabled(TableName tableName) {
-1815     return tableStates.containsKey(tableName) &&
-1816         tableStates.get(tableName)
-1817             .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
-1818   }
-1819
-1820   /**
-1821    * Scan HDFS for all regions, recording their information into
-1822    * regionInfoMap
-1823    */
-1824   public void loadHdfsRegionDirs() throws IOException, InterruptedException {
-1825     Path rootDir = FSUtils.getRootDir(getConf());
-1826     FileSystem fs = rootDir.getFileSystem(getConf());
-1827
-1828     // list all tables from HDFS
-1829     List<FileStatus> tableDirs = Lists.newArrayList();
-1830
-1831     boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));
+1808     // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
+1809     // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
+1810     // meantime.
+1811     this.tableStates.put(TableName.META_TABLE_NAME,
+1812         new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+1813   }
+1814
+1815   /**
+1816    * Check if the specified region's table is disabled.
+1817    * @param tableName table to check status of
+1818    */
+1819   private boolean isTableDisabled(TableName tableName) {
+1820     return tableStates.containsKey(tableName) &&
+1821         tableStates.get(tableName)
+1822             .inStates(TableState.State.DISABLED, TableState.State.DISABLING);
+1823   }
+1824
+1825   /**
+1826    * Scan HDFS for all regions, recording their information into
+1827    * regionInfoMap
+1828    */
+1829   public void loadHdfsRegionDirs() throws IOException, InterruptedException {
+1830     Path rootDir = FSUtils.getRootDir(getConf());
+1831     FileSystem fs = rootDir.getFileSystem(getConf());
 1832
-1833     List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
-1834     for (Path path : paths) {
-1835       TableName tableName = FSUtils.getTableName(path);
-1836       if ((!checkMetaOnly &&
-1837           isTableIncluded(tableName)) ||
-1838           tableName.equals(TableName.META_TABLE_NAME)) {
-1839         tableDirs.add(fs.getFileStatus(path));
-1840       }
-1841     }
-1842
-1843     // verify that version file exists
-1844     if (!foundVersionFile) {
-1845       errors.reportError(ERROR_CODE.NO_VERSION_FILE,
-1846           "Version file does not exist in root dir " + rootDir);
-1847       if (shouldFixVersionFile()) {
-1848         LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
-1849             + " file.");
-1850         setShouldRerun();
-1851         FSUtils.setVersion(fs, rootDir, getConf().getInt(
-1852             HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
-1853             HConstants.VERSION_FILE_WRITE_ATTEMPTS,
-1854             HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
-1855       }
-1856     }
-1857
-1858     // Avoid multithreading at table-level because already multithreaded internally at
-1859     // region-level. Additionally multithreading at table-level can lead to deadlock
-1860     // if there are many tables in the cluster. Since there are a limited # of threads
-1861     // in the executor's thread pool and if we multithread at the table-level by putting
-1862     // WorkItemHdfsDir callables into the executor, then we will have some threads in the
-1863     // executor tied up solely in waiting for the tables' region-level calls to complete.
-1864     // If there are enough tables then there will be no actual threads in the pool left
-1865     // for the region-level callables to be serviced.
-1866     for (FileStatus tableDir : tableDirs) {
-1867       LOG.debug("Loading region dirs from " + tableDir.getPath());
-1868       WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
-1869       try {
-1870         item.call();
-1871       } catch (ExecutionException e) {
-1872         LOG.warn("Could not completely load table dir " +
-1873             tableDir.getPath(), e.getCause());
-1874       }
-1875     }
-1876     errors.print("");
-1877   }
-1878
-1879   /**
-1880    * Record the
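The long comment in that block describes a classic thread-pool deadlock: when a bounded executor is shared by table-level and region-level work, submitting table-level callables that block on their regions' futures can fill every pool thread with a waiting parent, leaving no thread free to run the children. A minimal standalone sketch of the hazard, in plain Java with hypothetical task names rather than HBaseFsck's actual executor:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class PoolDeadlockSketch {
      public static void main(String[] args) throws Exception {
        // A bounded pool shared by parent (table) and child (region) tasks.
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // Each parent submits a child to the SAME pool and blocks on its result.
        // With more parents than threads, every thread holds a parent that is
        // waiting on a child that can never be scheduled.
        for (int i = 0; i < 3; i++) {
          pool.submit(() -> pool.submit(() -> "region work").get());
        }

        pool.shutdown();
        if (!pool.awaitTermination(2, TimeUnit.SECONDS)) {
          System.out.println("wedged: parents hold all threads, children stay queued");
          pool.shutdownNow(); // interrupt the blocked parents so the JVM can exit
        }
      }
    }

Calling item.call() directly on the current thread, as the fsck code above does for table-level work items, sidesteps this by keeping the pool free for region-level callables.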
[05/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
--
diff --git a/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html b/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
index 484d2ee..b5415ac 100644
--- a/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
+++ b/xref-test/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
@@ -553,121 +553,123 @@
 543         List<String> queues = rq.getUnClaimedQueueIds(deadRsZnode);
 544         for (String queue : queues) {
 545           Pair<String, SortedSet<String>> pair = rq.claimQueue(deadRsZnode, queue);
-546           logZnodesMap.put(pair.getFirst(), pair.getSecond());
-547         }
-548         server.abort("Done with testing", null);
-549       } catch (Exception e) {
-550         LOG.error("Got exception while running NodeFailoverWorker", e);
-551       } finally {
-552         latch.countDown();
-553       }
-554     }
-555
-556     /**
-557      * @return 1 when the map is not empty.
-558      */
-559     private int isLogZnodesMapPopulated() {
-560       Collection<Set<String>> sets = logZnodesMap.values();
-561       if (sets.size() > 1) {
-562         throw new RuntimeException("unexpected size of logZnodesMap: " + sets.size());
-563       }
-564       if (sets.size() == 1) {
-565         Set<String> s = sets.iterator().next();
-566         for (String file : files) {
-567           // at least one file was missing
-568           if (!s.contains(file)) {
-569             return 0;
-570           }
-571         }
-572         return 1; // we found all the files
-573       }
-574       return 0;
-575     }
-576   }
-577
-578   static class FailInitializeDummyReplicationSource extends ReplicationSourceDummy {
+546           if (pair != null) {
+547             logZnodesMap.put(pair.getFirst(), pair.getSecond());
+548           }
+549         }
+550         server.abort("Done with testing", null);
+551       } catch (Exception e) {
+552         LOG.error("Got exception while running NodeFailoverWorker", e);
+553       } finally {
+554         latch.countDown();
+555       }
+556     }
+557
+558     /**
+559      * @return 1 when the map is not empty.
+560      */
+561     private int isLogZnodesMapPopulated() {
+562       Collection<Set<String>> sets = logZnodesMap.values();
+563       if (sets.size() > 1) {
+564         throw new RuntimeException("unexpected size of logZnodesMap: " + sets.size());
+565       }
+566       if (sets.size() == 1) {
+567         Set<String> s = sets.iterator().next();
+568         for (String file : files) {
+569           // at least one file was missing
+570           if (!s.contains(file)) {
+571             return 0;
+572           }
+573         }
+574         return 1; // we found all the files
+575       }
+576       return 0;
+577     }
+578   }
 579
-580     @Override
-581     public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager,
-582         ReplicationQueues rq, ReplicationPeers rp, Stoppable stopper, String peerClusterId,
-583         UUID clusterId, ReplicationEndpoint replicationEndpoint, MetricsSource metrics)
-584         throws IOException {
-585       throw new IOException("Failing deliberately");
-586     }
-587   }
-588
-589   static class DummyServer implements Server {
-590     String hostname;
-591
-592     DummyServer() {
-593       hostname = "hostname.example.org";
-594     }
-595
-596     DummyServer(String hostname) {
-597       this.hostname = hostname;
-598     }
-599
-600     @Override
-601     public Configuration getConfiguration() {
-602       return conf;
-603     }
-604
-605     @Override
-606     public ZooKeeperWatcher getZooKeeper() {
-607       return zkw;
-608     }
-609
-610     @Override
-611     public CoordinatedStateManager getCoordinatedStateManager() {
-612       return null;
-613     }
-614     @Override
-615     public ClusterConnection getConnection() {
-616       return null;
-617     }
-618
-619     @Override
-620     public MetaTableLocator getMetaTableLocator() {
-621       return null;
-622     }
-623
-624     @Override
-625     public ServerName getServerName() {
-626       return ServerName.valueOf(hostname, 1234, 1L);
-627     }
-628
-629     @Override
-630     public void abort(String why, Throwable e) {
-631       // To change body of implemented methods use File | Settings | File Templates.
-632     }
-633
-634     @Override
-635     public boolean isAborted() {
-636       return false;
-637     }
-638
-639     @Override
-640     public void stop(String why) {
-641       // To change body of implemented methods use File | Settings | File Templates.
-642     }
-643
-644     @Override
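The fix in this hunk, the null check added at +546, covers the race where another worker claims the dead region server's queue first, so claimQueue has nothing to hand back. A standalone sketch of the same claim-or-skip pattern, in plain Java with a hypothetical map-backed store (the real ReplicationQueues is ZooKeeper-backed):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class ClaimQueueSketch {
      // Hypothetical stand-in for the znode-backed queue store.
      static final Map<String, SortedSet<String>> store = new HashMap<>();

      // Claim a dead server's queue; null means another worker won the race.
      static synchronized Map.Entry<String, SortedSet<String>> claimQueue(String deadRs, String queue) {
        SortedSet<String> wals = store.remove(deadRs + "/" + queue);
        return wals == null ? null : new SimpleEntry<>(queue + "-" + deadRs, wals);
      }

      public static void main(String[] args) {
        SortedSet<String> wals = new TreeSet<>();
        wals.add("wal.1");
        wals.add("wal.2");
        store.put("rs1/2", wals);

        Map<String, SortedSet<String>> logZnodesMap = new HashMap<>();
        for (String queue : new String[] { "2", "2" }) { // second attempt loses the race
          Map.Entry<String, SortedSet<String>> pair = claimQueue("rs1", queue);
          if (pair != null) { // mirror the fix: skip queues already claimed elsewhere
            logZnodesMap.put(pair.getKey(), pair.getValue());
          }
        }
        System.out.println(logZnodesMap); // one entry despite two claim attempts
      }
    }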